diff --git a/.gitignore b/.gitignore
index e6e327327c2a72bc2262d9b2923314c950c78cf6..b400d719cc967fd4c1270355f876bd3c39cfc2f3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -79,3 +79,15 @@ tests/comparisonTest/opentsdb/opentsdbtest/.settings/
 tests/examples/JDBC/JDBCDemo/.classpath
 tests/examples/JDBC/JDBCDemo/.project
 tests/examples/JDBC/JDBCDemo/.settings/
+
+# Emacs
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+TAGS
diff --git a/.gitmodules b/.gitmodules
index 156226d54486c17e64b9c514e47e3a7dc3fe6942..74afbbf9973abec6423633b848181b349de4ed6f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,9 +4,9 @@
 [submodule "src/connector/grafanaplugin"]
 	path = src/connector/grafanaplugin
 	url = https://github.com/taosdata/grafanaplugin
+[submodule "src/connector/hivemq-tdengine-extension"]
+	path = src/connector/hivemq-tdengine-extension
+	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
 [submodule "tests/examples/rust"]
 	path = tests/examples/rust
 	url = https://github.com/songtianyi/tdengine-rust-bindings.git
-[submodule "src/connector/hivemq-tdengine-extension"]
-	path = src/connector/hivemq-tdengine-extension
-	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a55b5fbed97c08117f23488cf3e0d60b894316e7..7bb36fe1b001473cf5641ad195959581affeb2cb 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 IF (CMAKE_VERSION VERSION_LESS 3.0)
   PROJECT(TDengine CXX)
   SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
diff --git a/Jenkinsfile b/Jenkinsfile
index 6b3d9e51510ce10d38bd49a56a63e9859e5cd691..7a3da2b3a0a33e27c0dccc5126fdac13336f1356 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -5,7 +5,7 @@ node {
     git url: 'https://github.com/taosdata/TDengine.git'
 }
-def kipstage=0
+def skipstage=0
 def abortPreviousBuilds() {
   def currentJobName = env.JOB_NAME
   def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@@ -80,25 +80,29 @@ pipeline {
       }
       steps {
         sh'''
-        cd ${WORKSPACE}
+        cp -r ${WORKSPACE} ${WORKSPACE}.tes
+        cd ${WORKSPACE}.tes
         git checkout develop
         git pull
         git fetch origin +refs/pull/${CHANGE_ID}/merge
         git checkout -qf FETCH_HEAD
         '''
        script{
-         skipstage=sh(script:"git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 1 ",returnStdout:true)
+         env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
        }
+       println env.skipstage
+       sh'''
+       rm -rf ${WORKSPACE}.tes
+       '''
      }
    }
    stage('Parallel test stage') {
-     //only build pr
      when {
        changeRequest()
        expression {
-         skipstage != 1
+         env.skipstage != 0
        }
      }
      parallel {
@@ -124,12 +128,12 @@ pipeline {
             pre_test()
             timeout(time: 45, unit: 'MINUTES'){
-              sh '''
-              date
-              cd ${WKC}/tests
-              find pytest -name '*'sql|xargs rm -rf
-              ./test-all.sh p2
-              date'''
+                sh '''
+                date
+                cd ${WKC}/tests
+                find pytest -name '*'sql|xargs rm -rf
+                ./test-all.sh p2
+                date'''
             }
           }
         }
@@ -161,6 +165,7 @@ pipeline {
         stage('test_crash_gen_s3') {
             agent{label "b2"}
+            
             steps {
                 pre_test()
                 catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
@@ -169,20 +174,22 @@ pipeline {
                     ./crash_gen.sh -a -p -t 4 -s 2000
                     '''
                 }
-                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                
+                sh '''
+                cd ${WKC}/tests/pytest
+                rm -rf /var/lib/taos/*
+                rm -rf /var/log/taos/*
+                ./handle_crash_gen_val_log.sh
+                '''
+                timeout(time: 45, unit: 'MINUTES'){
                     sh '''
-                    cd ${WKC}/tests/pytest
-                    ./handle_crash_gen_val_log.sh
+                    date
+                    cd ${WKC}/tests
+                    ./test-all.sh b2fq
+                    date
                     '''
-                }
-                timeout(time: 45, unit: 'MINUTES'){
-                    sh '''
-                    date
-                    cd ${WKC}/tests
-                    ./test-all.sh b2fq
-                    date
-                    '''
-                }
+                }
+            }
         }
@@ -216,6 +223,8 @@ pipeline {
                     date
                     cd ${WKC}/tests
                     ./test-all.sh b4fq
+                    cd ${WKC}/tests
+                    ./test-all.sh p4
                     date'''
                 }
             }
diff --git a/cmake/install.inc b/cmake/install.inc
index 2f0404334c06125e6ee8241ab91b22064ab20b89..ec0ec645198772d892461da64c1eaa2bc947f72c 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
     #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
     #INSTALL(TARGETS shell RUNTIME DESTINATION .)
     IF (TD_MVN_INSTALLED)
-        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.18-dist.jar DESTINATION connector/jdbc)
+        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.19-dist.jar DESTINATION connector/jdbc)
     ENDIF ()
 ELSEIF (TD_DARWIN)
     SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/version.inc b/cmake/version.inc
index 07356bb2161bde20a2e6e0c9588a15cf2fb11b10..962f1f60406f3657a379ee99427e110985988c44 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.0.14.0")
+  SET(TD_VER_NUMBER "2.0.16.0")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index a4db6fd5fbc3e867cd30d2a1c871e48a7bfbd5eb..cfc17442f5c21c2d002ba42c45ce523c80eb957f 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 
 ADD_SUBDIRECTORY(zlib-1.2.11)
diff --git a/deps/MQTT-C/CMakeLists.txt b/deps/MQTT-C/CMakeLists.txt
index 36ede467acfa05f893cbe99ed518458d57986c79..15b35525210ec90e6e2efbdcd0e6128cb4d34f91 100644
--- a/deps/MQTT-C/CMakeLists.txt
+++ b/deps/MQTT-C/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 
 # MQTT-C build options
 option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" 
OFF) diff --git a/deps/MsvcLibX/CMakeLists.txt b/deps/MsvcLibX/CMakeLists.txt index c02e4c7a4d89cdf657756ec9786d5a624419d182..4428579e1c098425c9d72d7d58a5fda15cd34c93 100644 --- a/deps/MsvcLibX/CMakeLists.txt +++ b/deps/MsvcLibX/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/iconv/CMakeLists.txt b/deps/iconv/CMakeLists.txt index f26f109735ee81bc999b9539185ce60ccb976bfd..286070fa9071f8fcd1949850cec87c1ced3245d7 100644 --- a/deps/iconv/CMakeLists.txt +++ b/deps/iconv/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/libcurl/lib/win64/libcurl_a.lib b/deps/libcurl/lib/win64/libcurl_a.lib new file mode 100644 index 0000000000000000000000000000000000000000..69e9fe0a57ef49b3b95cbd369fed95ef885efb95 Binary files /dev/null and b/deps/libcurl/lib/win64/libcurl_a.lib differ diff --git a/deps/pthread/CMakeLists.txt b/deps/pthread/CMakeLists.txt index dcd9ed035814ac9efd66340b58b287ccbabe61df..04e5be7472a9b8cbdb384348697b919bf2dd0ece 100644 --- a/deps/pthread/CMakeLists.txt +++ b/deps/pthread/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/regex/CMakeLists.txt b/deps/regex/CMakeLists.txt index c7e983b992fe8a2dd21b8420e652358976663a37..054b093d07c386d7ff9b0ffc4c05909d79b33129 100644 --- a/deps/regex/CMakeLists.txt +++ b/deps/regex/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/wepoll/CMakeLists.txt b/deps/wepoll/CMakeLists.txt index a8b34112215fae0df7bcac034622015fd72d337b..a81fd782bbc4b05a1158273a7fcc6701bc4d980d 100644 --- a/deps/wepoll/CMakeLists.txt +++ b/deps/wepoll/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt index a8750471d694bcfa98b711348861449e3fc7b23a..f83aa70085491fb6575c0a6bf93252192cddd040 100644 --- a/deps/zlib-1.2.11/CMakeLists.txt +++ b/deps/zlib-1.2.11/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..3c991547e7a7ccaf3701da6186c40b3ec6044ebf --- /dev/null +++ b/documentation20/cn/00.index/docs.md @@ -0,0 +1,151 @@ +# TDengine文档 + +TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](/architecture) 与 [数据建模](/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。如需查阅TDengine 1.6 文档,请点击 [这里](https://www.taosdata.com/cn/documentation16/) 访问。 + +## [TDengine介绍](/evaluation) + +* [TDengine 简介及特色](/evaluation#intro) +* [TDengine 适用场景](/evaluation#scenes) +* [TDengine 性能指标介绍和验证方法](/evaluation#) + +## [立即开始](/getting-started) + +* [快捷安装](/getting-started#install):可通过源码、安装包或docker安装,三秒钟搞定 +* [轻松启动](/getting-started#start):使用systemctl 启停TDengine +* [命令行程序TAOS](/getting-started#console):访问TDengine的简便方式 +* [极速体验](/getting-started#demo):运行示例程序,快速体验高效的数据插入、查询 +* [支持平台列表](/getting-started#platforms):TDengine服务器和客户端支持的平台列表 + +## [整体架构](/architecture) + +* 
[数据模型](/architecture#model):关系型数据库模型,但要求每个采集点单独建表
+* [集群与基本逻辑单元](/architecture#cluster):吸取NoSQL优点,支持水平扩展,支持高可靠
+* [存储模型与数据分区、分片](/architecture#sharding):标签数据与时序数据完全分离,按vnode和时间两个维度对数据切分
+* [数据写入与复制流程](/architecture#replication):先写入WAL、之后写入缓存,再给应用确认,支持多副本
+* [缓存与持久化](/architecture#persistence):最新数据缓存在内存中,但落盘时采用列式存储、超高压缩比
+* [数据查询](/architecture#query):支持各种函数、时间轴聚合、插值、多表聚合
+
+## [数据建模](/model)
+
+* [创建库](/model#create-db):为具有相似数据特征的数据采集点创建一个库
+* [创建超级表](/model#create-stable):为同一类型的数据采集点创建一个超级表
+* [创建表](/model#create-table):使用超级表做模板,为每一个具体的数据采集点单独建表
+
+## [高效写入数据](/insert)
+
+* [SQL写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录
+* [Prometheus写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入
+* [Telegraf写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入
+* [EMQ X Broker](/insert#emq):配置EMQ X,不用任何代码,就可将MQTT数据直接写入
+* [HiveMQ Broker](/insert#hivemq):配置HiveMQ,不用任何代码,就可将MQTT数据直接写入
+
+## [高效查询数据](/queries)
+
+* [主要查询功能](/queries#queries):支持各种标准函数,设置过滤条件,时间段查询
+* [多表聚合查询](/queries#aggregation):使用超级表,设置标签过滤条件,进行高效聚合查询
+* [降采样查询值](/queries#sampling):按时间段分段聚合,支持插值
+
+## [高级功能](/advanced-features)
+
+* [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算
+* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):像典型的消息队列,应用可订阅接收到的最新数据
+* [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取
+* [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送
+
+## [连接器](/connector)
+
+* [C/C++ Connector](/connector#c-cpp):通过libtaos客户端的库,连接TDengine服务器的主要方法
+* [Java Connector(JDBC)](/connector/java):通过标准的JDBC API,给Java应用提供到TDengine的连接
+* [Python Connector](/connector#python):给Python应用提供一个连接TDengine服务器的驱动
+* [RESTful Connector](/connector#restful):提供一最简单的连接TDengine服务器的方式
+* [Go Connector](/connector#go):给Go应用提供一个连接TDengine服务器的驱动
+* [Node.js Connector](/connector#nodejs):给node应用提供一个连接TDengine服务器的驱动
+* [C# Connector](/connector#csharp):给C#应用提供一个连接TDengine服务器的驱动
+* [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它
+
+## [与其他工具的连接](/connections)
+
+* [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据
+* [Matlab](/connections#matlab):通过配置Matlab的JDBC数据源访问保存在TDengine的数据
+* [R](/connections#r):通过配置R的JDBC数据源访问保存在TDengine的数据
+* [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过IDEA 数据库管理工具可视化使用 TDengine
+
+## [TDengine集群的安装、管理](/cluster)
+
+* [准备工作](/cluster#prepare):部署环境前的几点注意事项
+* [创建第一个节点](/cluster#node-one):与快捷安装完全一样,非常简单
+* [创建后续节点](/cluster#node-other):配置新节点的taos.cfg, 在现有集群添加新的节点
+* [节点管理](/cluster#management):增加、删除、查看集群的节点
+* [Vnode 的高可用性](/cluster#high-availability):通过多副本的机制来提供 Vnode 的高可用性
+* [Mnode 的管理](/cluster#mnode):系统自动创建、无需任何人工干预
+* [负载均衡](/cluster#load-balancing):一旦节点个数或负载有变化,自动进行
+* [节点离线处理](/cluster#offline):节点离线超过一定时长,将从集群中剔除
+* [Arbitrator](/cluster#arbitrator):对于偶数个副本的情形,使用它可以防止split brain
+
+## [TDengine的运营和维护](/administrator)
+
+* [容量规划](/administrator#planning):根据场景,估算硬件资源
+* [容错和灾备](/administrator#tolerance):设置正确的WAL和数据副本数
+* [系统配置](/administrator#config):端口,缓存大小,文件块大小和其他系统配置
+* [用户管理](/administrator#user):添加、删除TDengine用户,修改用户密码
+* [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入
+* [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出
+* [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等
+* [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录
+* [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表
+
+## [TAOS SQL](/taos-sql)
+
+* [支持的数据类型](/taos-sql#data-type):支持时间戳、整型、浮点型、布尔型、字符型等多种数据类型
+* [数据库管理](/taos-sql#management):添加、删除、查看数据库
+* 
[表管理](/taos-sql#table):添加、删除、查看、修改表 +* [超级表管理](/taos-sql#super-table):添加、删除、查看、修改超级表 +* [标签管理](/taos-sql#tags):增加、删除、修改标签 +* [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入 +* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等 +* [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 +* [时间维度聚合](/taos-sql#aggregation):将表中数据按照时间段进行切割后聚合,降维处理 +* [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件 +* [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码 + +## TDengine的技术设计 + +* [系统模块](/architecture/taosd):taosd的功能和模块划分 +* [数据复制](/architecture/replica):支持实时同步、异步复制,保证系统的High Availibility +* [技术博客](https://www.taosdata.com/cn/blog/?categories=3):更多的技术分析和架构设计文章 + +## 常用工具 + +* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html) +* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html) +* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html) + +## TDengine与其他数据库的对比测试 + +* [用InfluxDB开源的性能测试工具对比InfluxDB和TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html) +* [TDengine与OpenTSDB对比测试](https://www.taosdata.com/blog/2019/08/21/621.html) +* [TDengine与Cassandra对比测试](https://www.taosdata.com/blog/2019/08/14/573.html) +* [TDengine与InfluxDB对比测试](https://www.taosdata.com/blog/2019/07/19/419.html) +* [TDengine与InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) + +## 物联网大数据 + +* [物联网、工业互联网大数据的特点](https://www.taosdata.com/blog/2019/07/09/105.html) +* [物联网大数据平台应具备的功能和特点](https://www.taosdata.com/blog/2019/07/29/542.html) +* [通用大数据架构为什么不适合处理物联网数据?](https://www.taosdata.com/blog/2019/07/09/107.html) +* [物联网、车联网、工业互联网大数据平台,为什么推荐使用TDengine?](https://www.taosdata.com/blog/2019/07/09/109.html) + +## 培训和FAQ + +* [FAQ:常见问题与答案](/faq) +* [技术公开课:开源、高效的物联网大数据平台,TDengine内核技术剖析](https://www.taosdata.com/blog/2020/12/25/2126.html) +* [TDengine视频教程-快速上手](https://www.taosdata.com/blog/2020/11/11/1941.html) +* [TDengine视频教程-数据建模](https://www.taosdata.com/blog/2020/11/11/1945.html) +* [TDengine视频教程-集群搭建](https://www.taosdata.com/blog/2020/11/11/1961.html) +* [TDengine视频教程-Go Connector](https://www.taosdata.com/blog/2020/11/11/1951.html) +* [TDengine视频教程-JDBC Connector](https://www.taosdata.com/blog/2020/11/11/1955.html) +* [TDengine视频教程-NodeJS Connector](https://www.taosdata.com/blog/2020/11/11/1957.html) +* [TDengine视频教程-Python Connector](https://www.taosdata.com/blog/2020/11/11/1963.html) +* [TDengine视频教程-RESTful Connector](https://www.taosdata.com/blog/2020/11/11/1965.html) +* [TDengine视频教程-“零”代码运维监控](https://www.taosdata.com/blog/2020/11/11/1959.html) +* [应用案例:一些使用实例来解释如何使用TDengine](https://www.taosdata.com/cn/blog/?categories=4) diff --git a/documentation20/webdocs/markdowndocs/Evaluation-ch.md b/documentation20/cn/01.evaluation/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/Evaluation-ch.md rename to documentation20/cn/01.evaluation/docs.md index fa6cec6e488d144a7009dc52772987380d5065da..0ae2106ff2a63696dc8bbc51d25bbf5e811ef561 100644 --- a/documentation20/webdocs/markdowndocs/Evaluation-ch.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -1,6 +1,6 @@ # TDengine 介绍 -## TDengine 简介 +## TDengine 简介 TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。 @@ -15,10 +15,11 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。 -
+![TDengine技术生态图](page://images/eco_system.png)
图 1. TDengine技术生态图
-## TDengine 总体适用场景 + +## TDengine 总体适用场景 作为一个IOT大数据平台,TDengine的典型适用场景是在IOT范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如CRM,ERP等,不在本文讨论范围内。 diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/cn/02.getting-started/docs.md similarity index 91% rename from documentation20/webdocs/markdowndocs/Getting Started-ch.md rename to documentation20/cn/02.getting-started/docs.md index b8b298b9501a43396e134eb1efa9ba6e6a029b79..c9c82942f3bd47fa774636dd5cf7c5b33f879209 100644 --- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -1,10 +1,10 @@ # 立即开始 -## 快捷安装 +## 快捷安装 TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 -### 通过源码安装 +### 通过源码安装 请参考我们的[TDengine github主页](https://github.com/taosdata/TDengine)下载源码并安装. @@ -12,17 +12,15 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版 请参考[TDengine官方Docker镜像的发布、下载和使用](https://www.taosdata.com/blog/2020/05/13/1509.html) -### 通过安装包安装 +### 通过安装包安装 TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择: -- TDengine-server-2.0.10.0-Linux-x64.rpm (4.2M) -- TDengine-server-2.0.10.0-Linux-x64.deb (2.7M) -- TDengine-server-2.0.10.0-Linux-x64.tar.gz (4.5M) +安装包下载在[这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。 -具体的安装过程,请参见TDengine多种安装包的安装和卸载以及视频教程。 +具体的安装过程,请参见[TDengine多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html)以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。 -## 轻松启动 +## 轻松启动 安装成功后,用户可使用`systemctl`命令来启动TDengine的服务进程。 @@ -52,8 +50,7 @@ $ systemctl status taosd 如果系统中不支持systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。 - -## TDengine命令行程序 +## TDengine命令行程序 执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。 @@ -61,7 +58,7 @@ $ systemctl status taosd $ taos ``` -如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: +如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/documentation/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: ```cmd taos> @@ -117,7 +114,8 @@ taos> source ; - ctrl+c 中止正在进行中的查询 - 执行`RESET QUERY CACHE`清空本地缓存的表的schema -## TDengine 极速体验 + +## TDengine 极速体验 启动TDengine的服务,在Linux终端执行taosdemo @@ -164,7 +162,6 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); **Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。 - ## 客户端和报警模块 如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下: @@ -178,8 +175,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); - TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M) - -## 支持平台列表 +## 支持平台列表 ### TDengine服务器支持的平台列表 @@ -220,5 +216,5 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 -请跳转到 [连接器 ](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。 +请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。 diff --git a/documentation20/webdocs/markdowndocs/taosd-ch.md b/documentation20/cn/03.architecture/01.taosd/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/taosd-ch.md rename to documentation20/cn/03.architecture/01.taosd/docs.md index 08be0c163e7076b58f03ff8ea3165e902a80fe64..66d51ed2dc2ea1546ab167cad680c20b3fa9729c 100644 --- 
a/documentation20/webdocs/markdowndocs/taosd-ch.md
+++ b/documentation20/cn/03.architecture/01.taosd/docs.md
@@ -3,9 +3,10 @@
 逻辑上,TDengine系统包含dnode, taosc和App,dnode是服务器侧执行代码taosd的一个运行实例,因此taosd是TDengine的核心,本文对taosd的设计做一简单的介绍,模块内的实现细节请见其他文档。
 
 ## 系统模块图
+
 taosd包含rpc, dnode, vnode, tsdb, query, cq, sync, wal, mnode, http, monitor等模块,具体如下图:
 
-
+![modules.png](page://images/architecture/modules.png)
 
 taosd的启动入口是dnode模块,dnode然后启动其他模块,包括可选配置的http, monitor模块。taosc或dnode之间交互的消息都是通过rpc模块进行,dnode模块根据接收到的消息类型,将消息分发到vnode或mnode的消息队列,或由dnode模块自己消费。dnode的工作线程(worker)消费消息队列里的消息,交给mnode或vnode进行处理。下面对各个模块做简要说明。
@@ -40,13 +41,14 @@ RPC模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数
 taosd的消息消费由dnode通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下:
 
-
+![dnode.png](page://images/architecture/dnode.png)
 
 ## VNODE模块
 
 vnode是一独立的数据存储查询逻辑单元,但因为一个vnode只能容许一个DB,因此vnode内部没有account, DB, user等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的TSDB,负责查询的Query, 负责数据复制的sync,负责数据库日志的wal, 负责连续查询的cq(continuous query), 负责事件触发的流计算的event等模块,这些子模块只与vnode模块发生关系,与其他模块没有任何调用关系。模块图如下:
 
-
+![vnode.png](page://images/architecture/vnode.png) + vnode模块向下,与dnodeVRead,dnodeVWrite发生互动,向上,与子模块发生互动。它主要的功能有: - 协调各个子模块的互动。各个子模块之间都不直接调用,都需要通过vnode模块进行; @@ -68,30 +70,37 @@ mnode是整个系统的大脑,负责整个系统的资源调度,负责meta d mnode里还负责account, user, DB, stable, table, vgroup, dnode的创建、删除与更新。mnode不仅把这些entity的meta data保存在内存,还做持久化存储。但为节省内存,各个表的标签值不保存在mnode(保存在vnode),而且子表不维护自己的schema, 而是与stable共享。为减小mnode的查询压力,taosc会缓存table、stable的schema。对于查询类的操作,各个slave mnode也可以提供,以减轻master压力。 ## TSDB模块 + TSDB模块是VNODE中的负责快速高并发地存储和读取属于该VNODE的表的元数据及采集的时序数据的引擎。除此之外,TSDB还提供了表结构的修改、表标签值的修改等功能。TSDB提供API供VNODE和Query等模块调用。TSDB中存储了两类数据,1:元数据信息;2:时序数据 ### 元数据信息 + TSDB中存储的元数据包含属于其所在的VNODE中表的类型,schema的定义等。对于超级表和超级表下的子表而言,又包含了tag的schema定义以及子表的tag值等。对于元数据信息而言,TSDB就相当于一个全内存的KV型数据库,属于该VNODE的表对象全部在内存中,方便快速查询表的信息。除此之外,TSDB还对其中的子表,按照tag的第一列取值做了全内存的索引,大大加快了对于标签的过滤查询。TSDB中的元数据的最新状态在落盘时,会以追加(append-only)的形式,写入到meta文件中。meta文件只进行追加操作,即便是元数据的删除,也会以一条记录的形式写入到文件末尾。TSDB也提供了对于元数据的修改操作,如表schema的修改,tag schema的修改以及tag值的修改等。 ### 时序数据 + 每个TSDB在创建时,都会事先分配一定量的内存缓冲区,且内存缓冲区的大小可配可修改。表采集的时序数据,在写入TSDB时,首先以追加的方式写入到分配的内存缓冲区中,同时建立基于时间戳的内存索引,方便快速查询。当内存缓冲区的数据积累到一定的程度时(达到内存缓冲区总大小的1/3),则会触发落盘操作,将缓冲区中的数据持久化到硬盘文件上。时序数据在内存缓冲区中是以行(row)的形式存储的。 而时序数据在写入到TSDB的数据文件时,是以列(column)的形式存储的。TSDB中的数据文件包含多个数据文件组,每个数据文件组中又包含.head、.data和.last三个文件,如(v2f1801.head、v2f1801.data、v2f1801.last)数据文件组。TSDB中的数据文件组是按照时间跨度进行分片的,默认是10天一个文件组,且可通过配置文件及建库选项进行配置。分片的数据文件组又按照编号递增排列,方便快速定位某一时间段的时序数据,高效定位数据文件组。时序数据在TSDB的数据文件中是以块的形式进行列式存储的,每个块中只包含一张表的数据,且数据在一个块中是按照时间顺序递增排列的。在一个数据文件组中,.head文件负责存储数据块的索引及统计信息,如每个块的位置,压缩算法,时间戳范围等。存储在.head文件中一张表的索引信息是按照数据块中存储的数据的时间递增排列的,方便进行折半查找等工作。.head和.last文件是存储真实数据块的文件,若数据块中的数据累计到一定程度,则会写入.data文件中,否则,会写入.last文件中,等待下次落盘时合并数据写入.data文件中,从而大大减少文件中块的个数,避免数据的过度碎片化。 ## Query模块 + 该模块负责整体系统的查询处理。客户端调用该该模块进行SQL语法解析,并将查询或写入请求发送到vnode,同时负责针对超级表的查询进行二阶段的聚合操作。在Vnode端,该模块调用TSDB模块读取系统中存储的数据进行查询处理。Query模块还定义了系统能够支持的全部查询函数,查询函数的实现机制与查询框架无耦合,可以在不修改查询流程的情况下动态增加查询函数。详细的设计请参见《TDengine 2.0查询模块设计》。 ## SYNC模块 + 该模块实现数据的多副本复制,包括vnode与mnode的数据复制,支持异步和同步两种复制方式,以满足meta data与时序数据不同复制的需求。因为它为mnode与vnode共享,系统为mnode副本预留了一个特殊的vgroup ID:1。因此vnode group的ID是从2开始的。 -每个vnode/mnode模块实例会有一对应的sync模块实例,他们是一一对应的。详细设计请见TDengine 2.0 数据复制模块设计 +每个vnode/mnode模块实例会有一对应的sync模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](https://www.taosdata.com/cn/documentation/architecture/replica/) ## WAL模块 + 该模块负责将新插入的数据写入write ahead log(WAL), 为vnode, mnode共享。以保证服务器crash或其他故障,能从WAL中恢复数据。 每个vnode/mnode模块实例会有一对应的wal模块实例,是完全一一对应的。WAL的落盘操作由两个参数walLevel, fsync控制。看具体场景,如果要100%保证数据不会丢失,需要将walLevel配置为2,fsync设置为0,每条数据插入请求,都会实时落盘后,才会给应用确认 ## HTTP模块 + 该模块负责处理系统对外的RESTful接口,可以通过配置,由dnode启动或停止。 该模块将接收到的RESTful请求,做了各种合法性检查后,将其变成标准的SQL语句,通过taosc的异步接口,将请求发往整个系统中的任一dnode。收到处理后的结果后,再翻译成HTTP协议,返回给应用。 @@ -99,6 +108,7 @@ TSDB中存储的元数据包含属于其所在的VNODE中表的类型,schema 如果HTTP模块启动,就意味着启动了一个taosc的实例。任一一个dnode都可以启动该模块,以实现对RESTful请求的分布式处理。 ## Monitor模块 + 该模块负责检测一个dnode的运行状态,可以通过配置,由dnode启动或停止。原则上,每个dnode都应该启动一个monitor实例。 Monitor采集TDengine里的关键操作,比如创建、删除、更新账号、表、库等,而且周期性的收集CPU、内存、网络等资源的使用情况(采集周期由系统配置参数monitorInterval控制)。获得这些数据后,monitor模块将采集的数据写入系统的日志库(DB名字由系统配置参数monitorDbName控制)。 diff --git a/documentation20/webdocs/markdowndocs/replica-ch.md b/documentation20/cn/03.architecture/02.replica/docs.md similarity index 95% rename from documentation20/webdocs/markdowndocs/replica-ch.md rename to documentation20/cn/03.architecture/02.replica/docs.md index 4d714fb5502534521e47d761e24af67b31248f51..8e1b1e3ab1513fbeaa5b9b805263485a13483b9b 100644 --- a/documentation20/webdocs/markdowndocs/replica-ch.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -10,13 +10,13 @@ 
TDengine面向的是物联网场景,需要支持数据的实时复制,来最 数据复制是与数据存储(写入、读取)密切相关的,但两者又是相对独立,可以完全脱耦的。在TDengine系统中,有两种不同类型的数据,一种是时序数据,由TSDB模块负责;一种是元数据(Meta Data), 由MNODE负责。这两种性质不同的数据都需要同步功能。数据复制模块通过不同的实例启动配置参数,为这两种类型数据都提供同步功能。 -在阅读本文之前,请先阅读《TDengine 2.0 整体架构》,了解TDengine的集群设计和基本概念 +在阅读本文之前,请先阅读《[TDengine 2.0 整体架构](https://www.taosdata.com/cn/documentation/architecture/)》,了解TDengine的集群设计和基本概念 特别注明:本文中提到数据更新操作包括数据的增加、删除与修改。 ## 基本概念和定义 -TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgoup也指mnode group, 除非特别注明。 +TDengine里存在vnode, mnode, vnode用来存储时序数据,mnode用来存储元数据。但从同步数据复制的模块来看,两者没有本质的区别,因此本文里的虚拟节点不仅包括vnode, 也包括mnode, vgroup也指mnode group, 除非特别注明。 **版本(version)**: @@ -90,7 +90,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 具体的流程图如下: -
+![replica-master.png](page://images/architecture/replica-master.png)
 
 选择Master的具体规则如下:
 
 1. 如果只有一个副本,该副本永远就是master
 2. 所有副本都在线时,版本最高的被选为master
 3. 在线的虚拟节点数过半,而且有虚拟节点是slave的话,该虚拟节点自动成为master
 4. 对于2和3,如果多个虚拟节点满足成为master的要求,那么虚拟节点组的节点列表里,最前面的选为master
@@ -105,7 +105,7 @@
 如果vnode A是master, vnode B是slave, vnode A能接受客户端的写请求,而vnode B不能。当vnode A收到写的请求后,遵循下面的流程:
 
-
+![replica-forward.png](page://images/architecture/replica-forward.png)
 
 1. 应用对写请求做基本的合法性检查,通过,则给该请求包打上一个版本号(version, 单调递增)
 2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log)
@@ -128,19 +128,19 @@
 2. 任何一个数据文件(file)有名字、大小,还有一个magic number。只有文件名、大小与magic number一致时,两个文件才判断是一样的,无需同步。Magic number可以是checksum, 也可以是简单的文件大小。怎么计算magic,换句话说,如何检测数据文件是否有效,完全由应用决定。
 3. 文件名的处理有点复杂,因为每台服务器的路径可能不一致。比如node A的TDengine的数据文件存放在 /etc/taos目录下,而node B的数据存放在 /home/jhtao目录下。因此同步模块需要应用在启动一个同步实例时提供一个path,这样两台服务器的绝对路径可以不一样,但仍然可以做对比,做同步。
 4. 当sync模块调用回调函数getFileInfo获得数据文件信息时,有如下的规则
-   1. index 为0,表示获取最老的文件,同时修改index返回给sync模块。如果index不为0,表示获取指定位置的文件。
-   2. 如果name为空,表示sync想获取位于index位置的文件信息,包括magic, size。Master节点会这么调用
-   3. 如果name不为空,表示sync想获取指定文件名和index的信息,slave节点会这么调用
-   4. 如果某个index的文件不存在,magic返回0,表示文件已经是最后一个。因此整个系统里,文件的index必须是连续的一段整数。
+   * index 为0,表示获取最老的文件,同时修改index返回给sync模块。如果index不为0,表示获取指定位置的文件。
+   * 如果name为空,表示sync想获取位于index位置的文件信息,包括magic, size。Master节点会这么调用
+   * 如果name不为空,表示sync想获取指定文件名和index的信息,slave节点会这么调用
+   * 如果某个index的文件不存在,magic返回0,表示文件已经是最后一个。因此整个系统里,文件的index必须是连续的一段整数。
 5. 当sync模块调用回调函数getWalInfo获得wal信息时,有如下规则
-   1. index为0,表示获得最老的WAL文件, 返回时,index更新为具体的数字
-   2. 如果返回0,表示这是最新的一个WAL文件,如果返回值是1,表示后面还有更新的WAL文件
-   3. 返回的文件名为空,那表示没有WAL文件
+   * index为0,表示获得最老的WAL文件, 返回时,index更新为具体的数字
+   * 如果返回0,表示这是最新的一个WAL文件,如果返回值是1,表示后面还有更新的WAL文件
+   * 返回的文件名为空,那表示没有WAL文件
 6. 无论是getFileInfo, 还是getWalInfo, 只要获取出错(不是文件不存在),返回-1即可,系统会报错,停止同步
 
 整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下:
 
-
+![replica-forward.png](page://images/architecture/replica-forward.png)
 
 1. 通过已经建立的TCP连接,发送sync req给master节点
 2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd)
diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/cn/03.architecture/docs.md
similarity index 95%
rename from documentation20/webdocs/markdowndocs/architecture-ch.md
rename to documentation20/cn/03.architecture/docs.md
index 8921633c8d31f0fc2c2ab3f00d36e8e57d5dbb9a..6f9f1699d4e9bab944388e91aff3b89b1421563a 100644
--- a/documentation20/webdocs/markdowndocs/architecture-ch.md
+++ b/documentation20/cn/03.architecture/docs.md
@@ -1,7 +1,9 @@
 # 数据模型和整体架构
 
-## 数据模型
+## 数据模型 <a class="anchor" id="model"></a>
+
 ### 物联网典型场景
+
 在典型的物联网、车联网、运维监测场景中,往往有多种不同类型的数据采集设备,采集一个到多个不同的物理量。而同一种采集设备类型,往往又有多个具体的采集设备分布在不同的地点。大数据处理系统就是要将各种采集的数据汇总,然后进行计算和分析。对于同一类设备,其采集的数据都是很规则的。以智能电表为例,假设每个智能电表采集电流、电压、相位三个量,其采集的数据类似如下的表格:
@@ -103,6 +105,7 @@ 每一条记录都有设备ID,时间戳,采集的物理量(如上图中的电流、电压、相位),还有与每个设备相关的静态标签(如上述表一中的位置Location和分组groupId)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。 ### 数据特征 + 除时序特征外,仔细研究发现,物联网、车联网、运维监测类数据还具有很多其他明显的特征: 1. 数据高度结构化; @@ -119,9 +122,11 @@ 充分利用上述特征,TDengine 采取了经特殊优化的存储和计算设计来处理时序数据,它将系统处理能力显著提高,同时大幅降低了系统运维的复杂度。 ### 关系型数据库模型 + 因为采集的数据一般是结构化数据,同时为降低学习门槛,TDengine采用传统的关系型数据库模型管理数据。因此用户需要先创建库,然后创建表,之后才能插入或查询数据。TDengine采用的是结构化存储,而不是NoSQL的key-value存储。 ### 一个数据采集点一张表 + 为充分利用其数据的时序性和其他数据特点,TDengine要求**对每个数据采集点单独建表**(比如有一千万个智能电表,就需创建一千万张表,上述表格中的d1001, d1002, d1003, d1004都需单独建表),用来存储这个采集点所采集的时序数据。这种设计有几大优点: 1. 能保证一个采集点的数据在存储介质上是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。 @@ -133,6 +138,7 @@ TDengine 建议用数据采集点的名字(如上表中的D1001)来做表名。每个数据采集点可能同时采集多个物理量(如上表中的curent, voltage, phase),每个物理量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集的数据,TDengine将自动按照时间戳建立索引,但对采集的物理量不建任何索引。数据用列式存储方式保存。 ### 超级表:同一类型数据采集点的集合 + 由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine引入超级表(Super Table,简称为STable)的概念。 超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的结合),除需要定义采集量的表结构之外,还需要定义其标签的schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。 如果整个系统有N个不同类型的数据采集点,就需要建立N个超级表。 @@ -141,16 +147,20 @@ TDengine 建议用数据采集点的名字(如上表中的D1001)来做表名。 当对多个具有相同数据类型的数据采集点进行聚合操作时,TDengine将先把满足标签过滤条件的表从超级表的中查找出来,然后再扫描这些表的时序数据,进行聚合操作,这样能将需要扫描的数据集大幅减少,从而大幅提高聚合计算的性能。 -## 集群与基本逻辑单元 +## 集群与基本逻辑单元 + TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何单台计算机都无法提供足够计算能力和存储能力处理海量数据的假设进行设计的。因此 TDengine 从研发的第一天起,就按照分布式高可靠架构进行设计,是支持水平扩展的,这样任何单台或多台服务器发生硬件故障或软件错误都不影响系统的可用性和可靠性。同时,通过节点虚拟化并辅以自动化负载均衡技术,TDengine 能最高效率地利用异构集群中的计算和存储资源降低硬件投资。 ### 主要逻辑单元 + TDengine 分布式架构的逻辑结构图如下: -
+ +![TDengine架构示意图](page://images/architecture/structure.png)
图 1 TDengine架构示意图
+ 一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。 -**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文《一篇文章说清楚TDengine的FQDN》。 +**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 **数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(VNODE),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP )决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 @@ -163,6 +173,7 @@ TDengine 分布式架构的逻辑结构图如下: **TAOSC:** taosc是TDengine给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供C/C++语言原生接口,内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC, C/C++/C#/Python/Go/Node.js接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的RESTful接口,taosc在TDengine集群的每个dnode上都有一运行实例。 ### 节点之间的通讯 + **通讯方式:**TDengine系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。 **FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。 @@ -180,9 +191,12 @@ TDengine 分布式架构的逻辑结构图如下: **重定向**:无论是dnode还是taosc,最先都是要发起与mnode的连接,但mnode是系统自动创建并维护的,因此对于用户来说,并不知道哪个dnode在运行mnode。TDengine只要求向系统中任何一个工作的dnode发起连接即可。因为任何一个正在运行的dnode,都维护有目前运行的mnode EP List。当收到一个来自新启动的dnode或taosc的连接请求,如果自己不是mnode,则将mnode EP List回复给对方,taosc或新启动的dnode收到这个list, 就重新尝试建立连接。当mnode EP List发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知taosc。 ### 一个典型的消息流程 + 为解释vnode, mnode, taosc和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 -
+ +![TDengine典型的操作流程](page://images/architecture/message.png)
图 2 TDengine典型的操作流程
+ 1. 应用通过JDBC、ODBC或其他API接口发起插入数据的请求。 2. taosc会检查缓存,看是否保存有该表的meta data。如果有,直接到第4步。如果没有,taosc将向mnode发出get meta-data请求。 3. mnode将该表的meta-data返回给taosc。Meta-data包含有该表的schema, 而且还有该表所属的vgroup信息(vnode ID以及所在的dnode的End Point,如果副本数为N,就有N组End Point)。如果taosc迟迟得不到mnode回应,而且存在多个mnode, taosc将向下一个mnode发出请求。 @@ -198,14 +212,15 @@ TDengine 分布式架构的逻辑结构图如下: 通过taosc缓存机制,只有在第一次对一张表操作时,才需要访问mnode,因此mnode不会成为系统瓶颈。但因为schema有可能变化,而且vgroup有可能发生改变(比如负载均衡发生),因此taosc会定时和mnode交互,自动更新缓存。 -## 存储模型与数据分区、分片 +## 存储模型与数据分区、分片 ### 存储模型 + TDengine存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分: -- 时序数据:存放于vnode里,由data、head和last三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除和更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 +- 时序数据:存放于vnode里,由data、head和last三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除操作,并且仅在update参数设置为1时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 - 标签数据:存放于vnode里的meta文件,支持增删改查四个标准操作。数据量不大,有N张表,就有N条记录,因此可以全内存存储。如果标签过滤操作很多,查询将十分频繁,因此TDengine支持多核多线程并发查询。只要计算资源足够,即使有数千万张表,过滤结果能毫秒级返回。 -- 其他元数据:存放于mnode里,包含系统节点、用户、DB、Table Schema等等,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 +- 元数据:存放于mnode里,包含系统节点、用户、DB、Table Schema等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 与典型的NoSQL存储模型相比,TDengine将标签数据与时序数据完全分离存储,它具有两大优势: @@ -213,6 +228,7 @@ TDengine存储的数据包括采集的时序数据以及库、表相关的元数 - 能够实现极为高效的多表之间的聚合查询:做多表之间聚合查询时,先把符合标签过滤条件的表查找出来,然后再查找这些表相应的数据块,这样大幅减少要扫描的数据集,从而大幅提高查询效率。而且标签数据采用全内存的结构进行管理和维护,千万级别规模的标签数据查询可以在毫秒级别返回。 ### 数据分片 + 对于海量的数据管理,为实现水平扩展,一般都需要采取分片(Sharding)分区(Partitioning)策略。TDengine是通过vnode来实现数据分片的,通过一个时间段一个数据文件来实现时序数据分区的。 vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和计算功能。为便于负载均衡、数据恢复、支持异构环境,TDengine将一个数据节点根据其计算和存储资源切分为多个vnode。这些vnode的管理是TDengine自动完成的,对应用完全透明。 @@ -224,11 +240,13 @@ vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和 每张表的meda data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。 ### 数据分区 + TDengine除vnode分片之外,还对时序数据按照时间段进行分区。每个数据文件只包含一个时间段的时序数据,时间段的长度由DB的配置参数days决定。这种按时间段分区的方法还便于高效实现数据的保留策略,只要数据文件超过规定的天数(系统配置参数keep),将被自动删除。而且不同的时间段可以存放于不同的路径和存储介质,以便于大数据的冷热管理,实现多级存储。 总的来说,**TDengine是通过vnode以及时间两个维度,对大数据进行切分**,便于并行高效的管理,实现水平扩展。 ### 负载均衡 + 每个dnode都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此mnode了解整个集群的状态。基于整体状态,当mnode发现某个dnode负载过重,它会将dnode上的一个或多个vnode挪到其他dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。 如果mnode一段时间没有收到dnode的状态报告,mnode会认为这个dnode已经离线。如果离线时间超过一定时长(时长由配置参数offlineThreshold决定),该dnode将被mnode强制剔除出集群。该dnode上的vnodes如果副本数大于一,系统将自动在其他dnode上创建新的副本,以保证数据的副本数。如果该dnode上还有mnode, 而且mnode的副本数大于一,系统也将自动在其他dnode上创建新的mnode, 以保证mnode的副本数。 @@ -238,13 +256,17 @@ TDengine除vnode分片之外,还对时序数据按照时间段进行分区。 负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。 **提示:负载均衡由参数balance控制,决定开启/关闭自动负载均衡。** -## 数据写入与复制流程 +## 数据写入与复制流程 + 如果一个数据库有N个副本,那一个虚拟节点组就有N个虚拟节点,但是只有一个是Master,其他都是slave。当应用将新的记录写入系统时,只有Master vnode能接受写的请求。如果slave vnode收到写的请求,系统将通知taosc需要重新定向。 + ### Master vnode写入流程 + Master Vnode遵循下面的写入流程: -
+![TDengine Master写入流程](page://images/architecture/write_master.png)
图 3 TDengine Master写入流程
+ 1. Master vnode收到应用的数据插入请求,验证OK,进入下一步; 2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; 3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version); @@ -253,10 +275,12 @@ Master Vnode遵循下面的写入流程: 6. 如果第2,3,4步中任何一步失败,将直接返回错误给应用。 ### Slave vnode写入流程 + 对于slave vnode, 写入流程是: -
+![TDengine Slave写入流程](page://images/architecture/write_slave.png)
图 4 TDengine Slave写入流程
+ 1. Slave vnode收到Master vnode转发了的数据插入请求。 2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失; 3. 写入内存,更新内存中的skip list。 @@ -264,6 +288,7 @@ Master Vnode遵循下面的写入流程: 与Master vnode相比,slave vnode不存在转发环节,也不存在回复确认环节,少了两步。但写内存与WAL是完全一样的。 ### 异地容灾、IDC迁移 + 从上述Master和Slave流程可以看出,TDengine采用的是异步复制的方式进行数据同步。这种方式能够大幅提高写入性能,网络延时对写入速度不会有大的影响。通过配置每个物理节点的IDC和机架号,可以保证对于一个虚拟节点组,虚拟节点由来自不同IDC、不同机架的物理节点组成,从而实现异地容灾。因此TDengine原生支持异地容灾,无需再使用其他工具。 另外一方面,TDengine支持动态修改副本数,一旦副本数增加,新加入的虚拟节点将立即进入数据同步流程,同步结束后,新加入的虚拟节点即可提供服务。而在同步过程中,master以及其他已经同步的虚拟节点都可以对外提供服务。利用这一特性,TDengine可以实现无服务中断的IDC机房迁移。只需要将新IDC的物理节点加入现有集群,等数据同步完成后,再将老的IDC的物理节点从集群中剔除即可。 @@ -280,6 +305,7 @@ Master Vnode遵循下面的写入流程: **提示:该功能暂未提供** ### 主从选择 + Vnode会保持一个数据版本号(Version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增一。 一个vnode启动时,角色(master、slave) 是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立TCP连接,并互相交换status,其中包括version和自己的角色。通过status的交换,系统进入选主流程,规则如下: @@ -289,17 +315,20 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存 3. 在线的虚拟节点数过半,而且有虚拟节点是slave的话,该虚拟节点自动成为master 4. 对于2和3,如果多个虚拟节点满足成为master的要求,那么虚拟节点组的节点列表里,最前面的选为master -更多的关于数据复制的流程,请见TDengine 2.0数据复制模块设计。 +更多的关于数据复制的流程,请见[TDengine 2.0数据复制模块设计](https://www.taosdata.com/cn/documentation/architecture/replica/)。 ### 同步复制 + 对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此TDengine提供同步复制的机制供用户选择。在创建数据库时,除指定副本数replica之外,用户还需要指定新的参数quorum。如果quorum大于一,它表示每次Master转发给副本时,需要等待quorum-1个回复确认,才能通知应用,数据在slave已经写入成功。如果在一定的时间内,得不到quorum-1个回复确认,master vnode将返回错误给应用。 采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。 注:vnode之间的同步复制仅仅企业版支持 -## 缓存与持久化 +## 缓存与持久化 + ### 缓存 + TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心的是刚产生的数据,即当前状态。TDengine充分利用这一特性,将最近到达的(当前状态)数据保存在缓存中。 TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署Redis或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。 @@ -307,6 +336,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接 每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。 ### 持久化存储 + TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制的增长。 为充分利用时序数据特点,TDengine将一个vnode保存在持久化存储的数据切分成多个文件,每个文件只保存固定天数的数据,这个天数由系统配置参数days决定。切分成多个文件后,给定查询的起止日期,无需任何索引,就可以立即定位需要打开哪些数据文件,大大加快读取速度。 @@ -322,6 +352,7 @@ TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久 数据写入磁盘时,根据系统配置参数comp决定是否压缩数据。TDengine提供了三种压缩选项:无压缩、一阶段压缩和两阶段压缩,分别对应comp值为0、1和2的情况。一阶段压缩根据数据的类型进行了相应的压缩,压缩算法包括delta-delta编码、simple 8B方法、zig-zag编码、LZ4等算法。二阶段压缩在一阶段压缩的基础上又用通用压缩算法进行了压缩,压缩率更高。 ### 多级存储 + 在默认配置下,TDengine会将所有数据保存在/var/lib/taos目录下,而且每个vnode的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine可通过配置系统参数dataDir让多个挂载的硬盘被系统同时使用。除此之外,TDengine也提供了数据分级存储的功能,即根据数据文件的新老程度存储在不同的存储介质上。比如最新的数据存储在SSD上,超过一周的数据存储在本地硬盘上,超过4周的数据存储在网络存储设备上,这样来降低存储成本,而又保证高效的访问数据。数据在不同存储介质上的移动是由系统自动完成的,对应用是完全透明的。数据的分级存储也是通过系统参数dataDir来配置。 dataDir的配置格式如下: @@ -345,10 +376,12 @@ dataDir /mnt/disk6/taos 2 注:多级存储功能仅企业版支持 -## 数据查询 +## 数据查询 + TDengine提供了多种多样针对表和超级表的查询处理功能,除了常规的聚合查询之外,还提供针对时序数据的窗口查询、统计聚合等功能。TDengine的查询处理需要客户端、vnode, mnode节点协同完成。 ### 单表查询 + 
SQL语句的解析和校验工作在客户端完成。解析SQL语句并生成抽象语法树(Abstract Syntax Tree, AST),然后对其进行校验和检查。以及向管理节点(mnode)请求查询中指定表的元数据信息(table metadata)。 根据元数据信息中的End Point信息,将查询请求序列化后发送到该表所在的数据节点(dnode)。dnode接收到查询请求后,识别出该查询请求指向的虚拟节点(vnode),将消息转发到vnode的查询执行队列。vnode的查询执行线程建立基础的查询执行环境,并立即返回该查询请求,同时开始执行该查询。 @@ -374,18 +407,21 @@ select count(*) from d1001 interval(1h) fill(prev); 针对d1001设备采集数据统计每小时记录数,如果某一个小时不存在数据,则返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。 ### 多表聚合查询 + TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以多个,可以随时增加、删除和修改。 应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示: -
+![多表聚合查询原理图](page://images/architecture/multi_tables.png)
图 5 多表聚合查询原理图
-1:应用将一个查询条件发往系统; -2: taosc将超级表的名字发往 Meta Node(管理节点); -3:管理节点将超级表所拥有的 vnode 列表发回 taosc; -4:taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点; -5:每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc; -6:taosc将多个数据节点返回的结果做最后的聚合,将其返回给应用。 + +1. 应用将一个查询条件发往系统; +2. taosc将超级表的名字发往 Meta Node(管理节点); +3. 管理节点将超级表所拥有的 vnode 列表发回 taosc; +4. taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点; +5. 每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc; +6. taosc将多个数据节点返回的结果做最后的聚合,将其返回给应用。 由于TDengine在vnode内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个vnode/dnode,聚合计算操作在多个vnode里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。 ### 预计算 + 为有效提升查询处理的性能,针对物联网数据的不可更改的特点,在数据块头部记录该数据块中存储数据的统计信息:包括最大值、最小值、和。我们称之为预计算单元。如果查询处理涉及整个数据块的全部数据,直接使用预计算结果,完全不需要读取数据块的内容。由于预计算数据量远小于磁盘上存储的数据块数据的大小,对于磁盘IO为瓶颈的查询处理,使用预计算结果可以极大地减小读取IO压力,加速查询处理的流程。预计算机制与Postgre SQL的索引BRIN(block range index)有异曲同工之妙。 diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/cn/04.model/docs.md similarity index 82% rename from documentation20/webdocs/markdowndocs/Model-ch.md rename to documentation20/cn/04.model/docs.md index ea1be899a85fe6bb31ab03674ab496d7b301432f..1a25e4407d0ed77c71040f676656fdc1451e2f81 100644 --- a/documentation20/webdocs/markdowndocs/Model-ch.md +++ b/documentation20/cn/04.model/docs.md @@ -4,16 +4,16 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 -关于数据建模请参考视频教程。 +关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。 -## 创建库 +## 创建库 不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: ```mysql CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; ``` -上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见TAOS SQL +上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。 创建库之后,需要使用SQL命令USE将当前库切换过来,例如: @@ -28,23 +28,25 @@ USE power; - 任何一张表或超级表是属于一个库的,在创建表之前,必须先创建库。 - 处于两个不同库的表是不能进行JOIN操作的。 -## 创建超级表 +## 创建超级表 + 一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的数据采集点创建一超级表。以表一中的智能电表为例,可以使用如下的SQL命令创建超级表: ```mysql -CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); +CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); ``` -与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义。第一列必须为时间戳(示例中为ts),其他列为采集的物理量(示例中为current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 TAOS SQL 一节。 +与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义。第一列必须为时间戳(示例中为ts),其他列为采集的物理量(示例中为current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](https://www.taosdata.com/cn/documentation/taos-sql#super-table) 章节。 每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。 一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。 -## 
创建表 +## 创建表 + TDengine对每个数据采集点需要独立建表。与标准的关系型数据一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以表一中的智能电表为例,可以使用如下的SQL命令建表: -```cmd +```mysql CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); ``` -其中d1001是表名,meters是超级表的表名,后面紧跟标签Location的具体标签值”Beijing.Chaoyang",标签groupId的具体标签值2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 TAOS SQL。 +其中d1001是表名,meters是超级表的表名,后面紧跟标签Location的具体标签值”Beijing.Chaoyang",标签groupId的具体标签值2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](https://www.taosdata.com/cn/documentation/taos-sql#table) 章节。 **注意:**目前 TDengine 没有从技术层面限制使用一个 database (dbA)的超级表作为模板建立另一个 database (dbB)的子表,后续会禁止这种用法,不建议使用这种方法建表。 @@ -52,12 +54,15 @@ TDengine建议将数据采集点的全局唯一ID作为表名(比如设备序列 **自动建表**:在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。比如: -```cmd +```mysql INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); ``` -上述SQL语句将记录(now, 10.2, 219, 0.32) 插入进表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。 +上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。 + +关于自动建表的详细语法请参见 [插入记录时自动建表](https://www.taosdata.com/cn/documentation/taos-sql#auto_create_table) 章节。 ## 多列模型 vs 单列模型 + TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。 TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。 diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/cn/05.insert/docs.md similarity index 72% rename from documentation20/webdocs/markdowndocs/insert-ch.md rename to documentation20/cn/05.insert/docs.md index 7d380ac952dce5f57ff259159c33dd9e9b53edf3..ce2d65e7d2259c6dac9efc67a61f7c009dd96984 100644 --- a/documentation20/webdocs/markdowndocs/insert-ch.md +++ b/documentation20/cn/05.insert/docs.md @@ -2,7 +2,7 @@ TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 -## SQL写入 +## SQL写入 应用通过C/C++, JDBC, GO, 或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: ```mysql @@ -18,7 +18,7 @@ TDengine也支持一次向多个表写入数据,比如下面这条命令就向 INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); ``` -详细的SQL INSERT语法规则请见TAOS SQL +详细的SQL INSERT语法规则请见 [TAOS SQL 的数据写入](https://www.taosdata.com/cn/documentation/taos-sql#insert) 章节。 **Tips:** @@ -27,11 +27,13 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对同一张表,如果新插入记录的时间戳已经存在,默认(没有使用 UPDATE 1 创建数据库)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。 -## Prometheus直接写入 -Prometheus作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具Bailongma,只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文用Docker容器快速搭建一个Devops监控Demo即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 +## Prometheus直接写入 + +[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing 
Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 ### 从源代码编译blm_prometheus -用户需要从github下载Bailongma的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: + +用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 - 安装好Golang, 1.10版本以上 - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在bailongma所在的linux服务器(可以与TDengine在同一台服务器,或者不同服务器) @@ -45,10 +47,12 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。 ### 安装Prometheus -通过Prometheus的官网下载安装。下载地址 + +通过Prometheus的官网下载安装。[下载地址](https://prometheus.io/download/) ### 配置Prometheus -参考Prometheus的配置文档,在Prometheus的配置文件中的部分,增加以下配置 + +参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置 - url: bailongma API服务提供的URL, 参考下面的blm_prometheus启动示例章节 @@ -112,11 +116,13 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf直接写入 -是一流行的IT运维数据采集开源工具,TDengine提供一个小工具Bailongma,只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文用Docker容器快速搭建一个Devops监控Demo即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 +## Telegraf直接写入 + +[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 ### 从源代码编译blm_telegraf -用户需要从github下载Bailongma的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: + +用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 - 安装好Golang, 1.10版本以上 @@ -132,9 +138,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。 ### 安装Telegraf -目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads + +目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads ### 配置Telegraf + 修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。 在output plugins部分,增加[[outputs.http]]配置项: @@ -148,7 +156,7 @@ go build - hostname: 区分不同采集设备的机器名称,需确保其唯一性 - metric_batch_size: 100,允许Telegraf每批次写入记录最大数量,增大其数量可以降低Telegraf的请求发送频率。 -关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的文档。 +关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 ### 启动blm_telegraf程序 blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。 @@ -174,6 +182,7 @@ blm_telegraf对telegraf提供服务的端口号。 ``` ### 启动示例 + 通过以下命令启动一个blm_telegraf的API服务 ```bash ./blm_telegraf -host 127.0.0.1 -port 8089 @@ -186,6 +195,7 @@ url = "http://10.1.2.3:8089/telegraf" ``` ### 查询telegraf写入数据 + telegraf产生的数据格式如下: ```json { @@ -220,10 +230,10 @@ select * from cpu; MQTT是一流行的物联网数据传输协议,TDengine 可以很方便的接入 MQTT Broker 接受的数据并写入到 TDengine。 -## EMQ Broker 直接写入 +## EMQ Broker 直接写入 -EMQ是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考EMQ 官方文档。 +[EMQ](https://github.com/emqx/emqx)是一开源的MQTT 
Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考[EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。 -## HiveMQ Broker 直接写入 +## HiveMQ Broker 直接写入 -HiveMQ 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器M2M通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 HiveMQ extension - TDengine 说明文档。 +[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器M2M通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。 diff --git a/documentation20/webdocs/markdowndocs/Queries-ch.md b/documentation20/cn/06.queries/docs.md similarity index 90% rename from documentation20/webdocs/markdowndocs/Queries-ch.md rename to documentation20/cn/06.queries/docs.md index 839809ccba1914a9d5cfa9005be9f32e94f19924..a161778a72728ca05a75538c8b04ca0277e01bb2 100644 --- a/documentation20/webdocs/markdowndocs/Queries-ch.md +++ b/documentation20/cn/06.queries/docs.md @@ -1,10 +1,7 @@ - - - # 高效查询数据 -## 主要查询功能 +## 主要查询功能 TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能: @@ -26,9 +23,10 @@ Query OK, 2 row(s) in set (0.001100s) ``` 为满足物联网场景的需求,TDengine支持几个特殊的函数,比如twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。TDengine还支持连续查询。 -具体的查询语法请看TAOS SQL 。 +具体的查询语法请看 [TAOS SQL 的数据查询](https://www.taosdata.com/cn/documentation/taos-sql#select) 章节。 + +## 多表聚合查询 -## 多表聚合查询 物联网场景中,往往同一个类型的数据采集点有多个。TDengine采用超级表(STable)的概念来描述某一个类型的数据采集点,一张普通的表来描述一个具体的数据采集点。同时TDengine使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine提供了一高效的方法将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。 **示例1**:在TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照location分组 @@ -51,9 +49,9 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - Query OK, 1 row(s) in set (0.002136s) ``` -TDengine仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在TAOS SQL 一章,查询类操作都会注明是否支持超级表。 +TDengine仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在 [TAOS SQL 的数据查询](https://www.taosdata.com/cn/documentation/taos-sql#select) 一章,查询类操作都会注明是否支持超级表。 -## 降采样查询、插值 +## 降采样查询、插值 物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每10秒钟求和 ```mysql @@ -91,5 +89,5 @@ Query OK, 5 row(s) in set (0.001521s) 物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用TDengine的降采样操作就轻松解决。如果一个时间间隔里,没有采集的数据,TDengine还提供插值计算的功能。 -语法规则细节请见TAOS SQL 。 +语法规则细节请见 [TAOS SQL 的时间维度聚合](https://www.taosdata.com/cn/documentation/taos-sql#aggregation) 章节。 diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/cn/07.advanced-features/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/advanced features-ch.md rename to documentation20/cn/07.advanced-features/docs.md index 0ca8428ecee2c5ef162810737f77cb9cf4b9412b..bdf93fdc3d74184bd7a6fd6f4eefaf3db6853c22 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/cn/07.advanced-features/docs.md @@ -1,6 +1,6 @@ # 高级功能 -## 连续查询(Continuous Query) +## 连续查询(Continuous Query) 连续查询是TDengine定期自动执行的查询,采用滑动窗口的方式进行计算,是一种简化的时间驱动的流式计算。 
针对库中的表或超级表,TDengine可提供定期自动执行的连续查询, @@ -17,10 +17,8 @@ TDengine提供的连续查询与普通流计算中的时间窗口计算具有以 - 不同于流计算的实时反馈计算结果,连续查询只在时间窗口关闭以后才开始计算。 例如时间周期是1天,那么当天的结果只会在23:59:59以后才会生成。 - - 如果有历史记录写入到已经计算完成的时间区间,连续查询并不会重新进行计算, 也不会重新将结果推送给用户。对于写回TDengine的模式,也不会更新已经存在的计算结果。 - - 使用连续查询推送结果的模式,服务端并不缓存客户端计算状态,也不提供Exactly-Once的语意保证。 如果用户的应用端崩溃,再次拉起的连续查询将只会从再次拉起的时间开始重新计算最近的一个完整的时间窗口。 如果使用写回模式,TDengine可确保数据写回的有效性和连续性。 @@ -95,7 +93,7 @@ create table avg_vol as select avg(voltage) from meters where ts > now and ts <= 后续版本会提供更细粒度和便捷的连续查询管理命令。 -## 数据订阅(Publisher/Subscriber) +## 数据订阅(Publisher/Subscriber) 基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致, 均可视为系统中插入一条带时间戳的新记录。 @@ -118,7 +116,7 @@ taos_consume taos_unsubscribe ``` -这些API的文档请见 [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/), +这些API的文档请见 [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/), 下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”), 完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c) 找到。 @@ -296,7 +294,7 @@ $ taos ### Java 使用数据订阅功能 -订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation20/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 +订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 下面以一个示例程序介绍其具体使用方法。它所完成的功能与前面介绍的 C 语言示例基本相同,也是订阅数据库中所有电流超过 10A 的记录。 @@ -406,7 +404,7 @@ ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang ``` -## 缓存(Cache) +## 缓存(Cache) TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。 @@ -425,7 +423,7 @@ select last_row(voltage) from meters where location='Beijing.Chaoyang'; 该SQL语句将获取所有位于北京朝阳区的电表最后记录的电压值。 -## 报警监测(Alert) +## 报警监测(Alert) 在 TDengine 的应用场景中,报警监测是一个常见需求,从概念上说,它要求程序从最近一段时间的数据中筛选出符合一定条件的数据,并基于这些数据根据定义好的公式计算出一个结果,当这个结果符合某个条件且持续一定时间后,以某种形式通知用户。 diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/cn/08.connector/01.java/docs.md similarity index 97% rename from documentation20/webdocs/markdowndocs/connector-java-ch.md rename to documentation20/cn/08.connector/01.java/docs.md index 62c1f11baba7b113749e99537585dc910cfdef15..4141004c4e027a23b9f98c206beb983720a9893c 100644 --- a/documentation20/webdocs/markdowndocs/connector-java-ch.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -4,7 +4,7 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实 `taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 -![tdengine-connector](../assets/tdengine-jdbc-connector.png) +![tdengine-connector](page://images/tdengine-jdbc-connector.png) 上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式: @@ -119,7 +119,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); > 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 -JDBC-JNI 的使用请参见视频教程。 +JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 TDengine 的 JDBC URL 规范格式为: `jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` @@ -170,6 +170,7 @@ properties 中的配置参数如下: 
如下所示: 1. 在 Java 应用中不指定 hostname 和 port + ```java public Connection getConn() throws Exception{ Class.forName("com.taosdata.jdbc.TSDBDriver"); @@ -182,7 +183,9 @@ public Connection getConn() throws Exception{ return conn; } ``` + 2. 在配置文件中指定 firstEp 和 secondEp + ``` # first fully qualified domain name (FQDN) for TDengine system firstEp cluster_node1:6030 @@ -191,7 +194,7 @@ firstEp cluster_node1:6030 secondEp cluster_node2:6030 # default system charset -# charset UTF-8 +# charset UTF-8 # system locale # locale en_US.UTF-8 @@ -322,6 +325,7 @@ conn.close(); **HikariCP** * 引入相应 HikariCP maven 依赖: + ```xml com.zaxxer @@ -331,6 +335,7 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws SQLException { HikariConfig config = new HikariConfig(); @@ -374,6 +379,7 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws Exception { @@ -479,7 +485,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 [10]: https://maven.aliyun.com/mvn/search [11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo -[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[13]: https://www.taosdata.com/cn/documentation/administrator/#client [14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client -[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B +[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/cn/08.connector/docs.md similarity index 89% rename from documentation20/webdocs/markdowndocs/connector-ch.md rename to documentation20/cn/08.connector/docs.md index 6736eea7c75ec12f017aa272385d4301c7875317..f3c2363d6c712c0323bcfeeb1c3a01d5df32ca63 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/cn/08.connector/docs.md @@ -2,7 +2,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用。 -![image-connecotr](../assets/connector.png) +![image-connecotr](page://images/connector.png) 目前TDengine的连接器可支持的平台广泛,包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。对照矩阵如下: @@ -24,8 +24,9 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 * 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。 * 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 * 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。 +* 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。 -## 安装连接器驱动步骤 +## 安装连接器驱动步骤 服务器应该已经安装TDengine服务端安装包。连接器驱动安装步骤如下: @@ -136,7 +137,7 @@ taos> taos> ``` -## C/C++ Connector +## C/C++ Connector **C/C++连接器支持的系统有**: @@ -156,7 +157,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine * 在编译时需要链接TDengine动态库。Linux 为 *libtaos.so* ,安装后,位于 _/usr/local/taos/driver_。Windows为 taos.dll,安装后位于 *C:\TDengine*。 * 如未特别说明,当API的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。 -使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c。 +使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c 。 ### 基础API @@ -238,13 +239,13 @@ C/C++的API类似于MySQL的C 
API。应用程序使用时,需要包含TDengine 获取查询结果集每列数据的属性(数据类型、名字、字节数),与taos_num_fileds配合使用,可用来解析`taos_fetch_row`返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下: - ```c - typedef struct taosField { - char name[65]; // 列名 - uint8_t type; // 数据类型 - int16_t bytes; // 字节数 - } TAOS_FIELD; - ``` +```c +typedef struct taosField { + char name[65]; // 列名 + uint8_t type; // 数据类型 + int16_t bytes; // 字节数 +} TAOS_FIELD; +``` - `void taos_stop_query(TAOS_RES *res)` @@ -266,7 +267,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine ### 异步查询API -同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2\~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。 +同步API之外,TDengine还提供性能更高的异步调用API处理数据插入、查询操作。在软硬件环境相同的情况下,异步API处理数据插入的速度比同步API快2~4倍。异步API采用非阻塞式的调用方式,在系统真正完成某个具体数据库操作前,立即返回。调用的线程可以去处理其他工作,从而可以提升整个应用的性能。异步API在网络延迟严重的情况下,优点尤为突出。 异步API都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的API而定。第一个参数param是应用调用异步API时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是SQL操作的结果集,如果为空,比如insert操作,表示没有记录返回,如果不为空,比如select操作,表示有记录返回。 @@ -306,17 +307,17 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 进行参数绑定,bind指向一个数组,需保证此数组的元素数量和顺序与sql语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL中的 MYSQL_BIND 一致,具体定义如下: - ```c - typedef struct TAOS_BIND { - int buffer_type; - void * buffer; - unsigned long buffer_length; // 未实际使用 - unsigned long *length; - int * is_null; - int is_unsigned; // 未实际使用 - int * error; // 未实际使用 - } TAOS_BIND; - ``` +```c +typedef struct TAOS_BIND { + int buffer_type; + void * buffer; + unsigned long buffer_length; // 未实际使用 + unsigned long *length; + int * is_null; + int is_unsigned; // 未实际使用 + int * error; // 未实际使用 +} TAOS_BIND; +``` - `int taos_stmt_add_batch(TAOS_STMT *stmt)` @@ -385,12 +386,12 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 -## Python Connector +## Python Connector -Python连接器的使用参见视频教程 +Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html) ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 * 已安装python 2.7 or >= 3.4 * 已安装pip 或 pip3 @@ -431,6 +432,7 @@ python -m pip install python3\ import taos ``` * 获取连接并获取游标对象 + ```python conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos") c1 = conn.cursor() @@ -438,6 +440,7 @@ c1 = conn.cursor() * host 是TDengine 服务端所有IP, config 为客户端配置文件所在目录 * 写入数据 + ```python import datetime @@ -459,6 +462,7 @@ affected_rows = c1.execute(' '.join(sqlcmd)) ``` * 查询数据 + ```python c1.execute('select * from tb') # 拉取查询结果 @@ -476,6 +480,7 @@ for data in c1: ``` * 创建订阅 + ```python # 创建一个主题为 'test' 消费周期为1000毫秒的订阅 # 第一个参数为 True 表示重新开始订阅,如为 False 且之前创建过主题为 'test' 的订阅,则表示继续消费此订阅的数据,而不是重新开始消费所有数据 @@ -483,6 +488,7 @@ sub = conn.subscribe(True, "test", "select * from tb;", 1000) ``` * 消费订阅的数据 + ```python data = sub.consume() for d in data: @@ -490,15 +496,18 @@ for d in data: ``` * 取消订阅 + ```python sub.close() ``` * 关闭连接 + ```python c1.close() conn.close() ``` + #### 帮助信息 用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法: @@ -518,6 +527,7 @@ conn.close() 用于生成taos.TDengineConnection的实例。 ### Python客户端使用示例代码 + 在tests/examples/python中,我们提供了一个示例Python程序read_example.py,可以参考这个程序来设计用户自己的写入、查询程序。在安装了对应的客户端后,通过import taos引入taos类。主要步骤如下 - 通过taos.connect获取TDengineConnection对象,这个对象可以一个程序只申请一个,在多线程中共享。 @@ -527,9 +537,9 @@ conn.close() - 如果执行的是查询语句,则execute执行成功后,需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。 -## RESTful Connector +## RESTful Connector 
-为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见视频教程。 +为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 ### HTTP请求格式 @@ -713,21 +723,21 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ### 重要配置项 -下面仅列出一些与RESTFul接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 +下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 -- httpPort: 对外提供RESTFul服务的端口号,默认绑定到6041 +- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041 - httpMaxThreads: 启动的线程数量,默认为2 - restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240 - httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式 - httpDebugFlag: 日志开关,131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认131 -## CSharp Connector +## CSharp Connector C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 * .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 * 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 @@ -766,15 +776,15 @@ https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos https://www.taosdata.com/blog/2020/11/02/1901.html ``` -## Go Connector +## Go Connector ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 TDengine提供了GO驱动程序`taosSql`。 `taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go`。 -使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及视频教程。 +使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 ```Go import ( @@ -821,7 +831,7 @@ go env -w GOPROXY=https://goproxy.io,direct sql.Open内置的方法,Close closes the statement. -## Node.js Connector +## Node.js Connector Node.js连接器支持的系统有: @@ -830,47 +840,47 @@ Node.js连接器支持的系统有: | **OS类型** | Linux | Win64 | Win32 | Linux | Linux | | **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** | -Node.js连接器的使用参见视频教程 +Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html) ### 安装准备 -* 应用驱动安装请参考安装连接器驱动步骤。 +* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 ### 安装Node.js连接器 -用户可以通过npm来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下: +用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下: -首先,通过npm安装node.js 连接器. +首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. 
```bash npm install td2.0-connector ``` 我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下 -我们使用node-gyp和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件: +我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件: ### Linux - `python` (建议`v2.7` , `v3.x.x` 目前还不支持) - `node` 2.0.6支持v12.x和v10.x,2.0.5及更早版本支持v10.x版本,其他版本可能存在包兼容性的问题。 - `make` -- c语言编译器比如GCC +- c语言编译器比如[GCC](https://gcc.gnu.org) ### Windows #### 安装方法1 -使用微软的windows-build-tools在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具 +使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具 #### 安装方法2 手动安装以下工具: -- 安装Visual Studio相关:Visual Studio 2017 Community -- 安装 Python 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` +- 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) +- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` - 进入`cmd`命令行界面, `npm config set msvs_version 2017` -如果以上步骤不能成功执行, 可以参考微软的node.js用户手册Microsoft's Node.js Guidelines for Windows +如果以上步骤不能成功执行, 可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) 如果在Windows 10 ARM 上使用ARM64 Node.js, 还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64". @@ -887,7 +897,7 @@ Node-example-raw.js 验证方法: -1. 新建安装验证目录,例如:\~/tdengine-test,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。 +1. 新建安装验证目录,例如:`~/tdengine-test`,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。 2. 在命令中执行以下命令: @@ -901,8 +911,7 @@ node nodejsChecker.js host=localhost ### Node.js连接器的使用 -(http://docs.taosdata.com/node) -以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考该文档 +以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node) #### 建立连接 @@ -973,6 +982,7 @@ promise.then(function(result) { }) ``` #### 异步函数 + 异步查询数据库的操作和上面类似,只需要在`cursor.execute`, `TaosQuery.execute`等函数后面加上`_a`。 ```javascript var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a() @@ -986,6 +996,7 @@ promise2.then(function(result) { ``` ### 示例 -这里提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例 -这里同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`. +[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例 + +[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`. 
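> **Reviewer note on the connector chapter above:** besides the Node.js examples this file also documents the RESTful interface, which drives TDengine with nothing more than an HTTP POST whose body is the SQL statement. As a quick end-to-end sanity check of that interface, a minimal sketch might look like the following — the host name and the `demo.tb` table are placeholder assumptions, `6041` is the default `httpPort` mentioned in the configuration section of this same chapter, and `cm9vdDp0YW9zZGF0YQ==` is simply the Base64 encoding of the default credentials `root:taosdata`:

```bash
# Minimal sketch of the RESTful interface described in the connector chapter.
# Assumptions: a default single-node install on localhost, the default httpPort
# 6041, and a placeholder table demo.tb -- adjust all three for a real deployment.
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
     -d 'select * from demo.tb limit 10' \
     http://localhost:6041/rest/sql
```

Per the same chapter, substituting `/rest/sqlutc` for `/rest/sql` returns the result set's timestamps in UTC.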
diff --git a/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md b/documentation20/cn/09.connections/docs.md similarity index 79% rename from documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md rename to documentation20/cn/09.connections/docs.md index 4e55f3c5e8f12f3277898b1f825dca5e22b2e6d1..6a74ee382f9addea255ac6d705e8c9eac82da133 100644 --- a/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md +++ b/documentation20/cn/09.connections/docs.md @@ -1,7 +1,7 @@ # 与其他工具的连接 -## Grafana +## Grafana TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。 @@ -21,15 +21,15 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin 用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示: -![img](../assets/add_datasource1.jpg) +![img](page://images/connections/add_datasource1.jpg) 点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: -![img](../assets/add_datasource2.jpg) +![img](page://images/connections/add_datasource2.jpg) 进入数据源配置页面,按照默认提示修改相应配置即可: -![img](../assets/add_datasource3.jpg) +![img](page://images/connections/add_datasource3.jpg) * Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041 * User:TDengine 用户名。 @@ -37,13 +37,13 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin 点击 `Save & Test` 进行测试,成功会有如下提示: -![img](../assets/add_datasource4.jpg) +![img](page://images/connections/add_datasource4.jpg) #### 创建 Dashboard 回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: -![img](../assets/create_dashboard1.jpg) +![img](page://images/connections/create_dashboard1.jpg) 如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 sql 进行查询,具体说明如下: @@ -54,7 +54,7 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: -![img](../assets/create_dashboard2.jpg) +![img](page://images/connections/create_dashboard2.jpg) > 关于如何使用Grafana创建相应的监测界面以及更多有关使用Grafana的信息,请参考Grafana官方的[文档](https://grafana.com/docs/)。 @@ -64,14 +64,14 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin 点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件: -![img](../assets/import_dashboard1.jpg) +![img](page://images/connections/import_dashboard1.jpg) 导入完成之后可看到如下效果: -![img](../assets/import_dashboard2.jpg) +![img](page://images/connections/import_dashboard2.jpg) -## Matlab +## Matlab MatLab可以通过安装包内提供的JDBC Driver直接连接到TDengine获取数据到本地工作空间。 @@ -82,12 +82,15 @@ MatLab的适配有下面几个步骤,下面以Windows10上适配MatLab2017a为 - 将TDengine安装包内的驱动程序JDBCDriver-1.0.0-dist.jar拷贝到${matlab_root}\MATLAB\R2017a\java\jar\toolbox - 将TDengine安装包内的taos.lib文件拷贝至${matlab_ root _dir}\MATLAB\R2017a\lib\win64 - 将新添加的驱动jar包加入MatLab的classpath。在${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt文件中添加下面一行 - -​ `$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar` - +​ +``` +$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar +``` - 在${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\下添加一个文件javalibrarypath.txt, 并在该文件中添加taos.dll的路径,比如您的taos.dll是在安装时拷贝到了C:\Windows\System32下,那么就应该在javalibrarypath.txt中添加如下一行: - -​ `C:\Windows\System32` +​ +``` +C:\Windows\System32 +``` ### 在MatLab中连接TDengine获取数据 @@ -95,23 +98,25 @@ MatLab的适配有下面几个步骤,下面以Windows10上适配MatLab2017a为 - 创建一个连接: - `conn = database(‘db’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://127.0.0.1:0/’)` - +```matlab +conn = database(‘db’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, 
‘jdbc:TSDB://127.0.0.1:0/’) +``` - 执行一次查询: - `sql0 = [‘select * from tb’]` - - `data = select(conn, sql0);` - +```matlab +sql0 = [‘select * from tb’] +data = select(conn, sql0); +``` - 插入一条记录: - `sql1 = [‘insert into tb values (now, 1)’]` - - `exec(conn, sql1)` +```matlab +sql1 = [‘insert into tb values (now, 1)’] +exec(conn, sql1) +``` 更多例子细节请参考安装包内examples\Matlab\TDengineDemo.m文件。 -## R +## R R语言支持通过JDBC接口来连接TDengine数据库。首先需要安装R语言的JDBC包。启动R语言环境,然后执行以下命令安装R语言的JDBC支持库: @@ -147,3 +152,10 @@ TDengine客户端暂不支持如下函数: - dbListTables(conn):显示连接中的所有表 +## DataX + +[DataX](https://github.com/alibaba/DataX) 是阿里巴巴集团开源的一款通用离线数据采集/同步工具,能够简单、高效地接入 TDengine 进行数据写入和读取。 + +* 数据读取集成的方法请参见 [TSDBReader 插件文档](https://github.com/alibaba/DataX/blob/master/tsdbreader/doc/tsdbreader.md) +* 数据写入集成的方法请参见 [TSDBWriter 插件文档](https://github.com/alibaba/DataX/blob/master/tsdbwriter/doc/tsdbhttpwriter.md) + diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/cn/10.cluster/docs.md similarity index 92% rename from documentation20/webdocs/markdowndocs/cluster-ch.md rename to documentation20/cn/10.cluster/docs.md index f6019b1a5aabdaea095eafc1919c46432ec88c77..15ac449c1aeb576ab9dd3401a717a4364fa4c0b6 100644 --- a/documentation20/webdocs/markdowndocs/cluster-ch.md +++ b/documentation20/cn/10.cluster/docs.md @@ -1,19 +1,19 @@ # TDengine 集群安装、管理 -多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,先请按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)一章安装并体验单节点功能。 +多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,先请按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。 集群的每个数据节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取(如何配置FQDN,请参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html))。端口是这个数据节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个物理节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。 TDengine的集群管理极其简单,除添加和删除节点需要人工干预之外,其他全部是自动完成,最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。 -关于集群搭建请参考视频教程。 +关于集群搭建请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1961.html)。 -## 准备工作 +## 准备工作 **第零步**:规划集群所有物理节点的FQDN,将规划好的FQDN分别添加到每个物理节点的/etc/hostname;修改每个物理节点的/etc/hosts,将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS,请联系网络管理员在DNS上做好相关配置】 -**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) -**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/); +**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) +**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/); **注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。 **第二步**:建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口; @@ -59,15 +59,15 @@ arbitrator ha.taosdata.com:6042 | 8 | charset | 字符集编码 | | 9 | balance | 是否启动负载均衡 | | 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 | -| 11 | maxVgroupsPerDb | 每个DB中 能够使用的最大vnode个数 | +| 11 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 | -## 启动第一个数据节点 +## 启动第一个数据节点 
-按照[《立即开始》](https://www.taosdata.com/cn/getting-started20/)里的指示,启动第一个数据节点,例如h1.taosdata.com,然后执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示: +按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)里的指示,启动第一个数据节点,例如h1.taosdata.com,然后执行taos, 启动taos shell,从shell里执行命令"show dnodes;",如下所示: - ``` +``` Welcome to the TDengine shell from Linux, Client Version:2.0.0.0 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. @@ -78,15 +78,15 @@ taos> show dnodes; Query OK, 1 row(s) in set (0.006385s) taos> - ``` +``` 上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEP。 -## 启动后续数据节点 +## 启动后续数据节点 将后续的数据节点添加到现有集群,具体有以下几步: -1. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法在每个物理节点启动taosd; +1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd; 2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令: @@ -115,7 +115,7 @@ taos> - firstEp这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的mnode的End Point列表,不再依赖这个参数。 - 两个没有配置firstEp参数的数据节点dnode启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。 -## 数据节点管理 +## 数据节点管理 上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。 @@ -169,7 +169,7 @@ SHOW DNODES; SHOW VGROUPS; ``` -## vnode的高可用性 +## vnode的高可用性 TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。 @@ -185,7 +185,7 @@ CREATE DATABASE demo replica 3; 因为vnode的引入,无法简单的给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。 -## Mnode的高可用性 +## Mnode的高可用性 TDengine集群是由mnode (taosd的一个模块,管理节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。 @@ -202,7 +202,7 @@ SHOW MNODES; **注意:**一个TDengine高可用系统,无论是vnode还是mnode, 都必须配置多个副本。 -## 负载均衡 +## 负载均衡 有三种情况,将触发负载均衡,而且都无需人工干预。 @@ -214,7 +214,7 @@ SHOW MNODES; **【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡。** -## 数据节点离线处理 +## 数据节点离线处理 如果一个数据节点离线,TDengine集群将自动检测到。有如下两种情况: @@ -223,7 +223,7 @@ SHOW MNODES; **注意:**如果一个虚拟节点组(包括mnode组)里所归属的每个数据节点都处于离线或unsynced状态,必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后,才能选出Master,该虚拟节点组才能对外提供服务。比如整个集群有3个数据节点,副本数为3,如果3个数据节点都宕机,然后2个数据节点重启,是无法工作的,只有等3个数据节点都重启成功,才能对外服务。 -## Arbitrator的使用 +## Arbitrator的使用 如果副本数为偶数,当一个vnode group里一半vnode不工作时,是无法从中选出master的。同理,一半mnode不工作时,是无法选出mnode的master的,因为存在“split brain”问题。为解决这个问题,TDengine引入了Arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含Arbitrator在内,超过半数的vnode或mnode工作,那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形,如果一个节点A离线,但另外一个节点B正常,而且能连接到Arbitrator,那么节点B就能正常工作。 diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/cn/11.administrator/docs.md similarity index 91% rename from documentation20/webdocs/markdowndocs/administrator-ch.md rename to documentation20/cn/11.administrator/docs.md index 7c8344d9ca5cb2c9738c9b308406a6c96f871db1..86ad8e5bb91e8883b4be4a2bec7b5fb7dcb4c483 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/cn/11.administrator/docs.md @@ -1,12 +1,12 @@ # TDengine的运营与维护 -## 容量规划 +## 容量规划 使用TDengine来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU以及硬盘空间。 ### 内存需求 -每个DB可以创建固定数目的vnode,默认与CPU核数相同,可通过maxVgroupsPerDb配置;每个vnode会占用固定大小的内存(大小与数据库的配置参数blocks和cache有关);每个Table会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个DB需要的系统内存可通过如下公式计算: +每个DB可以创建固定数目的vgroup,默认与CPU核数相同,可通过maxVgroupsPerDb配置;vgroup中的每个副本会是一个vnode;每个vnode会占用固定大小的内存(大小与数据库的配置参数blocks和cache有关);每个Table会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个DB需要的系统内存可通过如下公式计算: ``` Memory Size = maxVgroupsPerDb * (blocks * cache + 10Mb) + numOfTables * (tagSizePerTable + 0.5Kb) @@ -47,9 
+47,9 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable 因为TDengine具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。 -**立即计算CPU、内存、存储,请参见:资源估算方法** +**立即计算CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)** -## 容错和灾备 +## 容错和灾备 ### 容错 @@ -76,7 +76,7 @@ TDengine集群的节点数必须大于等于副本数,否则创建表时将报 当TDengine集群中的节点部署在不同的物理机上,并设置多个副本数时,就实现了系统的高可靠性,无需再使用其他软件或工具。TDengine企业版还可以将副本部署在不同机房,从而实现异地容灾。 -## 服务端配置 +## 服务端配置 TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos目录,可以通过taosd命令行执行参数-c指定配置文件目录。比如taosd -c /home/user来指定配置文件位于/home/user这个目录。 @@ -102,7 +102,7 @@ taosd -C - maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 - telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 - stream: 是否启用连续查询(流计算功能),0表示不允许,1表示允许。 默认值:1。 -- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为字节。 +- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。 - ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。 @@ -111,15 +111,16 @@ taosd -C - days:一个数据文件存储数据的时间跨度,单位为天,默认值:10。 - keep:数据库中数据保留的天数,单位为天,默认值:3650。 -- minRows: 文件块中记录的最小条数,单位为条,默认值:100。 -- maxRows: 文件块中记录的最大条数,单位为条,默认值:4096。 -- comp: 文件压缩标志位,0:关闭,1:一阶段压缩,2:两阶段压缩。默认值:2。 -- walLevel:WAL级别。1:写wal, 但不执行fsync; 2:写wal, 而且执行fsync。默认值:1。 +- minRows:文件块中记录的最小条数,单位为条,默认值:100。 +- maxRows:文件块中记录的最大条数,单位为条,默认值:4096。 +- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。 +- walLevel:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。 - fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。 -- cache: 内存块的大小,单位为兆字节(MB),默认值:16。 -- blocks: 每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。 +- cache:内存块的大小,单位为兆字节(MB),默认值:16。 +- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。 - replica:副本个数,取值范围:1-3。单位为个,默认值:1 - precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms +- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(从 2.0.11 版本开始支持此参数) 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: @@ -137,7 +138,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。 - statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 -- maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。 +- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。 - arbitrator: 系统中裁决器的end point,缺省为空。 - timezone、locale、charset 的配置见客户端配置。 @@ -157,9 +158,9 @@ ALTER DNODE alter dnode 1 debugFlag 135; ``` -## 客户端配置 +## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见Shell命令行程序。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见帮助信息 `taos --help`。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 **2.0.10.0 之后版本支持命令行以下参数显示当前客户端参数的配置** @@ -247,7 +248,7 @@ taos -C 或 taos --dump-config Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。 -## 用户管理 +## 用户管理 系统管理员可以在CLI界面里添加、删除用户,也可以修改密码。CLI里SQL语法如下: @@ -285,7 +286,7 @@ SHOW USERS; **注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身 -## 数据导入 +## 数据导入 
TDengine提供多种方便的数据导入功能,一种按脚本文件导入,一种按数据文件导入,一种是taosdump工具导入本身导出的文件。 @@ -337,9 +338,9 @@ Query OK, 9 row(s) affected (0.004763s) **taosdump工具导入** -TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:TDengine DUMP工具使用指南 +TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) -## 数据导出 +## 数据导出 为方便数据导出,TDengine提供了两种导出方式,分别是按表导出和用taosdump导出。 @@ -355,9 +356,9 @@ select * from >> data.csv; **用taosdump导出数据** -TDengine提供了方便的数据库导出工具taosdump。用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参见博客:TDengine DUMP工具使用指南 +TDengine提供了方便的数据库导出工具taosdump。用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) -## 系统连接、任务查询管理 +## 系统连接、任务查询管理 系统管理员可以从CLI查询系统的连接、正在进行的查询、流式计算,并且可以关闭连接、停止正在进行的查询和流式计算。CLI里SQL语法如下: @@ -403,7 +404,7 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 -## 文件目录结构 +## 文件目录结构 安装TDengine后,默认会在操作系统中生成下列目录或文件: @@ -429,7 +430,7 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 -## TDengine参数限制与保留关键字 +## TDengine参数限制与保留关键字 - 数据库名:不能包含“.”以及特殊字符,不能超过32个字符 - 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过192个字符 diff --git a/documentation20/webdocs/markdowndocs/Taos Error Code-ch.md b/documentation20/cn/12.taos-sql/01.error-code/docs.md similarity index 100% rename from documentation20/webdocs/markdowndocs/Taos Error Code-ch.md rename to documentation20/cn/12.taos-sql/01.error-code/docs.md diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/cn/12.taos-sql/docs.md similarity index 88% rename from documentation20/webdocs/markdowndocs/TAOS SQL-ch.md rename to documentation20/cn/12.taos-sql/docs.md index 75c3fb897e4dfa405301b75bf3be24d2c7e17ff8..33321348bb69d93b7449b8469f74d76b6223cf08 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -25,7 +25,7 @@ taos> DESCRIBE meters; ``` 数据集包含4个智能电表的数据,按照TDengine的建模规则,对应4个子表,其名称分别是 d1001, d1002, d1003, d1004。 -## 支持的数据类型 +## 支持的数据类型 使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: @@ -42,19 +42,21 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic | | 类型 | Bytes | 说明 | | ---- | :-------: | ------ | ------------------------------------------------------------ | | 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 | -| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null | -| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL | -| 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] | -| 5 | DOUBLE | 8 | 双精度浮点型,有效位数15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 用于记录字符串,理论上,最长可以有16374字节,但由于每行数据最多16K字节,实际上限一般小于理论值。 binary仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如binary(20)定义了最长为20个字符的字符串,每个字符占1byte的存储空间。如果用户字符串超出20字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示, 即 **\’**。 | -| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768用于NULL | -| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128用于NULL | -| 9 | BOOL | 1 | 布尔型,{true, false} | -| 10 | NCHAR | 自定义 | 用于记录非ASCII字符串,如中文字符。每个nchar字符占用4bytes的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 **\’**。nchar使用时须指定字符串大小,类型为nchar(10)的列表示此列的字符串最多存储10个nchar字符,会固定占用40bytes的空间。如用户字符串长度超出声明长度,则将会报错。 | - -**Tips**: TDengine对SQL语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。 - -## 数据库管理 +| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL | +| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | +| 4 | FLOAT | 4 | 浮点型,有效位数 
6-7,范围 [-3.4E38, 3.4E38] | +| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | +| 6 | BINARY | 自定义 | 用于记录 ASCII 型字符串。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。 binary 仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如 binary(20) 定义了最长为 20 个字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | +| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | +| 9 | BOOL | 1 | 布尔型,{true, false} | +| 10 | NCHAR | 自定义 | 用于记录非 ASCII 型字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 | + +**Tips**: +1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。 +2. 应避免使用 BINARY 类型来保存非 ASCII 型的字符串,会很容易导致数据乱码等错误。正确的做法是使用 NCHAR 类型来保存中文字符。 + +## 数据库管理 - **创建数据库** @@ -96,27 +98,32 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ```mysql ALTER DATABASE db_name COMP 2; ``` - COMP参数是指修改数据库文件压缩标志位,取值范围为[0, 2]. 0表示不压缩,1表示一阶段压缩,2表示两阶段压缩。 + COMP 参数是指修改数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。0 表示不压缩,1 表示一阶段压缩,2 表示两阶段压缩。 ```mysql ALTER DATABASE db_name REPLICA 2; ``` - REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于或等于dnode的数目。 + REPLICA 参数是指修改数据库副本数,取值范围 [1, 3]。在集群中使用,副本数必须小于或等于 DNODE 的数目。 ```mysql ALTER DATABASE db_name KEEP 365; ``` - KEEP参数是指修改数据文件保存的天数,缺省值为3650,取值范围[days, 365000],必须大于或等于days参数值。 + KEEP 参数是指修改数据文件保存的天数,缺省值为 3650,取值范围 [days, 365000],必须大于或等于 days 参数值。 ```mysql ALTER DATABASE db_name QUORUM 2; ``` - QUORUM参数是指数据写入成功所需要的确认数。取值范围[1, 3]。对于异步复制,quorum设为1,具有master角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于2。原则上,Quorum >=1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 + QUORUM 参数是指数据写入成功所需要的确认数,取值范围 [1, 3]。对于异步复制,quorum 设为 1,具有 master 角色的虚拟节点自己确认即可。对于同步复制,需要至少大于等于 2。原则上,Quorum >= 1 并且 Quorum <= replica(副本数),这个参数在启动一个同步模块实例时需要提供。 ```mysql ALTER DATABASE db_name BLOCKS 100; ``` - BLOCKS参数是每个VNODE (TSDB) 中有多少cache大小的内存块,因此一个VNODE的用的内存大小粗略为(cache * blocks)。取值范围[3, 1000]。 + BLOCKS 参数是每个 VNODE (TSDB) 中有多少 cache 大小的内存块,因此一个 VNODE 的用的内存大小粗略为(cache * blocks)。取值范围 [3, 1000]。 + + ```mysql + ALTER DATABASE db_name CACHELAST 0; + ``` + CACHELAST 参数控制是否在内存中缓存数据子表的 last_row。缺省值为 0,取值范围 [0, 1]。其中 0 表示不启用、1 表示启用。(从 2.0.11 版本开始支持) **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。 @@ -126,7 +133,8 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic SHOW DATABASES; ``` -## 表管理 +## 表管理 + - **创建数据表** ```mysql @@ -151,6 +159,14 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 以指定的超级表为模板,指定 tags 的值来创建数据表。 +- **以超级表为模板创建数据表,并指定具体的 tags 列** + + ```mysql + CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) 
TAGS (tag_value1, ...); + ``` + 以指定的超级表为模板,指定一部分 tags 列的值来创建数据表。(没被指定的 tags 列会设为空值。) + 说明:从 2.0.17 版本开始支持这种方式。在之前的版本中,不允许指定 tags 列,而必须显式给出所有 tags 列的取值。 + - **批量创建数据表** ```mysql @@ -212,7 +228,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 如果表是通过[超级表](../super-table/)创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构 -## 超级表STable管理 +## 超级表STable管理 注意:在 2.0.15 以前的版本中,并不支持 STABLE 保留字,而是写作 TABLE。也即,在本节后文的指令说明中,CREATE、DROP、ALTER 三个指令在老版本中保留字需写作 TABLE 而不是 STABLE。 @@ -265,7 +281,8 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ALTER STABLE stb_name DROP COLUMN field_name; ``` -## 超级表 STable 中 TAG 管理 +## 超级表 STable 中 TAG 管理 + - **添加标签** ```mysql @@ -294,7 +311,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 说明:除了更新标签的值的操作是针对子表进行,其他所有的标签操作(添加标签、删除标签等)均只能作用于 STable,不能对单个子表操作。对 STable 添加标签以后,依托于该 STable 建立的所有表将自动增加了一个标签,所有新增标签的默认值都是 NULL。 -## 数据写入 +## 数据写入 - **插入一条记录** ```mysql @@ -304,7 +321,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic - **插入一条记录,数据对应到指定的列** ```mysql - INSERT INTO tb_name (field1_name, ...) VALUES (field1_value, ...) + INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...); ``` 向表tb_name中插入一条记录,数据对应到指定的列。SQL语句中没有出现的列,数据库将自动填充为NULL。主键(时间戳)不能为NULL。 @@ -334,29 +351,21 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ``` 同时向表tb1_name和tb2_name中按列分别插入多条记录 - 注意: - 1) 如果时间戳为0,系统将自动使用服务器当前时间作为该记录的时间戳; - 2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。 + 注意:允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。 -**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。 - -## 数据查询 +- **插入记录时自动建表** + ```mysql + INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...); + ``` + 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 tags 取值。 -### 查询语法: +- **插入记录时自动建表,并指定具体的 tags 列** + ```mysql + INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...); + ``` + 在自动建表时,可以只是指定部分 tags 列的取值,未被指定的 tags 列将取为空值。 -```mysql -SELECT select_expr [, select_expr ...] - FROM {tb_name_list} - [WHERE where_condition] - [INTERVAL (interval_val [, interval_offset])] - [FILL fill_val] - [SLIDING fill_val] - [GROUP BY col_list] - [ORDER BY col_list { DESC | ASC }] - [SLIMIT limit_val [, SOFFSET offset_val]] - [LIMIT limit_val [, OFFSET offset_val]] - [>> export_file] -``` +**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。 说明:针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分SQL仍会执行。下面的sql中,insert语句是无效的,但是d1001仍会被创建。 @@ -384,10 +393,30 @@ taos> SHOW TABLES; Query OK, 1 row(s) in set (0.001091s) ``` +## 数据查询 + +### 查询语法: + +```mysql +SELECT select_expr [, select_expr ...] 
+ FROM {tb_name_list} + [WHERE where_condition] + [INTERVAL (interval_val [, interval_offset])] + [FILL fill_val] + [SLIDING fill_val] + [GROUP BY col_list] + [ORDER BY col_list { DESC | ASC }] + [SLIMIT limit_val [, SOFFSET offset_val]] + [LIMIT limit_val [, OFFSET offset_val]] + [>> export_file]; +``` + #### SELECT子句 + 一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。 ##### 通配符 + 通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。 ```mysql taos> SELECT * FROM d1001; @@ -496,6 +525,7 @@ Query OK, 3 row(s) in set (0.001191s) 但是针对```first(*)```、```last(*)```、```last_row(*)```不支持针对单列的重命名。 #### 隐式结果列 + ```Select_exprs```可以是表所属列的列名,也可以是基于列的函数表达式或计算式,数量的上限256个。当用户使用了```interval```或```group by tags```的子句以后,在最后返回结果中会强制返回时间戳列(第一列)和group by子句中的标签列。后续的版本中可以支持关闭group by子句中隐式列的输出,列输出完全由select子句控制。 #### 表(超级表)列表 @@ -510,6 +540,7 @@ SELECT * FROM d1001; ``` #### 特殊功能 + 部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database() ```mysql taos> SELECT DATABASE(); @@ -554,12 +585,14 @@ taos> SELECT SERVER_STATUS() AS status; 1 | Query OK, 1 row(s) in set (0.000081s) ``` + #### TAOS SQL中特殊关键词 > TBNAME: 在超级表查询中可视为一个特殊的标签,代表查询涉及的子表名
\_c0: 表示表(超级表)的第一列 #### 小技巧 + 获取一个超级表所有的子表名及相关的标签信息: ```mysql SELECT TBNAME, location FROM meters; @@ -640,7 +673,7 @@ Query OK, 1 row(s) in set (0.001091s) SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; ``` -## SQL 函数 +## SQL 函数 ### 聚合函数 @@ -1119,7 +1152,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 3 row(s) in set (0.001046s) ``` -## 时间维度聚合 +## 时间维度聚合 + TDengine支持按时间段进行聚合,可以将表中数据按照时间段进行切割后聚合生成结果,比如温度传感器每秒采集一次数据,但需查询每隔10分钟的温度平均值。这个聚合适合于降维(down sample)操作, 语法如下: ```mysql @@ -1138,10 +1172,10 @@ SELECT function_list FROM stb_name - 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。 - WHERE语句可以指定查询的起止时间和其他过滤条件 - FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种: - 1. 不进行填充:NONE(默认填充模式)。 - 2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。 - 3. NULL填充:使用NULL填充数据。例如:fill(null)。 - 4. PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。 + * 不进行填充:NONE(默认填充模式)。 + * VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。 + * NULL填充:使用NULL填充数据。例如:fill(null)。 + * PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。 说明: 1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。 @@ -1164,7 +1198,8 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P FILL(PREV); ``` -## TAOS SQL 边界限制 +## TAOS SQL 边界限制 + - 数据库名最大长度为32 - 表名最大长度为192,每行数据最大长度16k个字符 - 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳 diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/cn/13.faq/docs.md similarity index 81% rename from documentation20/webdocs/markdowndocs/faq-ch.md rename to documentation20/cn/13.faq/docs.md index cd6f0ae08caf19340b6cef9a9428abcb66c97dc6..4e72cbb21af95af034002fade0ce1b50cd8ccbc4 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/cn/13.faq/docs.md @@ -6,7 +6,7 @@ 1. /var/log/taos (如果没有修改过默认路径) 2. /etc/taos -附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 GitHub提交Issue。 +附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交Issue。 为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。 ``` @@ -25,13 +25,16 @@ 5. 如果数据需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 ## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办? -请看为此问题撰写的技术博客 + +请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/jdbcdriver找不到动态链接库/) ## 3. 创建数据表时提示more dnodes are needed -请看为此问题撰写的技术博客 + +请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/) ## 4. 如何让TDengine crash时生成core文件? -请看为此问题撰写的技术博客 + +请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/tdengine-crash时生成core文件的方法/) ## 5. 遇到错误"Unable to establish connection", 我怎么办? @@ -46,7 +49,7 @@ 3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* -4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:一篇文章说清楚TDengine的FQDN。 +4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件 @@ -65,12 +68,12 @@ * Windows 系统请使用 PowerShell 命令 Net-TestConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问 -10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):TDengine 内嵌网络检测工具使用指南。 +10. 
也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 ## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办? 产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查: -1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:一篇文章说清楚TDengine的FQDN。 +1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 2. 如果网络配置有DNS server, 请检查是否正常工作 3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。 4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的 @@ -106,10 +109,9 @@ Properties properties = new Properties(); properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); Connection = DriverManager.getConnection(url, properties); ``` -## 12.TDengine GO windows驱动的如何编译? -请看为此问题撰写的技术博客 -## 13.JDBC报错: the excuted SQL is not a DML or a DDL? +## 12.JDBC报错: the excuted SQL is not a DML or a DDL? + 请更新至最新的JDBC驱动 ```JAVA @@ -118,15 +120,16 @@ Connection = DriverManager.getConnection(url, properties); 2.0.4 ``` -## 14. taos connect failed, reason: invalid timestamp + +## 13. taos connect failed, reason: invalid timestamp 常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。 -## 15. 表名显示不全 +## 14. 表名显示不全 由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。 -## 16. 如何进行数据迁移? +## 15. 如何进行数据迁移? TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事: @@ -134,3 +137,20 @@ TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。 - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 +## 16. 
如何在命令行程序 taos 中临时调整日志级别 + +为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令: + +```mysql +ALTER LOCAL flag_name flag_value; +``` + +其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置): +- flag_name 的取值可以是:debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag +- flag_value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志) + +```mysql +ALTER LOCAL RESETLOG; +``` + +其含义是,清空本机所有由客户端生成的日志文件。 diff --git a/documentation20/webdocs/assets/dnode.png b/documentation20/cn/images/architecture/dnode.png similarity index 100% rename from documentation20/webdocs/assets/dnode.png rename to documentation20/cn/images/architecture/dnode.png diff --git a/documentation20/webdocs/assets/message.png b/documentation20/cn/images/architecture/message.png similarity index 100% rename from documentation20/webdocs/assets/message.png rename to documentation20/cn/images/architecture/message.png diff --git a/documentation20/webdocs/assets/modules.png b/documentation20/cn/images/architecture/modules.png similarity index 100% rename from documentation20/webdocs/assets/modules.png rename to documentation20/cn/images/architecture/modules.png diff --git a/documentation20/cn/images/architecture/multi_tables.png b/documentation20/cn/images/architecture/multi_tables.png new file mode 100644 index 0000000000000000000000000000000000000000..0cefaab6a9a4cdd671c671f7c6186dea41415ff0 Binary files /dev/null and b/documentation20/cn/images/architecture/multi_tables.png differ diff --git a/documentation20/cn/images/architecture/replica-forward.png b/documentation20/cn/images/architecture/replica-forward.png new file mode 100644 index 0000000000000000000000000000000000000000..bf616e030b130603eceb5dccfd30b4a1dfa68ea5 Binary files /dev/null and b/documentation20/cn/images/architecture/replica-forward.png differ diff --git a/documentation20/webdocs/assets/replica-master.png b/documentation20/cn/images/architecture/replica-master.png similarity index 100% rename from documentation20/webdocs/assets/replica-master.png rename to documentation20/cn/images/architecture/replica-master.png diff --git a/documentation20/cn/images/architecture/replica-restore.png b/documentation20/cn/images/architecture/replica-restore.png new file mode 100644 index 0000000000000000000000000000000000000000..1558e5ed0108d23efdc6b5d9ea0e44a1dff45d28 Binary files /dev/null and b/documentation20/cn/images/architecture/replica-restore.png differ diff --git a/documentation20/cn/images/architecture/structure.png b/documentation20/cn/images/architecture/structure.png new file mode 100644 index 0000000000000000000000000000000000000000..4fc8f47ab0a30d95b85ba1d85105726ed981e56e Binary files /dev/null and b/documentation20/cn/images/architecture/structure.png differ diff --git a/documentation20/webdocs/assets/vnode.png b/documentation20/cn/images/architecture/vnode.png similarity index 100% rename from documentation20/webdocs/assets/vnode.png rename to documentation20/cn/images/architecture/vnode.png diff --git a/documentation20/webdocs/assets/write_master.png b/documentation20/cn/images/architecture/write_master.png similarity index 100% rename from documentation20/webdocs/assets/write_master.png rename to documentation20/cn/images/architecture/write_master.png diff --git a/documentation20/webdocs/assets/write_slave.png b/documentation20/cn/images/architecture/write_slave.png similarity index 100% rename from documentation20/webdocs/assets/write_slave.png rename to documentation20/cn/images/architecture/write_slave.png diff --git 
a/documentation20/cn/images/connections/add_datasource1.jpg b/documentation20/cn/images/connections/add_datasource1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f0f5110f312c57f3ec1788bbc02f04fac6ac142 Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource1.jpg differ diff --git a/documentation20/cn/images/connections/add_datasource2.jpg b/documentation20/cn/images/connections/add_datasource2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa7a83e00e96fae649910dff4edf5f5bdadd7850 Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource2.jpg differ diff --git a/documentation20/cn/images/connections/add_datasource3.jpg b/documentation20/cn/images/connections/add_datasource3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc850ad08ff1174de972906842e0d5ee64e6e5cb Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource3.jpg differ diff --git a/documentation20/cn/images/connections/add_datasource4.jpg b/documentation20/cn/images/connections/add_datasource4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ba73e50d455111f8621f4165746078554c2d790 Binary files /dev/null and b/documentation20/cn/images/connections/add_datasource4.jpg differ diff --git a/documentation20/cn/images/connections/create_dashboard1.jpg b/documentation20/cn/images/connections/create_dashboard1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c Binary files /dev/null and b/documentation20/cn/images/connections/create_dashboard1.jpg differ diff --git a/documentation20/cn/images/connections/create_dashboard2.jpg b/documentation20/cn/images/connections/create_dashboard2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe5d768ac55254251e0290bf257178f5ff28f5a5 Binary files /dev/null and b/documentation20/cn/images/connections/create_dashboard2.jpg differ diff --git a/documentation20/cn/images/connections/import_dashboard1.jpg b/documentation20/cn/images/connections/import_dashboard1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d2ce7ed65eb0c2c729de50283b30491793493dc Binary files /dev/null and b/documentation20/cn/images/connections/import_dashboard1.jpg differ diff --git a/documentation20/cn/images/connections/import_dashboard2.jpg b/documentation20/cn/images/connections/import_dashboard2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94b09f0ee39552bb84f7ba1f65815ce2c9548b2d Binary files /dev/null and b/documentation20/cn/images/connections/import_dashboard2.jpg differ diff --git a/documentation20/webdocs/assets/connector.png b/documentation20/cn/images/connector.png similarity index 100% rename from documentation20/webdocs/assets/connector.png rename to documentation20/cn/images/connector.png diff --git a/documentation20/webdocs/assets/EcoSystem.png b/documentation20/cn/images/eco_system.png similarity index 100% rename from documentation20/webdocs/assets/EcoSystem.png rename to documentation20/cn/images/eco_system.png diff --git a/documentation20/webdocs/assets/tdengine-jdbc-connector.png b/documentation20/cn/images/tdengine-jdbc-connector.png similarity index 100% rename from documentation20/webdocs/assets/tdengine-jdbc-connector.png rename to documentation20/cn/images/tdengine-jdbc-connector.png diff --git a/documentation20/webdocs/assets/replica-forward.jpg b/documentation20/webdocs/assets/replica-forward.jpg 
deleted file mode 100644 index 00b8c357b9ca2f067127c7dfae37b386b268f8dd..0000000000000000000000000000000000000000 Binary files a/documentation20/webdocs/assets/replica-forward.jpg and /dev/null differ diff --git a/documentation20/webdocs/assets/replica-restore.jpg b/documentation20/webdocs/assets/replica-restore.jpg deleted file mode 100644 index 6da1bed4cc90b8a119b2fddf5f66ef3ab1b0a2fd..0000000000000000000000000000000000000000 Binary files a/documentation20/webdocs/assets/replica-restore.jpg and /dev/null differ diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md deleted file mode 100644 index 20f9bcb7307c68f1330519d8f6ed249419bf46db..0000000000000000000000000000000000000000 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ /dev/null @@ -1,145 +0,0 @@ -# TDengine文档 - -TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是[数据模型](https://www.taosdata.com/cn/documentation20/data-model-and-architecture)与[数据建模](https://www.taosdata.com/cn/documentation20/model)一节。除本文档之外,欢迎[下载产品白皮书](https://www.taosdata.com/downloads/TDengine White Paper.pdf)。如需查阅TDengine 1.6 文档,请点击[这里](https://www.taosdata.com/cn/documentation16/)访问。 - -## TDengine介绍 - -- TDengine 简介及特色 -- TDengine 适用场景 - -## [立即开始](https://www.taosdata.com/cn/getting-started) - -- [快捷安装](https://www.taosdata.com/cn/documentation20/getting-started/#快捷安装):可通过源码、安装包或docker安装,三秒钟搞定 -- [轻松启动](https://www.taosdata.com/cn/documentation20/getting-started/#轻松启动):使用systemctl 启停TDengine -- [命令行程序TAOS](https://www.taosdata.com/cn/documentation20/getting-started/#TDengine命令行程序):访问TDengine的简便方式 -- [极速体验](https://www.taosdata.com/cn/documentation20/getting-started/#TDengine-极速体验):运行示例程序,快速体验高效的数据插入、查询 - -## [数据模型和整体架构](https://www.taosdata.com/cn/documentation20/architecture) - -- [数据模型](https://www.taosdata.com/cn/documentation20/architecture/#数据模型):关系型数据库模型,但要求每个采集点单独建表 -- [集群与基本逻辑单元](https://www.taosdata.com/cn/documentation20/architecture/#集群与基本逻辑单元):吸取NoSQL优点,支持水平扩展,支持高可靠 -- [存储模型与数据分区、分片](https://www.taosdata.com/cn/documentation20/architecture/#存储模型与数据分区、分片):标签数据与时序数据完全分离,按vnode和时间两个维度对数据切分 -- [数据写入与复制流程](https://www.taosdata.com/cn/documentation20/architecture/#数据写入与复制流程):先写入WAL、之后写入缓存,再给应用确认,支持多副本 -- [缓存与持久化](https://www.taosdata.com/cn/documentation20/architecture/#缓存与持久化):最新数据缓存在内存中,但落盘时采用列式存储、超高压缩比 -- [数据查询](https://www.taosdata.com/cn/documentation20/architecture/#数据查询):支持各种函数、时间轴聚合、插值、多表聚合 - -## [数据建模](https://www.taosdata.com/cn/documentation20/model) - -- [创建库](https://www.taosdata.com/cn/documentation20/model/#创建库):为具有相似数据特征的数据采集点创建一个库 -- [创建超级表](https://www.taosdata.com/cn/documentation20/model/#创建超级表):为同一类型的数据采集点创建一个超级表 -- [创建表](https://www.taosdata.com/cn/documentation20/model/#创建表):使用超级表做模板,为每一个具体的数据采集点单独建表 - -## [高效写入数据](https://www.taosdata.com/cn/documentation20/insert) - -- [SQL写入](https://www.taosdata.com/cn/documentation20/insert/#SQL写入):使用SQL insert命令向一张或多张表写入单条或多条记录 -- [Telegraf写入](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入):配置Telegraf, 不用任何代码,将采集数据直接写入 -- [Prometheus写入](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入):配置Prometheus, 不用任何代码,将数据直接写入 -- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入):配置EMQ X,不用任何代码,就可将 MQTT 数据直接写入 -- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入):通过 HiveMQ Extension,不用任何代码,就可将 MQTT 数据直接写入 - -## 
[高效查询数据](https://www.taosdata.com/cn/documentation20/queries) - -- [主要查询功能](https://www.taosdata.com/cn/documentation20/queries/#主要查询功能):支持各种标准函数,设置过滤条件,时间段查询 -- [多表聚合查询](https://www.taosdata.com/cn/documentation20/queries/#多表聚合查询):使用超级表,设置标签过滤条件,进行高效聚合查询 -- [降采样查询值](https://www.taosdata.com/cn/documentation20/queries/#降采样查询、插值):按时间段分段聚合,支持插值 - -## [高级功能](https://www.taosdata.com/cn/documentation20/advanced-features) - -- [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation20/advanced-features/#连续查询(Continuous-Query)):基于滑动窗口,定时自动的对数据流进行查询计算 -- [数据订阅(Publisher/Subscriber)](https://www.taosdata.com/cn/documentation20/advanced-features/#数据订阅(Publisher/Subscriber)):象典型的消息队列,应用可订阅接收到的最新数据 -- [缓存(Cache)](https://www.taosdata.com/cn/documentation20/advanced-features/#缓存(Cache)):每个设备最新的数据都会缓存在内存中,可快速获取 -- [报警监测](https://www.taosdata.com/cn/documentation20/advanced-features/#报警监测(Alert)):根据配置规则,自动监测超限行为数据,并主动推送 - -## [连接器](https://www.taosdata.com/cn/documentation20/connector) - -- [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/#C/C++-Connector):通过libtaos客户端的库,连接TDengine服务器的主要方法 -- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector-java):通过标准的JDBC API,给Java应用提供到TDengine的连接 -- [Python Connector](https://www.taosdata.com/cn/documentation20/connector/#Python-Connector):给Python应用提供一个连接TDengine服务器的驱动 -- [RESTful Connector](https://www.taosdata.com/cn/documentation20/connector/#RESTful-Connector):提供一最简单的连接TDengine服务器的方式 -- [Go Connector](https://www.taosdata.com/cn/documentation20/connector/#Go-Connector):给Go应用提供一个连接TDengine服务器的驱动 -- [Node.js Connector](https://www.taosdata.com/cn/documentation20/connector/#Node.js-Connector):给node应用提供一个链接TDengine服务器的驱动 - -## [与其他工具的连接](https://www.taosdata.com/cn/documentation20/connections-with-other-tools) - -- [Grafana](https://www.taosdata.com/cn/documentation20/connections-with-other-tools/#Grafana):获取并可视化保存在TDengine的数据 -- [Matlab](https://www.taosdata.com/cn/documentation20/connections-with-other-tools/#Matlab):通过配置Matlab的JDBC数据源访问保存在TDengine的数据 -- [R](https://www.taosdata.com/cn/documentation20/connections-with-other-tools/#R):通过配置R的JDBC数据源访问保存在TDengine的数据 - -## [TDengine集群的安装、管理](https://www.taosdata.com/cn/documentation20/cluster) - -- [安装](https://www.taosdata.com/cn/documentation20/cluster/#创建第一个节点):与单节点的安装一样,但要设好配置文件里的参数first -- [节点管理](https://www.taosdata.com/cn/documentation20/cluster/#节点管理):增加、删除、查看集群的节点 -- [mnode的管理](https://www.taosdata.com/cn/documentation20/cluster/#Mnode的高可用):系统自动创建、无需任何人工干预 -- [负载均衡](https://www.taosdata.com/cn/documentation20/cluster/#负载均衡):一旦节点个数或负载有变化,自动进行 -- [节点离线处理](https://www.taosdata.com/cn/documentation20/cluster/#节点离线处理):节点离线超过一定时长,将从集群中剔除 -- [Arbitrator](https://www.taosdata.com/cn/documentation20/cluster/#Arbitrator的使用):对于偶数个副本的情形,使用它可以防止split brain - -## [TDengine的运营和维护](https://www.taosdata.com/cn/documentation20/administrator) - -- [容量规划](https://www.taosdata.com/cn/documentation20/administrator/#容量规划):根据场景,估算硬件资源 -- [容错和灾备](https://www.taosdata.com/cn/documentation20/administrator/#容错和灾备):设置正确的WAL和数据副本数 -- [系统配置](https://www.taosdata.com/cn/documentation20/administrator/#服务端配置):端口,缓存大小,文件块大小和其他系统配置 -- [用户管理](https://www.taosdata.com/cn/documentation20/administrator/#用户管理):添加、删除TDengine用户,修改用户密码 -- [数据导入](https://www.taosdata.com/cn/documentation20/administrator/#数据导入):可按脚本文件导入,也可按数据文件导入 -- [数据导出](https://www.taosdata.com/cn/documentation20/administrator/#数据导出):从shell按表导出,也可用taosdump工具做各种导出 -- 
[系统监控](https://www.taosdata.com/cn/documentation20/administrator/#系统监控):检查系统现有的连接、查询、流式计算,日志和事件等 -- [文件目录结构](https://www.taosdata.com/cn/documentation20/administrator/#文件目录结构):TDengine数据文件、配置文件等所在目录 -- [参数限制和保留关键字](https://www.taosdata.com/cn/documentation20/administrator/#参数限制和保留关键字):TDengine的参数限制和保留关键字列表 - -## [TAOS SQL](https://www.taosdata.com/cn/documentation20/taos-sql) - -- [支持的数据类型](https://www.taosdata.com/cn/documentation20/taos-sql/#支持的数据类型):支持时间戳、整型、浮点型、布尔型、字符型等多种数据类型 -- [数据库管理](https://www.taosdata.com/cn/documentation20/taos-sql/#数据库管理):添加、删除、查看数据库 -- [表管理](https://www.taosdata.com/cn/documentation20/taos-sql/#表管理):添加、删除、查看、修改表 -- [超级表管理](https://www.taosdata.com/cn/documentation20/taos-sql/#超级表STable管理):添加、删除、查看、修改超级表 -- [标签管理](https://www.taosdata.com/cn/documentation20/taos-sql/#超级表-STable-中-TAG-管理):增加、删除、修改标签 -- [数据写入](https://www.taosdata.com/cn/documentation20/taos-sql/#数据写入):支持单表单条、多条、多表多条写入,支持历史数据写入 -- [数据查询](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询):支持时间段、值过滤、排序、查询结果手动分页等 -- [SQL函数](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 -- [时间维度聚合](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合):将表中数据按照时间段进行切割后聚合,降维处理 -- [边界线制](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制):TAOS SQL的边界限制 -- [错误码](https://www.taosdata.com/cn/documentation20/Taos-Error-Code):TDengine 2.0 错误码以及对应的十进制码 - -## TDengine的技术设计 - -- 系统模块:taosd的功能和模块划分 -- 数据复制:支持实时同步、异步复制,保证系统的High Availibility -- [技术博客](https://www.taosdata.com/cn/blog/?categories=3):更多的技术分析和架构设计文章 - -## 常用工具 - -- [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html) -- [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html) - -## TDengine与其他数据库的对比测试 - -- [用InfluxDB开源的性能测试工具对比InfluxDB和TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html) -- [TDengine与OpenTSDB对比测试](https://www.taosdata.com/blog/2019/08/21/621.html) -- [TDengine与Cassandra对比测试](https://www.taosdata.com/blog/2019/08/14/573.html) -- [TDengine与InfluxDB对比测试](https://www.taosdata.com/blog/2019/07/19/419.html) -- [TDengine与InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf) - -##物联网大数据 - -- [物联网、工业互联网大数据的特点](https://www.taosdata.com/blog/2019/07/09/105.html) -- [物联网大数据平台应具备的功能和特点](https://www.taosdata.com/blog/2019/07/29/542.html) -- [通用大数据架构为什么不适合处理物联网数据?](https://www.taosdata.com/blog/2019/07/09/107.html) -- [物联网、车联网、工业互联网大数据平台,为什么推荐使用TDengine?](https://www.taosdata.com/blog/2019/07/09/109.html) - -## [培训和FAQ](https://www.taosdata.com/cn/faq) - - - diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 5ac0e39dcc134260945d5328ac8035559574f277..a1178e2eefd6c4e6a4433a451d2e4e865a39cfe6 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -34,7 +34,7 @@ # 1.0: all CPU cores are available for query processing [default]. # 0.5: only half of the CPU cores are available for query. # 0.0: only one core available. 
-# tsRatioOfQueryCores 1.0 +# ratioOfQueryCores 1.0 # the last_row/first/last aggregator will not change the original column name in the result fields # keepColumnName 0 diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index 3d57ece2adf4e8efd3338f52787d5d9424016e78..d24502a1cb8e69ddaf3989a89e51cc07dfb55f00 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -26,7 +26,6 @@ else ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 850c636940a34e5a2b58c227e1fd105fb57fa285..36870b2ebe49d45390d7b5ce18f6984b9e8e2ac2 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -51,7 +51,6 @@ cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/bin/taosdemox ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index ca2c3c66c9875a697b372f73448aa53deb887f68..b52580cfa6bdc13fcc7b716a85029fd1c679a6b6 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -35,10 +35,11 @@ done echo "verNumber=${verNumber}" -docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber} +#docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber} +docker manifest create -a tdengine/tdengine tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password -docker manifest push tdengine/tdengine:${verNumber} +docker manifest push tdengine/tdengine # how set latest version ??? 
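Stepping out of the packaging hunks for a moment: the documentation index deleted above names libtaos as the primary C/C++ route to a TDengine server. The sketch below is a minimal, hedged illustration of that API using the public calls from taos.h; the host, credentials, and SQL are placeholders rather than anything taken from this patch.

```c
#include <stdio.h>
#include <stdlib.h>
#include <taos.h>  // public client header installed with the TDengine client package

int main(void) {
  // With this patch, taos_init() returns non-zero on failure instead of
  // failing silently (see the tscSystem.c hunk further down).
  if (taos_init() != 0) {
    fprintf(stderr, "client init failed\n");
    return EXIT_FAILURE;
  }

  // Host, user, and password are the stock defaults; replace with real values.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return EXIT_FAILURE;
  }

  TAOS_RES *res = taos_query(conn, "SHOW DATABASES");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    int rows = 0;
    while (taos_fetch_row(res) != NULL) ++rows;  // walk the result set
    printf("%d database(s)\n", rows);
  }

  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return EXIT_SUCCESS;
}
```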
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index d20a6c91cdd78a8c54b89b14d7b77807ebef4877..92c917cb3d6d4cd5f41441c9ca75a742aa3641b6 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -9,6 +9,7 @@ Summary: tdengine from taosdata Group: Application/Database License: AGPL URL: www.taosdata.com +AutoReqProv: no #BuildRoot: %_topdir/BUILDROOT BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root @@ -61,7 +62,6 @@ cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepat cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin -cp %{_compiledir}/build/bin/taosdemox %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include @@ -139,7 +139,6 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${inc_link_dir}/taos.h || : diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 9cec9963af6f8abdcaedeeb4e7d40f9244508be2..dca3dd2ff623672eb85b3de72bcc34e0ea5e3d8a 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -147,8 +147,8 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -168,7 +168,10 @@ function install_main_path() { if [ "$verMode" == "cluster" ]; then ${csudo} mkdir -p ${nginx_dir} fi - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + + if [[ -e ${script_dir}/email ]]; then + ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + fi } function install_bin() { @@ -176,7 +179,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : @@ -188,7 +190,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : @@ -680,7 +681,7 @@ function install_service() { install_service_on_sysvinit else # must manual 
stop taosd - kill_taosd + kill_process taosd fi } @@ -749,9 +750,22 @@ function update_TDengine() { elif ((${service_mod}==1)); then ${csudo} service taosd stop || : else - kill_taosd + kill_process taosd + fi + sleep 1 + fi + + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx fi sleep 1 + fi fi install_main_path diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index d52428dc83805b6f03a36bb39edc774e79d04398..0a0a6633e376d084532abb5f490917abd1a173f2 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -86,7 +86,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : fi ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -98,7 +97,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : if [ "$osType" != "Darwin" ]; then [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh index 04fd23d5abb2874d0b3a68b937be529bd2c91a59..8d7463366ff46bcae2822ee3e76dbc9b588f2a89 100755 --- a/packaging/tools/install_client_power.sh +++ b/packaging/tools/install_client_power.sh @@ -86,7 +86,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/power || : if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/powerdump || : fi ${csudo} rm -f ${bin_link_dir}/rmpower || : @@ -98,7 +97,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || : if [ "$osType" != "Darwin" ]; then [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || : [ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || : fi [ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || : diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 89b5ce5b4f1f9edc91b7abbf459303cd8f632edf..ba6ace400935c10caadd9426c0701c16b4f86baa 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -146,8 +146,8 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -function kill_powerd() { - pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}') +function kill_process() { + pid=$(ps -ef | 
grep "$1" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -174,7 +174,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerd || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -185,7 +184,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || : [ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || : [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || : [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : @@ -652,7 +650,7 @@ function install_service() { install_service_on_sysvinit else # must manual stop powerd - kill_powerd + kill_process powerd fi } @@ -721,9 +719,21 @@ function update_PowerDB() { elif ((${service_mod}==1)); then ${csudo} service powerd stop || : else - kill_powerd + kill_process powerd fi sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi fi install_main_path diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index 52a4e059065ba666e9624800a24f267f74e454f9..30e9fa51a7d9c4a98d2c8f300287ebd242fecd74 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" else - bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox\ + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \ ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" fi lib_files="${build_dir}/lib/libtaos.so.${version}" @@ -55,7 +55,11 @@ else fi header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi install_files="${script_dir}/install_client.sh" diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index 15f8994e945aac89fc30b1fe7eaaf3c72ea8c105..181536b7f19d252164201291d9c37cade6cf3490 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -54,7 +54,11 @@ else fi header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" 
+else + cfg_dir="${top_dir}/packaging/cfg" +fi install_files="${script_dir}/install_client_power.sh" @@ -77,7 +81,6 @@ if [ "$osType" != "Darwin" ]; then cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 267338ed06d528be84d13f4a871865f76b940344..36b1fe5bd88950f69a56e84d98fba9c4dae0cf05 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -36,13 +36,18 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" else - bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${build_dir}/bin/tarbitrator\ + bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" fi lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + install_files="${script_dir}/install.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 7227a08b7ab5e6fdfbdb4801fac3d174fe481f1e..554e7884b1c3db69acd3ba0e3234e468b1d31c79 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -42,7 +42,11 @@ fi lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" -cfg_dir="${top_dir}/packaging/cfg" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi install_files="${script_dir}/install_power.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" @@ -78,7 +82,6 @@ else cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo - cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index c6ef73932d8dbaedbe82fdc69997ef092c9953e7..8665b3fec3a392b3dcae8c6a197625ba85ed953b 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -96,7 +96,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -107,7 +106,6 @@ function install_bin() { [ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo} ln -s 
${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${bin_dir}/taosdemox ] && ${csudo} ln -s ${bin_dir}/taosdemox ${bin_link_dir}/taosdemox || : [ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : } diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 8d96ef851c9812dcf7764dd1150350721bd2ec6e..2f2660d44635c86df3b51d2b86e37b3399869a88 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -72,7 +72,6 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index e84cdd2620f7e83ea12af7500c2385892d7072fa..7579162dc60e290754e71ed6a71c10cfaee5537b 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -38,7 +38,6 @@ function clean_bin() { # Remove link ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdemox || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/set_core || : diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh index 1842e86a5b2c55b6c2aac02e6150bc9eaa5836a6..580c46e2077d7f21e06d4d4a8f69dcd5b6bbf51d 100755 --- a/packaging/tools/remove_client_power.sh +++ b/packaging/tools/remove_client_power.sh @@ -38,7 +38,6 @@ function clean_bin() { # Remove link ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/powerdump || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/set_core || : diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh index 59073105de9828bc1255d269bdc773d613500e5a..816869cf444d8001e0c0aae30840d2c40a9e6af4 100755 --- a/packaging/tools/remove_power.sh +++ b/packaging/tools/remove_power.sh @@ -72,7 +72,6 @@ function clean_bin() { ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerd || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdemox || : ${csudo} rm -f ${bin_link_dir}/powerdump || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 102fea6b9e3f8892d8e8af26c6ee54bdaa948fb8..7b6bfee42bd1a533798365edecacdff528420d73 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.14.0' +version: '2.0.16.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
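The packaging hunks in this region move the snap package and libtaos.so from 2.0.14.0 to 2.0.16.0 (the JDBC jar moves to 2.0.19 elsewhere in the patch). When several client builds coexist on a machine, the version-info calls are a quick way to confirm what an application actually linked; a hedged sketch, assuming the taos_get_client_info/taos_get_server_info entry points of the 2.0 C API:

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  if (taos_init() != 0) return 1;

  // Reports the version of the libtaos this binary is linked against,
  // e.g. "2.0.16.0"; useful after upgrades like the one in this patch.
  printf("client: %s\n", taos_get_client_info());

  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn != NULL) {
    printf("server: %s\n", taos_get_server_info(conn));  // server build version
    taos_close(conn);
  }
  taos_cleanup();
  return 0;
}
```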
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.14.0 + - usr/lib/libtaos.so.2.0.16.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d67aba4b66be6ec2f13e994248c8214178b55663..b0f2cc0a48f906b40d7be5185ae5f081c2ed4418 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) # Base compile diff --git a/src/balance/CMakeLists.txt b/src/balance/CMakeLists.txt index bcb37690876462c227b43343d9bd3228d4405963..967635e52ce20761dbd674a380563deeeb9af189 100644 --- a/src/balance/CMakeLists.txt +++ b/src/balance/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 660ad564a5cb611da4411b5f23eb494f6255c7af..fb43751b9e8fd715d538abb1198e1bdfd0a2e9ae 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 8e5f621b37d77d41e44ff2cb329e6d6d03d62340..7d5b76cc2b3be68ae751e61b4b6c9870120c7d66 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -273,14 +273,15 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { taosScheduleTask(tscQhandle, &schedMsg); } -void tscAsyncResultOnError(SSqlObj *pSql) { +static void tscAsyncResultCallback(SSchedMsg *pMsg) { + SSqlObj* pSql = pMsg->ahandle; if (pSql == NULL || pSql->signature != pSql) { tscDebug("%p SqlObj is freed, not add into queue async res", pSql); return; } assert(pSql->res.code != TSDB_CODE_SUCCESS); - tscError("%p invoke user specified function due to error occured, code:%s", pSql, tstrerror(pSql->res.code)); + tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code)); SSqlRes *pRes = &pSql->res; if (pSql->fp == NULL || pSql->fetchFp == NULL){ @@ -291,6 +292,16 @@ void tscAsyncResultOnError(SSqlObj *pSql) { (*pSql->fp)(pSql->param, pSql, pRes->code); } +void tscAsyncResultOnError(SSqlObj* pSql) { + SSchedMsg schedMsg = {0}; + schedMsg.fp = tscAsyncResultCallback; + schedMsg.ahandle = pSql; + schedMsg.thandle = (void *)1; + schedMsg.msg = 0; + taosScheduleTask(tscQhandle, &schedMsg); +} + + int tscSendMsgToServer(SSqlObj *pSql); void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { @@ -322,7 +333,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { code = tscGetTableMeta(pSql, pTableMetaInfo); assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { taosReleaseRef(tscObjRef, pSql->self); return; } diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index da350197d408bf8e2b3c7aaf9f5ebf17894c86a5..23fb0ab67cded77ff737fac4246343486e80eb95 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -86,7 +86,6 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr pCtx->outputBytes = pExpr->resBytes; pCtx->outputType = pExpr->resType; - pCtx->startOffset = 0; pCtx->size = 1; pCtx->hasNull = true; pCtx->currentStage = 
MERGE_STAGE; diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index f813ff85d99e6642827a49defe6b96f29720fc57..9203dcfbbab8d4b512b490bf13ab91fe1b475c22 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -233,6 +233,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { // We extract the lock to tscBuildHeartBeatMsg function. + int64_t now = taosGetTimestampMs(); SSqlObj *pSql = pObj->sqlList; while (pSql) { /* @@ -247,7 +248,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql)); pQdesc->stime = htobe64(pSql->stime); pQdesc->queryId = htonl(pSql->queryId); - pQdesc->useconds = htobe64(pSql->res.useconds); + //pQdesc->useconds = htobe64(pSql->res.useconds); + pQdesc->useconds = htobe64(now - pSql->stime); pQdesc->qHandle = htobe64(pSql->res.qhandle); pHeartbeat->numOfQueries++; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 3b248a6281f7049076705be624139d2f8e34bbd9..8c72eefc159fd7b95d16cd6563e0610bfdf55de6 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2981,7 +2981,6 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) STableMeta* pTableMeta = NULL; SSchema* pSchema = NULL; -// SSchema s = tGetTbnameColumnSchema(); int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; @@ -4748,7 +4747,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; - const char* msg2 = "only support order by primary timestamp or first tag in groupby clause allowed"; + const char* msg2 = "order by primary timestamp or first tag in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; setDefaultOrderInfo(pQueryInfo); @@ -6376,16 +6375,14 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { // get table meta from mnode code = tNameExtractFullName(&pStableMetaInfo->name, pCreateTableInfo->tagdata.name); - SArray* pList = pCreateTableInfo->pTagVals; + SArray* pValList = pCreateTableInfo->pTagVals; code = tscGetTableMeta(pSql, pStableMetaInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - size_t size = taosArrayGetSize(pList); - if (tscGetNumOfTags(pStableMetaInfo->pTableMeta) != size) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); - } + size_t valSize = taosArrayGetSize(pValList); + // too long tag values will return invalid sql, not be truncated automatically SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta); @@ -6396,36 +6393,115 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } + + SArray* pNameList = NULL; + size_t nameSize = 0; + int32_t schemaSize = tscGetNumOfTags(pStableMetaInfo->pTableMeta); int32_t ret = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < size; ++i) { - SSchema* pSchema = &pTagSchema[i]; - tVariantListItem* pItem = taosArrayGet(pList, i); - char tagVal[TSDB_MAX_TAGS_LEN]; - if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - if (pItem->pVar.nLen > pSchema->bytes) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } + if (pCreateTableInfo->pTagNames) { + pNameList = 
pCreateTableInfo->pTagNames; + nameSize = taosArrayGetSize(pNameList); + + if (valSize != nameSize) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + if (schemaSize < valSize) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + bool findColumnIndex = false; - // check again after the convert since it may be converted from binary to nchar. - if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - int16_t len = varDataTLen(tagVal); - if (len > pSchema->bytes) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + for (int32_t i = 0; i < nameSize; ++i) { + SStrToken* sToken = taosArrayGet(pNameList, i); + if (TK_STRING == sToken->type) { + tscDequoteAndTrimToken(sToken); } - } - if (ret != TSDB_CODE_SUCCESS) { + tVariantListItem* pItem = taosArrayGet(pValList, i); + + findColumnIndex = false; + + // todo speedup by using hash list + for (int32_t t = 0; t < schemaSize; ++t) { + if (strncmp(sToken->z, pTagSchema[t].name, sToken->n) == 0 && strlen(pTagSchema[t].name) == sToken->n) { + SSchema* pSchema = &pTagSchema[t]; + + char tagVal[TSDB_MAX_TAGS_LEN]; + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + if (pItem->pVar.nLen > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + + // check again after the convert since it may be converted from binary to nchar. + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + int16_t len = varDataTLen(tagVal); + if (len > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + if (ret != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + + findColumnIndex = true; + break; + } + } + + if (!findColumnIndex) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z); + } + } + } else { + if (schemaSize != valSize) { tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } - tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + for (int32_t i = 0; i < valSize; ++i) { + SSchema* pSchema = &pTagSchema[i]; + tVariantListItem* pItem = taosArrayGet(pValList, i); + + char tagVal[TSDB_MAX_TAGS_LEN]; + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + if (pItem->pVar.nLen > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + + // check again after the convert since it may be converted from binary to nchar. 
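This tscSQLParser.c hunk, which continues below, lets CREATE TABLE ... USING bind tags by name rather than only by schema position: the name and value lists must be the same length, may not exceed the tag schema, and each name is resolved by a linear scan of the tag schema (the TODO suggests a hash). Roughly the two statement shapes involved — a hedged sketch whose super table and tag names are invented for illustration:

```c
#include <taos.h>

// Assumes a super table created elsewhere, e.g.:
//   CREATE TABLE meters (ts TIMESTAMP, current FLOAT) TAGS (groupId INT, location BINARY(24));
// `conn` is an open connection from taos_connect().
static void create_subtables(TAOS *conn) {
  // Positional binding: values must match the tag schema in order and
  // count, or the parser rejects the statement (msg5 in the hunk).
  taos_free_result(taos_query(conn,
      "CREATE TABLE d1001 USING meters TAGS (2, 'beijing')"));

  // Name-based binding, the path this hunk adds: each listed tag name is
  // looked up in the tag schema; an unknown name yields "invalid tag name".
  taos_free_result(taos_query(conn,
      "CREATE TABLE d1002 USING meters (location, groupId) "
      "TAGS ('shanghai', 3)"));
}
```

The shared length and type validation that both branches perform continues in the hunk below.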
+ if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + int16_t len = varDataTLen(tagVal); + if (len > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + if (ret != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + } } SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b10b040c7b5c821f6e31270c235b4fd34a90aef7..cdf9aaea25ec32858bf76303709b7f7479992871 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -330,7 +330,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { pSql->cmd.submitSchema = 1; } - if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && + if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || @@ -451,7 +451,7 @@ int doProcessSql(SSqlObj *pSql) { if (pRes->code != TSDB_CODE_SUCCESS) { tscAsyncResultOnError(pSql); - return pRes->code; + return TSDB_CODE_SUCCESS; } int32_t code = tscSendMsgToServer(pSql); @@ -460,7 +460,7 @@ int doProcessSql(SSqlObj *pSql) { if (code != TSDB_CODE_SUCCESS) { pRes->code = code; tscAsyncResultOnError(pSql); - return code; + return TSDB_CODE_SUCCESS; } return TSDB_CODE_SUCCESS; @@ -609,7 +609,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) { } return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize + - tableSerialize + sqlLen + 4096; + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) { @@ -770,6 +770,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char n[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, n); + tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s", pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex, pColSchema->name); @@ -813,6 +814,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); + // the queried table has been removed and a new table with the same name has already been created + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("%p table has already been destroyed", pSql); + return TSDB_CODE_TSC_INVALID_TABLE_NAME; + } + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { tscError("%p table schema is not matched with parsed sql", pSql); return TSDB_CODE_TSC_INVALID_SQL; @@ -856,6 +864,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); SSqlExpr *pExpr = pField->pSqlExpr; if (pExpr != NULL) { + // the queried table has been removed and a new table with the same name has already been created + // return error msg + if (pExpr->uid != pTableMeta->id.uid) { + tscError("%p table has already been destroyed", pSql); + return TSDB_CODE_TSC_INVALID_TABLE_NAME;
+ } + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { tscError("%p table schema is not matched with parsed sql", pSql); return TSDB_CODE_TSC_INVALID_SQL; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8a240accecc904675bd305c1966891bbfc62916a..13539a9b197875210d76c86bd93ec986190c0c37 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -52,7 +52,9 @@ static bool validPassword(const char* passwd) { static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *auth, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, TAOS **taos) { - taos_init(); + if (taos_init()) { + return NULL; + } if (!validUserName(user)) { terrno = TSDB_CODE_TSC_INVALID_USER_LENGTH; @@ -696,7 +698,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) { } tscAsyncResultOnError(pSubObj); - taosReleaseRef(tscObjRef, pSubObj->self); + // taosReleaseRef(tscObjRef, pSubObj->self); } if (pSql->subState.numOfSub <= 0) { diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 96aae423d5e394a4fcb6a728fa84b94a039d77af..380c43825547465079098870e3150dd6a2138b78 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -95,11 +95,21 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { pthread_mutex_lock(&subState->mutex); + bool done = allSubqueryDone(pParentSql); + + if (done) { + tscDebug("%p subquery:%p,%d all subs already done", pParentSql, pSql, idx); + + pthread_mutex_unlock(&subState->mutex); + + return false; + } + tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx); subState->states[idx] = 1; - bool done = allSubqueryDone(pParentSql); + done = allSubqueryDone(pParentSql); pthread_mutex_unlock(&subState->mutex); @@ -1838,7 +1848,7 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S TSKEY key = INT64_MIN; for(int32_t i = 0; i < numOfCols; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) { continue; } @@ -1878,14 +1888,31 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S } } +static void destroySup(SFirstRoundQuerySup* pSup) { + taosArrayDestroyEx(pSup->pResult, freeInterResult); + taosArrayDestroy(pSup->pColsInfo); + tfree(pSup); +} + void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { SSqlObj* pSql = (SSqlObj*)tres; SSqlRes* pRes = &pSql->res; SFirstRoundQuerySup* pSup = param; - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - if (numOfRows > 0) { + SSqlObj* pParent = pSup->pParent; + SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + int32_t code = taos_errno(pSql); + if (code != TSDB_CODE_SUCCESS) { + destroySup(pSup); + taos_free_result(pSql); + pParent->res.code = code; + tscAsyncResultOnError(pParent); + return; + } + + if (numOfRows > 0) { // the number is not correct for group by column in super table query TAOS_ROW row = NULL; int32_t numOfCols = taos_field_count(tres); @@ -1895,6 +1922,7 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { while ((row = taos_fetch_row(tres)) != NULL) { doAppendData(&interResult, row, numOfCols, pQueryInfo); + pSup->numOfRows += 1; } } else { // tagLen > 0 char* p = calloc(1, pSup->tagLen); @@ -1906,7 +1934,9 @@ void
tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { int32_t offset = 0; for (int32_t i = 0; i < numOfCols && offset < pSup->tagLen; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { + + // tag or group by column + if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) { memcpy(p + offset, row[i], length[i]); offset += pExpr->resBytes; } @@ -1935,23 +1965,24 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { taosArrayPush(pSup->pResult, &interResult); doAppendData(&interResult, row, numOfCols, pQueryInfo); } + + pSup->numOfRows += 1; } tfree(p); } } - pSup->numOfRows += numOfRows; if (!pRes->completed) { taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param); return; } // set the parameters for the second round query process - SSqlObj *pParent = pSup->pParent; SSqlCmd *pPCmd = &pParent->cmd; SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0); - + int32_t resRows = pSup->numOfRows; + if (pSup->numOfRows > 0) { SBufferWriter bw = tbufInitWriter(NULL, false); interResToBinary(&bw, pSup->pResult, pSup->tagLen); @@ -1969,14 +2000,30 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { taos_free_result(pSql); + if (resRows == 0) { + pParent->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + (*pParent->fp)(pParent->param, pParent, 0); + return; + } + pQueryInfo1->round = 1; tscDoQuery(pParent); } void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) { - int32_t c = taos_errno(tres); + SFirstRoundQuerySup* pSup = (SFirstRoundQuerySup*) param; + + SSqlObj* pSql = (SSqlObj*) tres; + int32_t c = taos_errno(pSql); + if (c != TSDB_CODE_SUCCESS) { - // TODO HANDLE ERROR + SSqlObj* parent = pSup->pParent; + + destroySup(pSup); + taos_free_result(pSql); + parent->res.code = code; + tscAsyncResultOnError(parent); + return; } taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param); @@ -2010,13 +2057,13 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo); if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; -// goto _error; + goto _error; } } if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; -// goto _error; + goto _error; } pNewQueryInfo->interval = pQueryInfo->interval; @@ -2027,7 +2074,6 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); int32_t index = 0; - int32_t numOfTags = 0; for(int32_t i = 0; i < numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); if (pExpr->functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) { @@ -2060,7 +2106,25 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG); p->resColId = pExpr->resColId; - numOfTags += 1; + } else if (pExpr->functionId == TSDB_FUNC_PRJ) { + int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); + for(int32_t k = 0; k < num; ++k) { + SColIndex* pIndex = taosArrayGet(pNewQueryInfo->groupbyExpr.columnInfo, k); + if (pExpr->colInfo.colId == pIndex->colId) { + pSup->tagLen += pExpr->resBytes; + taosArrayPush(pSup->pColsInfo, &pExpr->resColId); + + SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pIndex->colIndex}; + SSchema* schema 
= tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId); + + //doLimitOutputNormalColOfGroupby + SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL); + p->numOfParams = 1; + p->param[0].i64 = 1; + p->param[0].nType = TSDB_DATA_TYPE_INT; + p->resColId = pExpr->resColId; // update the result column id + } + } } } @@ -2077,6 +2141,13 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { tscHandleMasterSTableQuery(pNew); return TSDB_CODE_SUCCESS; + + _error: + destroySup(pSup); + taos_free_result(pNew); + pSql->res.code = terrno; + tscAsyncResultOnError(pSql); + return terrno; } int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { @@ -2118,7 +2189,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tfree(pMemoryBuf); return ret; } - + tscDebug("%p retrieved query data from %d vnode(s)", pSql, pState->numOfSub); pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES); if (pSql->pSubs == NULL) { @@ -2245,7 +2316,9 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query */ -static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) { +static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code, int32_t *sent) { + *sent = 0; + SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport)); if (trsupport == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2277,21 +2350,28 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d", - trsupport->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, trsupport->subqueryIndex); + oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex); pParentSql->res.code = terrno; - trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + tfree(trsupport); return pParentSql->res.code; } int32_t ret = tscProcessSql(pNew); + *sent = 1; + // if failed to process sql, let following code handle the pSql if (ret == TSDB_CODE_SUCCESS) { + tscFreeRetrieveSup(pSql); taos_free_result(pSql); return ret; - } else { + } else { + pParentSql->pSubs[trsupport->subqueryIndex] = pSql; + tscFreeRetrieveSup(pNew); + taos_free_result(pNew); return ret; } } @@ -2328,7 +2408,10 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO subqueryIndex, tstrerror(pParentSql->res.code)); } else { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) { - if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) { + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, numOfRows, &sent); + if (sent) { return; } } else { // reach the maximum retry count, abort @@ -2450,7 +2533,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR SRetrieveSupport *trsupport = (SRetrieveSupport *)param; if (pSql->param == NULL || param == NULL) { tscDebug("%p already freed in dnodecallback", pSql); - assert(pSql->res.code == TSDB_CODE_TSC_QUERY_CANCELLED); return; } @@ -2482,7 +2564,10 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR if 
(trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry); - if (tscReissueSubquery(trsupport, pSql, numOfRows) == TSDB_CODE_SUCCESS) { + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, numOfRows, &sent); + if (sent) { return; } } else { @@ -2604,7 +2689,11 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry); - if (tscReissueSubquery(trsupport, pSql, code) == TSDB_CODE_SUCCESS) { + + int32_t sent = 0; + + tscReissueSubquery(trsupport, pSql, code, &sent); + if (sent) { return; } } else { diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 52ede2318fbe62eafbfe763a03aa4c24c7e72983..4da922dadd1cf7fa55c4c7423568045886e61ab2 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,10 +47,11 @@ void *tscRpcCache; // cache to keep rpc obj int32_t tscNumOfThreads = 1; // num of rpc threads static pthread_mutex_t rpcObjMutex; // mutex to protect opening the rpc obj concurrently static pthread_once_t tscinit = PTHREAD_ONCE_INIT; +static volatile int tscInitRes = 0; void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosGetDisk(); - taosTmrReset(tscCheckDiskUsage, 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); + taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } void tscFreeRpcObj(void *param) { assert(param); @@ -64,7 +65,7 @@ void tscReleaseRpc(void *param) { return; } pthread_mutex_lock(&rpcObjMutex); - taosCacheRelease(tscRpcCache, (void *)&param, true); + taosCacheRelease(tscRpcCache, (void *)&param, false); pthread_mutex_unlock(&rpcObjMutex); } @@ -101,7 +102,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry tscError("failed to init connection to TDengine"); return -1; } - pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*10); + pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5); if (pRpcObj == NULL) { rpcClose(rpcObj.pDnodeConn); pthread_mutex_unlock(&rpcObjMutex); @@ -137,7 +138,11 @@ void taos_init_imp(void) { } taosReadGlobalCfg(); - taosCheckGlobalCfg(); + if (taosCheckGlobalCfg()) { + tscInitRes = -1; + return; + } + taosInitNotes(); rpcInit(); @@ -154,16 +159,18 @@ if (tscNumOfThreads < 2) { tscNumOfThreads = 2; } + taosTmrThreads = tscNumOfThreads; tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); if (NULL == tscQhandle) { tscError("failed to init scheduler"); + tscInitRes = -1; return; } tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC"); if(0 == tscEmbedded){ - taosTmrReset(tscCheckDiskUsage, 10, NULL, tscTmr, &tscCheckDiskUsageTmr); + taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } if (tscTableMetaInfo == NULL) { @@ -186,7 +193,7 @@ tscDebug("client is initialized successfully"); } -void taos_init() { pthread_once(&tscinit, taos_init_imp); } +int taos_init() { pthread_once(&tscinit, taos_init_imp); return tscInitRes;} // this function may be called by user or system, or by both simultaneously.
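With the tscSystem.c change just above, taos_init() is no longer fire-and-forget: pthread_once still guarantees one-shot initialization, but a failed config check or scheduler setup is now reported through tscInitRes, and taosConnectImpl() (earlier in this patch) refuses to hand out a connection when it fails. A small hedged sketch of the calling pattern this enables:

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  // taos_init() now surfaces failures from taosCheckGlobalCfg() and the
  // scheduler setup instead of returning void; the tglobal.c hunk below
  // adds a tempDir check that is one such failure path.
  if (taos_init() != 0) {
    fprintf(stderr, "taos client init failed; check taos.cfg (e.g. tempDir)\n");
    return 1;
  }

  /* ... taos_connect() / taos_query() as usual ... */

  taos_cleanup();
  return 0;
}
```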
void taos_cleanup(void) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 95cf28ec49346e500d00c1bcac9dc6f4055a0eb4..727ca9ad7f3a50442eb5a4eb461deda1ff283b32 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2057,6 +2057,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pnCmd->parseFinished = 1; pnCmd->pTableNameList = NULL; pnCmd->pTableBlockHashList = NULL; + pnCmd->tagData.data = NULL; + pnCmd->tagData.dataLen = 0; if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt index 4ea0b80bf7c1c870532f1bc3cac313c43f1a57f0..f07af85e255eaa5e285d9a4ce0853251e0fdaa21 100644 --- a/src/client/tests/CMakeLists.txt +++ b/src/client/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/client/tests/cliTest.cpp b/src/client/tests/cliTest.cpp index 5cfe61d92a1ea36cc41585e314e907a10ea9ff59..30f248b5418b54b1be26dfcf15348b03fd70af4d 100644 --- a/src/client/tests/cliTest.cpp +++ b/src/client/tests/cliTest.cpp @@ -57,7 +57,7 @@ void stmtInsertTest() { v.ts = start_ts + 20; v.k = 123; - char* str = "abc"; + char str[] = "abc"; uintptr_t len = strlen(str); v.a = str; @@ -65,7 +65,7 @@ void stmtInsertTest() { params[2].buffer_length = len; params[2].buffer = str; - char* nstr = "999"; + char nstr[] = "999"; uintptr_t len1 = strlen(nstr); v.b = nstr; @@ -84,18 +84,18 @@ void stmtInsertTest() { v.ts = start_ts + 30; v.k = 911; - str = "92"; - len = strlen(str); + char str1[] = "92"; + len = strlen(str1); params[2].length = &len; params[2].buffer_length = len; - params[2].buffer = str; + params[2].buffer = str1; - nstr = "1920"; - len1 = strlen(nstr); + char nstr1[] = "1920"; + len1 = strlen(nstr1); params[3].buffer_length = len1; - params[3].buffer = nstr; + params[3].buffer = nstr1; params[3].length = &len1; taos_stmt_bind_param(stmt, params); @@ -103,7 +103,7 @@ void stmtInsertTest() { ret = taos_stmt_execute(stmt); if (ret != 0) { - printf("%p\n", ret); + printf("%d\n", ret); printf("\033[31mfailed to execute insert statement.\033[0m\n"); return; } diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index df0ac7986599da3e53d97f64c0c5113a357f9177..0da7bda994db83882e36e9d52a7983635ad85330 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index fb6d7459318174bc864b8d0cc813df3d3e65dc00..349ccb35acd52aa5e3ccd5882d03a596e3e064ec 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -373,6 +373,23 @@ static void taosCheckDataDirCfg() { } } +static int32_t taosCheckTmpDir(void) { + if (strlen(tsTempDir) <= 0){ + uError("tempDir is not set"); + return -1; + } + + DIR *dir = opendir(tsTempDir); + if (dir == NULL) { + uError("can not open tempDir:%s, error:%s", tsTempDir, strerror(errno)); + return -1; + } + + closedir(dir); + + return 0; +} + static void doInitGlobalConfig(void) { osInit(); srand(taosSafeRand()); @@ -1488,6 +1505,11 @@ int32_t taosCheckGlobalCfg() { } taosCheckDataDirCfg(); + + if (taosCheckTmpDir()) { + return -1; + } + taosGetSystemInfo(); tsSetLocale(); @@ -1533,6 +1555,8 @@ int32_t 
taosCheckGlobalCfg() { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } + uInfo(" check global cfg completed"); + uInfo("=================================="); taosPrintGlobalCfg(); return 0; diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 59b09c8695d59c1fd0584f73a7e4be1eb1ab1c0b..47d6b90e916e4f16446e6e0ebff2e21e3f15474a 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.18-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.19-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 34b0a3c6d37530ad01600a455d426c1606613c23..f6221aca89e67426a28b1cf4e90e699d9963ca0e 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.19 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 0626bcf1fb1161cc38ad05002b984b2200b68d68..8ebf8aa5cceb1060f735ec498c913c7606747fd5 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.19 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -81,8 +81,6 @@ commons-dbcp2 2.7.0
- - @@ -130,8 +128,10 @@ **/AppMemoryLeakTest.java + **/AuthenticationTest.java **/TaosInfoMonitorTest.java **/FailOverTest.java + **/InvalidResultSetPointerTest.java true diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java new file mode 100644 index 0000000000000000000000000000000000000000..bb621bd1308e18d7d404fcc7b53e9fa07d487d73 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -0,0 +1,529 @@ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.Enumeration; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.*; + +public abstract class AbstractConnection extends WrapperImpl implements Connection { + + protected volatile boolean isClosed; + protected volatile String catalog; + protected volatile Properties clientInfoProps = new Properties(); + + @Override + public abstract Statement createStatement() throws SQLException; + + @Override + public abstract PreparedStatement prepareStatement(String sql) throws SQLException; + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + // do nothing + return sql; + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + //do nothing + } + + @Override + public boolean getAutoCommit() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return true; + } + + @Override + public void commit() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public void rollback() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public abstract void close() throws SQLException; + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public abstract DatabaseMetaData getMetaData() throws SQLException; + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + //do nothing + } + + @Override + public boolean isReadOnly() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + return true; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + /* + try (Statement stmt = createStatement()) { + boolean execute = stmt.execute("use " + catalog); + if (execute) + this.catalog = catalog; + } catch (SQLException e) { + // do nothing + } + */ + + this.catalog = catalog; + } + + @Override + public String getCatalog() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return 
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java new file mode 100644 index 0000000000000000000000000000000000000000..bb621bd1308e18d7d404fcc7b53e9fa07d487d73 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -0,0 +1,529 @@ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.Enumeration; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.*; + +public abstract class AbstractConnection extends WrapperImpl implements Connection { + + protected volatile boolean isClosed; + protected volatile String catalog; + protected volatile Properties clientInfoProps = new Properties(); + + @Override + public abstract Statement createStatement() throws SQLException; + + @Override + public abstract PreparedStatement prepareStatement(String sql) throws SQLException; + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + // do nothing + return sql; + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + //do nothing + } + + @Override + public boolean getAutoCommit() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return true; + } + + @Override + public void commit() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public void rollback() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public abstract void close() throws SQLException; + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public abstract DatabaseMetaData getMetaData() throws SQLException; + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + //do nothing + } + + @Override + public boolean isReadOnly() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + return true; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + /* + try (Statement stmt = createStatement()) { + boolean execute = stmt.execute("use " + catalog); + if (execute) + this.catalog = catalog; + } catch (SQLException e) { + // do nothing + } + */ + + this.catalog = catalog; + } + + @Override + public String getCatalog() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return
this.catalog; + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (level) { + case Connection.TRANSACTION_NONE: + break; + case Connection.TRANSACTION_READ_UNCOMMITTED: + case Connection.TRANSACTION_READ_COMMITTED: + case Connection.TRANSACTION_REPEATABLE_READ: + case Connection.TRANSACTION_SERIALIZABLE: + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + default: + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } + //do nothing + } + + @Override + public int getTransactionIsolation() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return Connection.TRANSACTION_NONE; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return null; + } + + @Override + public void clearWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetType) { + case ResultSet.TYPE_FORWARD_ONLY: + break; + case ResultSet.TYPE_SCROLL_INSENSITIVE: + case ResultSet.TYPE_SCROLL_SENSITIVE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + switch (resultSetConcurrency) { + case ResultSet.CONCUR_READ_ONLY: + break; + case ResultSet.CONCUR_UPDATABLE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + return createStatement(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetType) { + case ResultSet.TYPE_FORWARD_ONLY: + break; + case ResultSet.TYPE_SCROLL_INSENSITIVE: + case ResultSet.TYPE_SCROLL_SENSITIVE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + switch (resultSetConcurrency) { + case ResultSet.CONCUR_READ_ONLY: + break; + case ResultSet.CONCUR_UPDATABLE: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + return prepareStatement(sql); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Map<String, Class<?>> getTypeMap() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + 
} + + @Override + public void setTypeMap(Map<String, Class<?>> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (holdability) { + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + break; + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + //do nothing + } + + @Override + public int getHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetHoldability) { + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + break; + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + return createStatement(resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (resultSetHoldability) { + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + break; + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + default: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + return prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + switch (autoGeneratedKeys) { + case Statement.RETURN_GENERATED_KEYS: + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + case Statement.NO_GENERATED_KEYS: + break; + } + return prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob createClob() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob createBlob() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob createNClob() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + //true if the connection is valid, false otherwise + if (isClosed()) + return false; + if (timeout < 0) //SQLException - if the value supplied for timeout is less then 0 + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + + ExecutorService executor = Executors.newCachedThreadPool(); + Future<Boolean> future = executor.submit(() -> { + int status; + try (Statement stmt = createStatement()) { + ResultSet resultSet = stmt.executeQuery("select server_status()"); + resultSet.next(); + status = resultSet.getInt("server_status()"); + resultSet.close(); + } + return status == 1 ?
true : false; + }); + + boolean status = false; + try { + if (timeout == 0) + status = future.get(); + else + status = future.get(timeout, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } catch (TimeoutException e) { + future.cancel(true); + status = false; + } finally { + executor.shutdownNow(); + } + return status; + }
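The isValid implementation above is worth calling out: it probes the server with select server_status() on a throwaway executor thread so that the JDBC timeout contract can be enforced through Future.get(timeout). A short usage sketch under assumed defaults (the URL, user and password below are TDengine's documented defaults, shown only for illustration):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class LivenessCheck {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:TAOS://localhost:6030/test", "root", "taosdata")) {
                // waits at most 5 seconds for the select server_status() probe
                System.out.println(conn.isValid(5) ? "alive" : "stale");
            }
        }
    }

Per the java.sql.Connection contract (and the timeout == 0 branch above), isValid(0) blocks until the probe completes.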
+ + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + if (isClosed) + throw TSDBError.createSQLClientInfoException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + if (clientInfoProps == null) + clientInfoProps = new Properties(); + clientInfoProps.setProperty(name, value); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + if (isClosed) + throw TSDBError.createSQLClientInfoException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) { + String name = (String) enumer.nextElement(); + clientInfoProps.put(name, properties.getProperty(name)); + } + } + + @Override + public String getClientInfo(String name) throws SQLException { + if (isClosed) + throw TSDBError.createSQLClientInfoException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return clientInfoProps.getProperty(name); + } + + @Override + public Properties getClientInfo() throws SQLException { + if (isClosed) + throw TSDBError.createSQLClientInfoException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return clientInfoProps; + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void setSchema(String schema) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + //do nothing + } + + @Override + public String getSchema() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + return null; + } + + @Override + public void abort(Executor executor) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + // do nothing + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + if (milliseconds < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int getNetworkTimeout() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } +}
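AbstractDatabaseMetaData below follows the same consolidation: its hand-rolled unwrap/isWrapperFor pair is deleted in favor of the shared WrapperImpl base class, and getConnection() changes from a stub returning null to an abstract method, so concrete metadata classes must now hand back their owning connection. For reference, the java.sql.Wrapper contract that WrapperImpl centralizes is the same pattern the removed lines implemented:

    import java.sql.SQLException;
    import java.sql.Wrapper;

    // Generic sketch of the Wrapper contract now shared via WrapperImpl.
    public class WrapperSketch implements Wrapper {
        @Override
        public <T> T unwrap(Class<T> iface) throws SQLException {
            try {
                return iface.cast(this); // succeeds when this implements iface
            } catch (ClassCastException cce) {
                throw new SQLException("Unable to unwrap to " + iface.toString());
            }
        }

        @Override
        public boolean isWrapperFor(Class<?> iface) throws SQLException {
            return iface.isInstance(this);
        }
    }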
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java index 08414d05e9f8b03582ac1257e6c460c05522f57e..5dcaa77ebd4a15087785a6a9b642b85f160f5287 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java @@ -4,7 +4,7 @@ import java.sql.*; import java.util.ArrayList; import java.util.List; -public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrapper { +public abstract class AbstractDatabaseMetaData extends WrapperImpl implements DatabaseMetaData { private final static String PRODUCT_NAME = "TDengine"; private final static String PRODUCT_VESION = "2.0.x.x"; @@ -981,9 +981,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrap return getEmptyResultSet(); } - public Connection getConnection() throws SQLException { - return null; - } + public abstract Connection getConnection() throws SQLException; public boolean supportsSavepoints() throws SQLException { return false; @@ -1067,6 +1065,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrap } public ResultSet getClientInfoProperties() throws SQLException { + //TODO: see https://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html#setClientInfo-java.lang.String-java.lang.String- return getEmptyResultSet(); } @@ -1093,20 +1092,6 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrap return new EmptyResultSet(); } - @Override - public <T> T unwrap(Class<T> iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class<?> iface) throws SQLException { - return iface.isInstance(this); - } - protected ResultSet getCatalogs(Connection conn) throws SQLException { try (Statement stmt = conn.createStatement()) { DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java similarity index 99% rename from src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java rename to src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java index f864788bfffc8bdfefb0b91ec645a10ae8eec843..21bf8e7a932b0515d77c8f124eae2d0a4596b3b6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractTaosDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java @@ -8,7 +8,7 @@ import java.util.List; import java.util.Properties; import java.util.StringTokenizer; -public abstract class AbstractTaosDriver implements Driver { +public abstract class AbstractDriver implements Driver { private static final String TAOS_CFG_FILENAME = "taos.cfg"; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java new file mode 100644 index 0000000000000000000000000000000000000000..14bd2929f17e344381510897528bc479c50a4d36 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -0,0 +1,1210 @@ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.Calendar; +import java.util.Map; + +public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { + 
private int fetchSize; + + @Override + public abstract boolean next() throws SQLException; + + @Override + public abstract void close() throws SQLException; + + @Override + public boolean wasNull() throws SQLException { + return false; + } + + @Override + public abstract String getString(int columnIndex) throws SQLException; + + @Override + public abstract boolean getBoolean(int columnIndex) throws SQLException; + + @Override + public byte getByte(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract short getShort(int columnIndex) throws SQLException; + + @Override + public abstract int getInt(int columnIndex) throws SQLException; + + @Override + public abstract long getLong(int columnIndex) throws SQLException; + + @Override + public abstract float getFloat(int columnIndex) throws SQLException; + + @Override + public abstract double getDouble(int columnIndex) throws SQLException; + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract Timestamp getTimestamp(int columnIndex) throws SQLException; + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(findColumn(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(findColumn(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + 
return getShort(findColumn(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(findColumn(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(findColumn(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(findColumn(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(findColumn(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(findColumn(columnLabel)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(findColumn(columnLabel)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return getTimestamp(findColumn(columnLabel)); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + return getAsciiStream(findColumn(columnLabel)); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + return getUnicodeStream(findColumn(columnLabel)); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + return getBinaryStream(findColumn(columnLabel)); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + return null; + } + + @Override + public void clearWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + } + + @Override + public String getCursorName() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract ResultSetMetaData getMetaData() throws SQLException; + + @Override + public Object getObject(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return getObject(findColumn(columnLabel)); + } + + @Override + public abstract int findColumn(String columnLabel) throws SQLException; + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + return getCharacterStream(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + 
public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public abstract boolean isBeforeFirst() throws SQLException; + + @Override + public abstract boolean isAfterLast() throws SQLException; + + @Override + public abstract boolean isFirst() throws SQLException; + + @Override + public abstract boolean isLast() throws SQLException; + + @Override + public abstract void beforeFirst() throws SQLException; + + @Override + public abstract void afterLast() throws SQLException; + + @Override + public abstract boolean first() throws SQLException; + + @Override + public abstract boolean last() throws SQLException; + + @Override + public abstract int getRow() throws SQLException; + + @Override + public abstract boolean absolute(int row) throws SQLException; + + @Override + public abstract boolean relative(int rows) throws SQLException; + + @Override + public abstract boolean previous() throws SQLException; + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + //nothing to do + } + + @Override + public int getFetchDirection() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (rows < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + //nothing to do + this.fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return this.fetchSize; + } + + @Override + public int getType() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public boolean rowUpdated() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean rowInserted() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean rowDeleted() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void insertRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void deleteRow() throws SQLException { + if 
(isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void refreshRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void cancelRowUpdates() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void moveToInsertRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void moveToCurrentRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract Statement getStatement() throws SQLException; + + @Override + public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public int getHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract String getNString(int columnIndex) throws SQLException; + + @Override + public String getNString(String columnLabel) throws SQLException { + return getNString(findColumn(columnLabel)); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + 
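Apart from the two typed getObject overloads that close the class just below, the pattern of AbstractResultSet is now complete: the cursor is pinned to TYPE_FORWARD_ONLY / CONCUR_READ_ONLY, every mutating or scrolling call raises ERROR_UNSUPPORTED_METHOD, and all label-based getters funnel through findColumn into the abstract index-based core. Consumer code therefore reads it like any forward-only JDBC result set; a small sketch (table and column names are made up for illustration, and the URL/credentials are assumed defaults):

    import java.sql.*;

    public class ScanExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:TAOS://localhost:6030/test", "root", "taosdata");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("select ts, temperature from weather")) {
                while (rs.next()) {
                    // getTimestamp("ts") delegates to getTimestamp(findColumn("ts"))
                    Timestamp ts = rs.getTimestamp("ts");
                    float temperature = rs.getFloat("temperature");
                    System.out.println(ts + " " + temperature);
                }
            }
        }
    }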
@Override + public <T> T getObject(int columnIndex, Class<T> type) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public <T> T getObject(String columnLabel, Class<T> type) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java new file mode 100644 index 0000000000000000000000000000000000000000..aac97c530dbce21d3e7797933f07bf25a0fc1a85 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java @@ -0,0 +1,249 @@ +package com.taosdata.jdbc; + +import java.sql.*; + +public abstract class AbstractStatement extends WrapperImpl implements Statement { + + private volatile boolean closeOnCompletion; + private int fetchSize; + + @Override + public abstract ResultSet executeQuery(String sql) throws SQLException; + + @Override + public abstract int executeUpdate(String sql) throws SQLException; + + @Override + public abstract void close() throws SQLException; + + @Override + public int getMaxFieldSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return TSDBConstants.maxFieldSize; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (max < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + // nothing to do + } + + @Override + public int getMaxRows() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (max < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + // nothing to do + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + @Override + public int getQueryTimeout() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return 0; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (seconds < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + } + + @Override + public void cancel() throws SQLException { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + // nothing to do + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + } + + @Override + public void setCursorName(String name) throws
SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public abstract boolean execute(String sql) throws SQLException; + + @Override + public abstract ResultSet getResultSet() throws SQLException; + + @Override + public abstract int getUpdateCount() throws SQLException; + + @Override + public boolean getMoreResults() throws SQLException { + return getMoreResults(CLOSE_CURRENT_RESULT); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + //nothing to do + } + + @Override + public int getFetchDirection() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (rows < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + //nothing to do + this.fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return this.fetchSize; + } + + @Override + public int getResultSetConcurrency() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int getResultSetType() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public abstract void addBatch(String sql) throws SQLException; + + @Override + public abstract void clearBatch() throws SQLException; + + @Override + public abstract int[] executeBatch() throws SQLException; + + @Override + public abstract Connection getConnection() throws SQLException; + + @Override + public boolean getMoreResults(int current) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return false; + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public boolean 
execute(String sql, String[] columnNames) throws SQLException { + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); + } + + @Override + public int getResultSetHoldability() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public abstract boolean isClosed() throws SQLException; + + @Override + public void setPoolable(boolean poolable) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + //nothing to do + } + + @Override + public boolean isPoolable() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + this.closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return this.closeOnCompletion; + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java index f82c064e751c195eb6327580c285a815346c917b..499c656c9d7914901062044324f80fcf74a42b22 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java @@ -160,12 +160,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Date getDate(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Time getTime(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -176,17 +176,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -256,22 +256,22 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new 
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -281,7 +281,7 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public String getCursorName() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -313,12 +313,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Reader getCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -353,22 +353,22 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public void beforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void afterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean first() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean last() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -383,17 +383,17 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public boolean absolute(int row) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean relative(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public boolean previous() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -443,227 +443,227 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public void updateNull(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new 
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new 
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateNull(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new 
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void insertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void updateRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void deleteRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void refreshRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void cancelRowUpdates() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void moveToInsertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void moveToCurrentRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -673,12 +673,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public Ref getRef(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -1043,12 +1043,12 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public <T> T getObject(int columnIndex, Class<T> type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public <T> T getObject(String columnLabel, Class<T> type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 547fe6a9e9900981551b138c204d0f24d6ef152b..8d947b9411eb91eded49b3c7b1f12586682346ff 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -14,41 +14,25 @@ *****************************************************************************/ package com.taosdata.jdbc; -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.NClob; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLClientInfoException; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.Struct; -import java.util.*; -import java.util.concurrent.Executor; +import java.sql.*; +import java.util.Properties; -public class TSDBConnection implements Connection { +public class TSDBConnection extends AbstractConnection { - private TSDBJNIConnector connector = null; + private TSDBJNIConnector connector; + private TSDBDatabaseMetaData databaseMetaData; + private boolean batchFetch; - private String catalog = null; - - private TSDBDatabaseMetaData dbMetaData; - - private Properties clientInfoProps = new Properties(); - - private int timeoutMilliseconds = 0; + public Boolean getBatchFetch() { + return this.batchFetch; + } - private boolean batchFetch = false; + public void setBatchFetch(Boolean batchFetch) { + this.batchFetch = batchFetch; + } public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { - this.dbMetaData = meta; + this.databaseMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), @@ -64,8 +48,8 @@ public class TSDBConnection { private void connect(String host, int port, String dbName, String user, String password) throws SQLException { this.connector = new TSDBJNIConnector(); this.connector.connect(host, port, dbName, user, password); - this.setCatalog(dbName); - this.dbMetaData.setConnection(this); + this.catalog = dbName; + this.databaseMetaData.setConnection(this); } public TSDBJNIConnector getConnection() { @@ -102,52 +86,11 @@ return new TSDBPreparedStatement(this, this.connector, sql); } - public CallableStatement prepareCall(String sql) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public String nativeSQL(String sql) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setAutoCommit(boolean autoCommit) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - - } - - public boolean getAutoCommit() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - - return true; - } - - public void commit() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - } - - public void rollback() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - public void close() throws SQLException { if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } + this.isClosed = true; this.connector.closeConnection(); } @@ -155,105 +98,11 @@ public class TSDBConnection implements Connection { return this.connector != null && this.connector.isClosed(); } - /** - * A connection's database is able to provide information describing its tables, - * its supported SQL grammar, its stored procedures, the capabilities of this - * connection, etc. This information is made available through a - * DatabaseMetaData object. - * - * @return a DatabaseMetaData object for this connection - * @throws SQLException if a database access error occurs - */ public DatabaseMetaData getMetaData() throws SQLException { if (isClosed()) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } - return this.dbMetaData; - } - - /** - * This readOnly option is not supported by TDengine. However, the method is intentionally left blank here to - * support HikariCP connection. - * - * @param readOnly - * @throws SQLException - */ - public void setReadOnly(boolean readOnly) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - } - - public boolean isReadOnly() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return true; - } - - public void setCatalog(String catalog) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - this.catalog = catalog; - } - - public String getCatalog() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return this.catalog; - } - - /** - * The transaction isolation level option is not supported by TDengine. - * This method is intentionally left empty to support HikariCP connection. 
- * - * @param level - * @throws SQLException - */ - public void setTransactionIsolation(int level) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - switch (level) { - case Connection.TRANSACTION_NONE: - case Connection.TRANSACTION_READ_COMMITTED: - case Connection.TRANSACTION_READ_UNCOMMITTED: - case Connection.TRANSACTION_REPEATABLE_READ: - case Connection.TRANSACTION_SERIALIZABLE: - break; - default: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - } - - /** - * The transaction isolation level option is not supported by TDengine. - * - * @return - * @throws SQLException - */ - public int getTransactionIsolation() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return Connection.TRANSACTION_NONE; - } - - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - //todo: implement getWarnings according to the warning messages returned from TDengine - return null; - } - - public void clearWarnings() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - //todo: implement clearWarnings according to the warning messages returned from TDengine + return this.databaseMetaData; } public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { @@ -263,253 +112,4 @@ - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - // This method is implemented in the current way to support Spark - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - - if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - - return this.prepareStatement(sql); - } - - public Boolean getBatchFetch() { - return this.batchFetch; - } - - public void setBatchFetch(Boolean batchFetch) { - this.batchFetch = batchFetch; - } - - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Map<String, Class<?>> getTypeMap() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setTypeMap(Map<String, Class<?>> map) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setHoldability(int holdability) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - } - - /** - * the transaction is not supported by TDengine, so the opened ResultSet Objects will remain open - * - * @return - * @throws SQLException - */ - public int getHoldability() throws
SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - - public Savepoint setSavepoint() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Savepoint setSavepoint(String name) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void rollback(Savepoint savepoint) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Clob createClob() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Blob createBlob() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - 
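// Reviewer note (sketch under stated assumptions, not part of this patch): the
// Connection methods deleted from TSDBConnection in this hunk are not lost --
// the class now extends AbstractConnection (see the class declaration change
// above). AbstractConnection's source is not shown in this section, so the shape
// below is an assumption: the shared guard-then-throw bodies live once in the
// superclass, in the same package as TSDBError. The class name is illustrative.
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Savepoint;

abstract class AbstractConnectionSketch implements Connection {
    @Override
    public Savepoint setSavepoint() throws SQLException {
        if (isClosed())
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
    }
    // ...one such override per unsupported Connection method; supported ones
    // (auto-commit, catalog, read-only) presumably keep the lenient defaults
    // TSDBConnection had before this patch, so pools like HikariCP keep working.
}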
public NClob createNClob() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public SQLXML createSQLXML() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public boolean isValid(int timeout) throws SQLException { - return !this.isClosed(); - } - - public void setClientInfo(String name, String value) throws SQLClientInfoException { - clientInfoProps.setProperty(name, value); - } - - public void setClientInfo(Properties properties) throws SQLClientInfoException { - for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) { - String name = (String) enumer.nextElement(); - clientInfoProps.put(name, properties.getProperty(name)); - } - } - - public String getClientInfo(String name) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return clientInfoProps.getProperty(name); - } - - public Properties getClientInfo() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return clientInfoProps; - } - - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setSchema(String schema) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public String getSchema() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void abort(Executor executor) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - this.timeoutMilliseconds = milliseconds; - } - - public int getNetworkTimeout() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return this.timeoutMilliseconds; - } - - public <T> T unwrap(Class<T> iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - public boolean isWrapperFor(Class<?> iface) throws SQLException { - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index
0cf33692b05ab5e19e198463dc420b8a07c637a5..043db9bbd75ffeaa56de3fb5439231849a824662 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -20,7 +20,7 @@ import java.util.Map; public abstract class TSDBConstants { public static final String STATEMENT_CLOSED = "statement is closed"; - public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; + public static final String UNSUPPORTED_METHOD_EXCEPTION_MSG = "this operation is NOT supported currently!"; public static final String INVALID_VARIABLES = "invalid variables"; public static final String RESULT_SET_IS_CLOSED = "resultSet is closed"; @@ -36,6 +36,7 @@ public abstract class TSDBConstants { public static final int JNI_NUM_OF_FIELDS_0 = -4; public static final int JNI_SQL_NULL = -5; public static final int JNI_FETCH_END = -6; + public static final int JNI_OUT_OF_MEMORY = -7; public static final int TSDB_DATA_TYPE_NULL = 0; public static final int TSDB_DATA_TYPE_BOOL = 1; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java index d1f1e77b1c1e325e04c018a23d5589b7501f4919..8b7ede148e89cce0d8db22e62627bd1e1c49f9bb 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -57,39 +57,38 @@ public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData { */ public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { if (conn == null || conn.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } return super.getTables(catalog, schemaPattern, tableNamePattern, types, conn); } - public ResultSet getCatalogs() throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getCatalogs(conn); } public ResultSet getTableTypes() throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getTableTypes(); } public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, conn); } public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { if (conn == null || conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getPrimaryKeys(catalog, schema, table, conn); } public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { if (conn == null || 
conn.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getSuperTables(catalog, schemaPattern, tableNamePattern, conn); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index c171ca2a36f78b6899cafd6348b3ebc3407d1b2a..2b87b72fef0f2b621536c5a11aba69975aa86434 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -37,7 +37,7 @@ import java.util.logging.Logger; * register it with the DriverManager. This means that a user can load and * register a driver by doing Class.forName("foo.bah.Driver") */ -public class TSDBDriver extends AbstractTaosDriver { +public class TSDBDriver extends AbstractDriver { @Deprecated private static final String URL_PREFIX1 = "jdbc:TSDB://"; @@ -90,7 +90,7 @@ public class TSDBDriver extends AbstractTaosDriver { * fetch data from native function in a batch model */ public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch"; - + private TSDBDatabaseMetaData dbMetaData = null; static { @@ -179,18 +179,18 @@ public class TSDBDriver extends AbstractTaosDriver { while (queryParams.hasMoreElements()) { String oneToken = queryParams.nextToken(); String[] pair = oneToken.split("="); - + if ((pair[0] != null && pair[0].trim().length() > 0) && (pair[1] != null && pair[1].trim().length() > 0)) { urlProps.setProperty(pair[0].trim(), pair[1].trim()); } } } - + // parse Product Name String dbProductName = url.substring(0, beginningOfSlashes); dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1); dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); - + // parse database name url = url.substring(beginningOfSlashes + 2); int indexOfSlash = url.indexOf("/"); @@ -200,7 +200,7 @@ public class TSDBDriver extends AbstractTaosDriver { } url = url.substring(0, indexOfSlash); } - + // parse port int indexOfColon = url.indexOf(":"); if (indexOfColon != -1) { @@ -209,11 +209,11 @@ public class TSDBDriver extends AbstractTaosDriver { } url = url.substring(0, indexOfColon); } - + if (url != null && url.length() > 0 && url.trim().length() > 0) { urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); } - + this.dbMetaData = new TSDBDatabaseMetaData(urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER)); return urlProps; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index ede0b4e4e847e758d6f4e335d4077d39096e7afc..ce1fcaae5a63790b6277f26e121a40cb91de7af4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -1,6 +1,8 @@ package com.taosdata.jdbc; +import java.sql.SQLClientInfoException; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.util.HashMap; import java.util.Map; @@ -13,8 +15,27 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variables"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED, "statement is closed"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED, "resultSet is closed"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY, "Batch is empty!"); + 
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY, "Can not issue data manipulation statements with executeQuery()"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE, "Can not issue SELECT via executeUpdate()"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: (?)"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE, "Database not specified or available"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: (?)"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: (?)"); + + /**************************************************/ + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error"); /**************************************************/ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding"); + + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database!"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "JNI connection already closed!"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL, "invalid JNI result set!"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0, "invalid num of fields!"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_SQL_NULL, "empty sql string!"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_FETCH_END, "fetch to the end of resultset"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed!"); } public static String wrapErrMsg(String msg) { @@ -22,10 +43,28 @@ } public static SQLException createSQLException(int errorNumber) { - // JDBC exception code is less than 0x2350 - if (errorNumber <= 0x2350) - return new SQLException(TSDBErrorMap.get(errorNumber)); - // JNI exception code is - return new SQLException(wrapErrMsg(TSDBErrorMap.get(errorNumber))); + return createSQLException(errorNumber, null); + } + + public static SQLException createSQLException(int errorNumber, String message) { + if (message == null || message.isEmpty()) { + if (TSDBErrorNumbers.contains(errorNumber)) + message = TSDBErrorMap.get(errorNumber); + else + message = TSDBErrorMap.get(TSDBErrorNumbers.ERROR_UNKNOWN); + } + + if (errorNumber == TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD) + return new SQLFeatureNotSupportedException(message); + + if (errorNumber < TSDBErrorNumbers.ERROR_UNKNOWN) + // JDBC exception's error number is less than 0x2350 + return new SQLException("ERROR (" + Integer.toHexString(errorNumber) + "): " + message); + // JNI exception's error number is larger than 0x2350 + return new SQLException("TDengine ERROR (" + Integer.toHexString(errorNumber) + "): " + message); + } + + public static SQLClientInfoException createSQLClientInfoException(int errorNumber) { + return new SQLClientInfoException(); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index 74dbb8ab9af0a19a2d969e9fefc0c863f444a77b..9a4effb8eeb2934b9fec50c2572b04b77564915a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -1,5 +1,7 @@ package com.taosdata.jdbc; +import java.util.HashSet; + public class TSDBErrorNumbers {
public static final int ERROR_CONNECTION_CLOSED = 0x2301; // connection already closed @@ -7,9 +9,61 @@ public static final int ERROR_INVALID_VARIABLE = 0x2303; //invalid variables public static final int ERROR_STATEMENT_CLOSED = 0x2304; //statement already closed public static final int ERROR_RESULTSET_CLOSED = 0x2305; //resultSet is closed + public static final int ERROR_BATCH_IS_EMPTY = 0x2306; //Batch is empty! + public static final int ERROR_INVALID_WITH_EXECUTEQUERY = 0x2307; //Can not issue data manipulation statements with executeQuery() + public static final int ERROR_INVALID_WITH_EXECUTEUPDATE = 0x2308; //Can not issue SELECT via executeUpdate() + public static final int ERROR_INVALID_FOR_EXECUTE_QUERY = 0x2309; //not a valid sql for executeQuery: (SQL) + public static final int ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE = 0x2310; //Database not specified or available + public static final int ERROR_INVALID_FOR_EXECUTE_UPDATE = 0x2311; //not a valid sql for executeUpdate: (SQL) + public static final int ERROR_INVALID_FOR_EXECUTE = 0x2312; //not a valid sql for execute: (SQL) + + public static final int ERROR_UNKNOWN = 0x2350; //unknown error + + public static final int ERROR_SUBSCRIBE_FAILED = 0x2351; //failed to create subscription + public static final int ERROR_UNSUPPORTED_ENCODING = 0x2352; //Unsupported encoding + + public static final int ERROR_JNI_TDENGINE_ERROR = 0x2353; + public static final int ERROR_JNI_CONNECTION_NULL = 0x2354; //invalid tdengine connection! + public static final int ERROR_JNI_RESULT_SET_NULL = 0x2355; + public static final int ERROR_JNI_NUM_OF_FIELDS_0 = 0x2356; + public static final int ERROR_JNI_SQL_NULL = 0x2357; + public static final int ERROR_JNI_FETCH_END = 0x2358; + public static final int ERROR_JNI_OUT_OF_MEMORY = 0x2359; + + private static final HashSet<Integer> errorNumbers; + + static { + errorNumbers = new HashSet<>(); + errorNumbers.add(ERROR_CONNECTION_CLOSED); + errorNumbers.add(ERROR_UNSUPPORTED_METHOD); + errorNumbers.add(ERROR_INVALID_VARIABLE); + errorNumbers.add(ERROR_STATEMENT_CLOSED); + errorNumbers.add(ERROR_RESULTSET_CLOSED); + errorNumbers.add(ERROR_INVALID_WITH_EXECUTEQUERY); + errorNumbers.add(ERROR_INVALID_WITH_EXECUTEUPDATE); + errorNumbers.add(ERROR_INVALID_FOR_EXECUTE_QUERY); + errorNumbers.add(ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE); + errorNumbers.add(ERROR_INVALID_FOR_EXECUTE_UPDATE); + errorNumbers.add(ERROR_INVALID_FOR_EXECUTE); - public static final int ERROR_SUBSCRIBE_FAILED = 0x2350; //failed to create subscription + /*****************************************************/ + errorNumbers.add(ERROR_SUBSCRIBE_FAILED); + errorNumbers.add(ERROR_UNSUPPORTED_ENCODING); + + errorNumbers.add(ERROR_JNI_TDENGINE_ERROR); + errorNumbers.add(ERROR_JNI_CONNECTION_NULL); + errorNumbers.add(ERROR_JNI_RESULT_SET_NULL); + errorNumbers.add(ERROR_JNI_NUM_OF_FIELDS_0); + errorNumbers.add(ERROR_JNI_SQL_NULL); + errorNumbers.add(ERROR_JNI_FETCH_END); + errorNumbers.add(ERROR_JNI_OUT_OF_MEMORY); + + } private TSDBErrorNumbers() { } + + public static boolean contains(int errorNumber) { + return errorNumbers.contains(errorNumber); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 349a02fb37c28b263bb73bcc05f644bc53f71079..b0f016cd72e06aac795595cf0a42a3f98faf04f4 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -38,7 +38,7 @@ public class TSDBJNIConnector { /** * Result set pointer for the current connection */ - private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; /** * result set status in current connection @@ -119,9 +119,9 @@ public class TSDBJNIConnector { public long executeQuery(String sql) throws SQLException { // close previous result set if the user forgets to invoke the // free method to close previous result set. - if (!this.isResultsetClosed) { - freeResultSet(taosResultSetPointer); - } +// if (!this.isResultsetClosed) { +// freeResultSet(taosResultSetPointer); +// } Long pSql = 0l; try { @@ -130,21 +130,32 @@ public class TSDBJNIConnector { } catch (Exception e) { e.printStackTrace(); this.freeResultSetImp(this.taos, pSql); - throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding")); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING); + } + if (pSql == TSDBConstants.JNI_CONNECTION_NULL) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + } + if (pSql == TSDBConstants.JNI_SQL_NULL) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL); + } + if (pSql == TSDBConstants.JNI_OUT_OF_MEMORY) { + this.freeResultSetImp(this.taos, pSql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY); } int code = this.getErrCode(pSql); - if (code != 0) { + if (code != TSDBConstants.JNI_SUCCESS) { affectedRows = -1; String msg = this.getErrMsg(pSql); - this.freeResultSetImp(this.taos, pSql); - throw new SQLException(TSDBConstants.WrapErrMsg(msg), "", code); + throw TSDBError.createSQLException(code, msg); } // Try retrieving result set for the executed SQL using the current connection pointer. 
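// Reviewer note (sketch, not part of this patch): the three pSql checks added in
// executeQuery() above share one shape -- free the native handle, then raise the
// mapped SQLException. A hypothetical private helper inside TSDBJNIConnector would
// make the mapping explicit; the constant/error pairs are exactly the ones this
// hunk introduces, and only throwIfJniError() itself is invented.
private void throwIfJniError(long pSql) throws SQLException {
    if (pSql == TSDBConstants.JNI_CONNECTION_NULL) {
        this.freeResultSetImp(this.taos, pSql);
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
    }
    if (pSql == TSDBConstants.JNI_SQL_NULL) {
        this.freeResultSetImp(this.taos, pSql);
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
    }
    if (pSql == TSDBConstants.JNI_OUT_OF_MEMORY) {
        this.freeResultSetImp(this.taos, pSql);
        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
    }
}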
- taosResultSetPointer = this.getResultSetImp(this.taos, pSql); - isResultsetClosed = (taosResultSetPointer == TSDBConstants.JNI_NULL_POINTER); + pSql = this.getResultSetImp(this.taos, pSql); + isResultsetClosed = (pSql == TSDBConstants.JNI_NULL_POINTER); return pSql; } @@ -173,9 +184,9 @@ public class TSDBJNIConnector { * Get resultset pointer * Each connection should have a single open result set at a time */ - public long getResultSet() { - return taosResultSetPointer; - } +// public long getResultSet() { +// return taosResultSetPointer; +// } private native long getResultSetImp(long connection, long pSql); @@ -188,16 +199,16 @@ public class TSDBJNIConnector { /** * Free resultset operation from C to release resultset pointer by JNI */ - public int freeResultSet(long result) { + public int freeResultSet(long pSql) { int res = TSDBConstants.JNI_SUCCESS; - if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - throw new RuntimeException("Invalid result set pointer"); - } +// if (result != taosResultSetPointer && taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { +// throw new RuntimeException("Invalid result set pointer"); +// } - if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - res = this.freeResultSetImp(this.taos, result); - taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - } +// if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) { + res = this.freeResultSetImp(this.taos, pSql); +// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// } isResultsetClosed = true; return res; @@ -207,15 +218,15 @@ public class TSDBJNIConnector { * Close the open result set which is associated to the current connection. If the result set is already * closed, return 0 for success. 
*/ - public int freeResultSet() { - int resCode = TSDBConstants.JNI_SUCCESS; - if (!isResultsetClosed) { - resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); - taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - isResultsetClosed = true; - } - return resCode; - } +// public int freeResultSet() { +// int resCode = TSDBConstants.JNI_SUCCESS; +// if (!isResultsetClosed) { +// resCode = this.freeResultSetImp(this.taos, this.taosResultSetPointer); +// taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; +// isResultsetClosed = true; +// } +// return resCode; +// } private native int freeResultSetImp(long connection, long result); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index c6b41ce004d739a94c00e87f141b9180efa18e57..decf14434ec67e3efdff8e93853574041e7c9530 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -264,17 +264,17 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -284,7 +284,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -321,156 +321,156 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setRef(int parameterIndex, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBlob(int parameterIndex, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setClob(int parameterIndex, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setArray(int parameterIndex, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new 
SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public ResultSetMetaData getMetaData() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setURL(int parameterIndex, URL x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public ParameterMetaData getParameterMetaData() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setRowId(int parameterIndex, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNString(int parameterIndex, String value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNClob(int parameterIndex, NClob value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - throw 
new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override public void setNClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index d06922c680ccec9c4ec6f53cdbe60cb530deae97..80ff49253016c632b6bf85f4756fe2d702a9bffc 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -14,36 +14,17 @@ *****************************************************************************/ package com.taosdata.jdbc; -import java.io.InputStream; -import java.io.Reader; import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import 
java.sql.Blob; -import java.sql.Clob; -import java.sql.Date; -import java.sql.NClob; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Statement; -import java.sql.Time; -import java.sql.Timestamp; +import java.sql.*; import java.util.ArrayList; -import java.util.Calendar; -import java.util.Iterator; import java.util.List; -import java.util.Map; -public class TSDBResultSet implements ResultSet { - private TSDBJNIConnector jniConnector = null; +public class TSDBResultSet extends AbstractResultSet implements ResultSet { + private TSDBJNIConnector jniConnector; + private final TSDBStatement statement; private long resultSetPointer = 0L; - private List columnMetaDataList = new ArrayList(); + private List columnMetaDataList = new ArrayList<>(); private TSDBResultSetRowData rowData; private TSDBResultSetBlockData blockData; @@ -52,24 +33,6 @@ public class TSDBResultSet implements ResultSet { private boolean lastWasNull = false; private final int COLUMN_INDEX_START_VALUE = 1; - private int rowIndex = 0; - - public TSDBJNIConnector getJniConnector() { - return jniConnector; - } - - public void setJniConnector(TSDBJNIConnector jniConnector) { - this.jniConnector = jniConnector; - } - - public long getResultSetPointer() { - return resultSetPointer; - } - - public void setResultSetPointer(long resultSetPointer) { - this.resultSetPointer = resultSetPointer; - } - public void setBatchFetch(boolean batchFetch) { this.batchFetch = batchFetch; } @@ -78,10 +41,6 @@ public class TSDBResultSet implements ResultSet { return this.batchFetch; } - public List getColumnMetaDataList() { - return columnMetaDataList; - } - public void setColumnMetaDataList(List columnMetaDataList) { this.columnMetaDataList = columnMetaDataList; } @@ -90,56 +49,25 @@ public class TSDBResultSet implements ResultSet { return rowData; } - public void setRowData(TSDBResultSetRowData rowData) { - this.rowData = rowData; - } - - public boolean isLastWasNull() { - return lastWasNull; - } - - public void setLastWasNull(boolean lastWasNull) { - this.lastWasNull = lastWasNull; - } - - public TSDBResultSet() { - - } - - public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + public TSDBResultSet(TSDBStatement statement, TSDBJNIConnector connector, long resultSetPointer) throws SQLException { + this.statement = statement; this.jniConnector = connector; this.resultSetPointer = resultSetPointer; + int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList); if (code == TSDBConstants.JNI_CONNECTION_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + } + if (code == TSDBConstants.JNI_RESULT_SET_NULL) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); - } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { + } + if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); } - this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size()); this.blockData = new TSDBResultSetBlockData(this.columnMetaDataList, this.columnMetaDataList.size()); } - public T unwrap(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - try 
{ - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - public boolean isWrapperFor(Class iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return iface.isInstance(this); - } - public boolean next() throws SQLException { if (this.getBatchFetch()) { if (this.blockData.forward()) { @@ -273,7 +201,7 @@ public class TSDBResultSet implements ResultSet { } public long getLong(int columnIndex) throws SQLException { - long res = 0l; + long res = 0L; int colIndex = getTrueColumnIndex(columnIndex); if (!this.getBatchFetch()) { @@ -317,14 +245,6 @@ public class TSDBResultSet implements ResultSet { } } - /* - * (non-Javadoc) - * - * @see java.sql.ResultSet#getBigDecimal(int, int) - * - * @deprecated Use {@code getBigDecimal(int columnIndex)} or {@code - * getBigDecimal(String columnLabel)} - */ @Deprecated public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { return new BigDecimal(getLong(columnIndex)); @@ -334,16 +254,6 @@ public class TSDBResultSet implements ResultSet { return getString(columnIndex).getBytes(); } - public Date getDate(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public Timestamp getTimestamp(int columnIndex) throws SQLException { Timestamp res = null; int colIndex = getTrueColumnIndex(columnIndex); @@ -359,112 +269,11 @@ public class TSDBResultSet implements ResultSet { } } - public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * (non-Javadoc) - * - * @see java.sql.ResultSet#getUnicodeStream(int) - * - * * @deprecated use getCharacterStream in place of - * getUnicodeStream - */ - @Deprecated - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public String getString(String columnLabel) throws SQLException { - return this.getString(this.findColumn(columnLabel)); - } - - public boolean getBoolean(String columnLabel) throws SQLException { - return this.getBoolean(this.findColumn(columnLabel)); - } - - public byte getByte(String columnLabel) throws SQLException { - return this.getByte(this.findColumn(columnLabel)); - } - - public short getShort(String columnLabel) throws SQLException { - return this.getShort(this.findColumn(columnLabel)); - } - - public int getInt(String columnLabel) throws SQLException { - return this.getInt(this.findColumn(columnLabel)); - } - - public long getLong(String columnLabel) throws SQLException { - return this.getLong(this.findColumn(columnLabel)); - } - - public float getFloat(String columnLabel) throws SQLException { - return this.getFloat(this.findColumn(columnLabel)); - } - - public double getDouble(String columnLabel) throws SQLException { - return this.getDouble(this.findColumn(columnLabel)); - } - - /* - * used by spark - */ - @Deprecated - public BigDecimal getBigDecimal(String 
columnLabel, int scale) throws SQLException { - return this.getBigDecimal(this.findColumn(columnLabel), scale); - } - - public byte[] getBytes(String columnLabel) throws SQLException { - return this.getBytes(this.findColumn(columnLabel)); - } - - public Date getDate(String columnLabel) throws SQLException { - return this.getDate(this.findColumn(columnLabel)); - } - - public Time getTime(String columnLabel) throws SQLException { - return this.getTime(this.findColumn(columnLabel)); - } - - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return this.getTimestamp(this.findColumn(columnLabel)); - } - - public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Deprecated - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public String getCursorName() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public ResultSetMetaData getMetaData() throws SQLException { return new TSDBResultSetMetaData(this.columnMetaDataList); } + @Override public Object getObject(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); @@ -476,32 +285,21 @@ public class TSDBResultSet implements ResultSet { } } + @Override public Object getObject(String columnLabel) throws SQLException { return this.getObject(this.findColumn(columnLabel)); } public int findColumn(String columnLabel) throws SQLException { - Iterator colMetaDataIt = this.columnMetaDataList.iterator(); - while (colMetaDataIt.hasNext()) { - ColumnMetaData colMetaData = colMetaDataIt.next(); + for (ColumnMetaData colMetaData : this.columnMetaDataList) { if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) { return colMetaData.getColIndex() + 1; } } - throw new SQLException(TSDBConstants.INVALID_VARIABLES); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } - public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Reader getCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * used by spark - */ + @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); @@ -513,403 +311,111 @@ public class TSDBResultSet implements ResultSet { } } - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - return this.getBigDecimal(this.findColumn(columnLabel)); - } - + @Override public boolean isBeforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isAfterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isFirst() throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void beforeFirst() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void afterLast() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean first() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean last() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean absolute(int row) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean relative(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean previous() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchDirection() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchSize(int rows) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchSize() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - public int getConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowUpdated() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowInserted() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean rowDeleted() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNull(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNull(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean isAfterLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean isFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateDouble(String columnLabel, 
double x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean isLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public void beforeFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public void afterLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean first() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean last() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public int getRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void insertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void 
updateRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean absolute(int row) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void deleteRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void refreshRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean relative(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void cancelRowUpdates() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } - public void moveToInsertRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + @Override + public boolean previous() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public void moveToCurrentRow() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } public Statement getStatement() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Object getObject(int columnIndex, Map> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Array getArray(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Object getObject(String columnLabel, Map> map) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Blob getBlob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Array getArray(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Time 
getTime(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public URL getURL(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public URL getURL(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - public int getHoldability() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return this.statement; } public boolean isClosed() throws SQLException { + //TODO: check if need release resources boolean isClosed = true; if (jniConnector != null) { isClosed = jniConnector.isResultsetClosed(); @@ -917,183 +423,11 @@ public class TSDBResultSet implements ResultSet { return isClosed; } - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public String getNString(int columnIndex) throws SQLException { int colIndex = getTrueColumnIndex(columnIndex); return (String) rowData.get(colIndex); } - public String getNString(String columnLabel) throws SQLException { - return (String) this.getString(columnLabel); - } - - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void 
updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - private int getTrueColumnIndex(int columnIndex) throws SQLException { if (columnIndex < this.COLUMN_INDEX_START_VALUE) { throw new SQLException("Column Index out of range, " + columnIndex + " < " + this.COLUMN_INDEX_START_VALUE); @@ -1103,7 +437,6 @@ 
public class TSDBResultSet implements ResultSet { if (columnIndex > numOfCols) { throw new SQLException("Column Index out of range, " + columnIndex + " > " + numOfCols); } - return columnIndex - 1; } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java index d6d69bd8b02db7de9350c023e696f4cf2e2f5e65..0c0071a94902262d7f2497070c1034808608b329 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java @@ -29,11 +29,11 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public T unwrap(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public int getColumnCount() throws SQLException { @@ -94,7 +94,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public String getSchemaName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public int getPrecision(int column) throws SQLException { @@ -125,11 +125,11 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public String getTableName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public String getCatalogName(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public int getColumnType(int column) throws SQLException { @@ -173,7 +173,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData { } public boolean isDefinitelyWritable(int column) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public String getColumnClassName(int column) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java index 059962a7a120073c17258652f9fb3609cc5072be..98b823a3c1d5cefb99e5c5824ce334d42cc40d9e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java @@ -1153,11 +1153,11 @@ public class TSDBResultSetWrapper implements ResultSet { } public T getObject(int columnIndex, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } public T getObject(String columnLabel, Class type) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
index 82a6b4a3fff634b8bedfe338aeede940860e866a..c4c1904629fcd277bf8c9c6f3db422f1aa7b6ffc 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
@@ -14,34 +14,24 @@
  *****************************************************************************/
 package com.taosdata.jdbc;
 
-import com.taosdata.jdbc.utils.TaosInfo;
-
 import java.sql.*;
 import java.util.ArrayList;
 import java.util.List;
 
-public class TSDBStatement implements Statement {
-    private TSDBJNIConnector connector;
+public class TSDBStatement extends AbstractStatement {
+    private TSDBJNIConnector connector;
 
     /**
      * To store batched commands
      */
     protected List<String> batchedArgs;
-
-    /**
-     * Timeout for a query
-     */
-    protected int queryTimeout = 0;
-
-    private Long pSql = 0l;
-
     /**
      * Status of current statement
      */
-    private boolean isClosed = true;
-    private int affectedRows = 0;
-
+    private boolean isClosed;
+    private int affectedRows = -1;
     private TSDBConnection connection;
+    private TSDBResultSet resultSet;
 
     public void setConnection(TSDBConnection connection) {
         this.connection = connection;
@@ -50,220 +40,89 @@ public class TSDBStatement implements Statement {
     TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
         this.connection = connection;
         this.connector = connector;
-        this.isClosed = false;
-    }
-
-    @Override
-    public <T> T unwrap(Class<T> iface) throws SQLException {
-        try {
-            return iface.cast(this);
-        } catch (ClassCastException cce) {
-            throw new SQLException("Unable to unwrap to " + iface.toString());
-        }
-    }
-
-    @Override
-    public boolean isWrapperFor(Class<?> iface) throws SQLException {
-        return iface.isInstance(this);
     }
 
     public ResultSet executeQuery(String sql) throws SQLException {
-        if (isClosed()) {
+        // check if closed
+        if (isClosed())
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-        }
+        //TODO: if an insert statement is executed through executeQuery(), the SQL is run first and pSql is only inspected afterwards to detect that it was an insert, so by then the insert has already succeeded
 
-        // TODO make sure it is not a update query
-        pSql = this.connector.executeQuery(sql);
-        long resultSetPointer = this.connector.getResultSet();
-        if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
+        // execute query
+        long pSql = this.connector.executeQuery(sql);
+        // if pSql is create/insert/update/delete/alter SQL
+        if (this.connector.isUpdateQuery(pSql)) {
             this.connector.freeResultSet(pSql);
-            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
-        }
-
-        // create/insert/update/delete/alter
-        if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
-            this.connector.freeResultSet(pSql);
-            return null;
-        }
-
-        if (!this.connector.isUpdateQuery(pSql)) {
-            TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer);
-            res.setBatchFetch(this.connection.getBatchFetch());
-            return res;
-        } else {
-            this.connector.freeResultSet(pSql);
-            return null;
+            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY);
         }
+        TSDBResultSet res = new TSDBResultSet(this, this.connector, pSql);
+        res.setBatchFetch(this.connection.getBatchFetch());
+        return res;
     }
 
     public int executeUpdate(String sql) throws SQLException {
-        if (isClosed()) {
+        if (isClosed())
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-        }
-        // TODO check if current query is update query
-        pSql = this.connector.executeQuery(sql);
-        long resultSetPointer = this.connector.getResultSet();
-
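The rewritten executeQuery() above now rejects DML outright instead of silently returning null, and the executeUpdate() rewrite continuing below applies the mirror-image check. A sketch of the caller-visible contract, with a hypothetical table name and connection setup elided:

    // Sketch of the enforced contract (table name and SQL are hypothetical):
    Statement stmt = conn.createStatement();
    ResultSet rs = stmt.executeQuery("select * from weather");           // ok
    try {
        stmt.executeQuery("insert into weather values (now, 23.5)");     // DML
    } catch (SQLException e) {
        // 0x2307 ERROR_INVALID_WITH_EXECUTEQUERY: "Can not issue data
        // manipulation statements with executeQuery()"
    }
    int rows = stmt.executeUpdate("insert into weather values (now, 23.5)"); // ok

Note the caveat recorded in the TODO above: the SQL executes before pSql is inspected, so a rejected insert has already taken effect by the time the exception is thrown.
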
- if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (!this.connector.isUpdateQuery(pSql)) { this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEUPDATE); } - - this.affectedRows = this.connector.getAffectedRows(pSql); + int affectedRows = this.connector.getAffectedRows(pSql); this.connector.freeResultSet(pSql); - - return this.affectedRows; - } - - public String getErrorMsg(long pSql) { - return this.connector.getErrMsg(pSql); + return affectedRows; } public void close() throws SQLException { if (!isClosed) { - if (!this.connector.isResultsetClosed()) { - this.connector.freeResultSet(); - } + if (this.resultSet != null) + this.resultSet.close(); isClosed = true; } } - public int getMaxFieldSize() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - return 0; - } - - public void setMaxFieldSize(int max) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getMaxRows() throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - // always set maxRows to zero, meaning unlimitted rows in a resultSet - return 0; - } - - public void setMaxRows(int max) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - // always set maxRows to zero, meaning unlimited rows in a resultSet - } - - public void setEscapeProcessing(boolean enable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getQueryTimeout() throws SQLException { - return queryTimeout; - } - - public void setQueryTimeout(int seconds) throws SQLException { - this.queryTimeout = seconds; - } - - public void cancel() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public SQLWarning getWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void clearWarnings() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setCursorName(String name) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public boolean execute(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - boolean res = true; - pSql = this.connector.executeQuery(sql); - long resultSetPointer = this.connector.getResultSet(); - - if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { - this.connector.freeResultSet(pSql); - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); - } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { - // no result set is retrieved + // check if closed + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + // execute query + long pSql = this.connector.executeQuery(sql); + // if pSql is create/insert/update/delete/alter SQL + if (this.connector.isUpdateQuery(pSql)) { + 
this.affectedRows = this.connector.getAffectedRows(pSql); this.connector.freeResultSet(pSql); - res = false; + return false; } - return res; + this.resultSet = new TSDBResultSet(this, this.connector, pSql); + this.resultSet.setBatchFetch(this.connection.getBatchFetch()); + return true; } public ResultSet getResultSet() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - long resultSetPointer = connector.getResultSet(); - TSDBResultSet resSet = null; - if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { - resSet = new TSDBResultSet(connector, resultSetPointer); - } - return resSet; + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); +// long resultSetPointer = connector.getResultSet(); +// TSDBResultSet resSet = null; +// if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) { +// resSet = new TSDBResultSet(connector, resultSetPointer); +// } + return this.resultSet; } public int getUpdateCount() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); return this.affectedRows; } - public boolean getMoreResults() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getFetchDirection() throws SQLException { - return ResultSet.FETCH_FORWARD; -// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /* - * used by spark - */ - public void setFetchSize(int rows) throws SQLException { - } - - /* - * used by spark - */ - public int getFetchSize() throws SQLException { - return 4096; - } - - public int getResultSetConcurrency() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getResultSetType() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public void addBatch(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs == null) { batchedArgs = new ArrayList<>(); } @@ -271,83 +130,41 @@ public class TSDBStatement implements Statement { } public void clearBatch() throws SQLException { - batchedArgs.clear(); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs != null) + batchedArgs.clear(); } public int[] executeBatch() throws SQLException { - if (isClosed) { - throw new SQLException("Invalid method call on a closed statement."); - } - if (batchedArgs == null) { - throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); - } else { - int[] res = new int[batchedArgs.size()]; - for (int i = 0; i < batchedArgs.size(); i++) { - res[i] = executeUpdate(batchedArgs.get(i)); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (batchedArgs == null || batchedArgs.isEmpty()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY); + + int[] res = new int[batchedArgs.size()]; + for (int i = 0; i < batchedArgs.size(); i++) { + boolean isSelect = execute(batchedArgs.get(i)); + if (isSelect) { + res[i] = SUCCESS_NO_INFO; + } else { + res[i] = getUpdateCount(); } - 
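// [editor's note, illustrative only, not part of the patch] executeBatch() is rebuilt on
// top of execute(): entries that produce a result set report Statement.SUCCESS_NO_INFO
// (-2), all others report their update count, for example:
//
//     stmt.addBatch("insert into tb values (now, 1)");   // hypothetical usage sketch
//     stmt.addBatch("insert into tb values (now, 2)");
//     int[] counts = stmt.executeBatch();                // e.g. {1, 1}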
return res; } + return res; } public Connection getConnection() throws SQLException { - if (this.connector != null) - return this.connection; - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean getMoreResults(int current) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public int getResultSetHoldability() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (this.connector == null) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + return this.connection; } public boolean isClosed() throws SQLException { return isClosed; } - public void setPoolable(boolean poolable) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isPoolable() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - public void closeOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public boolean isCloseOnCompletion() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java index deffd9aa2ae88802f71af5cbec66c5896cf4e19a..c21a058ba2bdc8de20a7ca2e9ceb9369396702be 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java @@ -16,9 +16,6 @@ package com.taosdata.jdbc; import javax.management.OperationsException; import java.sql.SQLException; -import java.util.Map; -import java.util.TimerTask; -import java.util.concurrent.*; public class TSDBSubscribe { private TSDBJNIConnector connecter = null; @@ -36,9 +33,8 @@ public class TSDBSubscribe { /** * consume * - * @throws OperationsException, SQLException */ - public TSDBResultSet consume() throws OperationsException, SQLException { + public TSDBResultSet consume() throws SQLException { if (this.connecter.isClosed()) { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } @@ -50,7 +46,7 @@ public class TSDBSubscribe { } else 
if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) { return null; } else { - return new TSDBResultSet(this.connecter, resultSetPointer); + return new TSDBResultSet(null, this.connecter, resultSetPointer); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java new file mode 100644 index 0000000000000000000000000000000000000000..5b7539d434e0c5595d75da0a50baca9ab59c953b --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/WrapperImpl.java @@ -0,0 +1,21 @@ +package com.taosdata.jdbc; + +import java.sql.SQLException; +import java.sql.Wrapper; + +public class WrapperImpl implements Wrapper { + + @Override + public <T> T unwrap(Class<T> iface) throws SQLException { + try { + return iface.cast(this); + } catch (ClassCastException cce) { + throw new SQLException("Unable to unwrap to " + iface.toString()); + } + } + + @Override + public boolean isWrapperFor(Class<?> iface) throws SQLException { + return iface.isInstance(this); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 5260b780bd510edad6ef8ea9a481fa334cca50f6..83f6fb839aa76b8464d18341b9c20086a07d402a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -1,34 +1,23 @@ package com.taosdata.jdbc.rs; -import com.taosdata.jdbc.TSDBConstants; -import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.*; import java.sql.*; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Map; import java.util.Properties; -import java.util.concurrent.Executor; -public class RestfulConnection implements Connection { +public class RestfulConnection extends AbstractConnection { - private static final String CONNECTION_IS_CLOSED = "connection is closed."; - private static final String AUTO_COMMIT_IS_TRUE = "auto commit is true"; private final String host; private final int port; - private final Properties props; - private volatile String database; private final String url; + private volatile String database; /******************************************************/ private boolean isClosed; - private DatabaseMetaData metadata; - private Map<String, Class<?>> typeMap; - private Properties clientInfoProps = new Properties(); + private final DatabaseMetaData metadata; public RestfulConnection(String host, String port, Properties props, String database, String url) { this.host = host; this.port = Integer.parseInt(port); - this.props = props; this.database = database; this.url = url; this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this); @@ -37,7 +26,7 @@ public class RestfulConnection implements Connection { @Override public Statement createStatement() throws SQLException { if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return new RestfulStatement(this, database); } @@ -45,59 +34,9 @@ public class RestfulConnection implements Connection { @Override public PreparedStatement prepareStatement(String sql) throws SQLException { if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); //TODO: prepareStatement - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String nativeSQL(String sql) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - //nothing did - return sql; - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (!autoCommit) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean getAutoCommit() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return true; - } - - @Override - public void commit() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(AUTO_COMMIT_IS_TRUE); - //nothing to do - } - - @Override - public void rollback() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(AUTO_COMMIT_IS_TRUE); - //nothing to do + throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG); } @Override @@ -116,356 +55,11 @@ public class RestfulConnection implements Connection { @Override public DatabaseMetaData getMetaData() throws SQLException { if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return this.metadata; } - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - // nothing to do - } - - @Override - public boolean isReadOnly() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return true; - } - - @Override - public void setCatalog(String catalog) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - synchronized (RestfulConnection.class) { - this.database = catalog; - } - } - - @Override - public String getCatalog() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return this.database; - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - switch (level) { - case Connection.TRANSACTION_NONE: - break; - case Connection.TRANSACTION_READ_UNCOMMITTED: - case Connection.TRANSACTION_READ_COMMITTED: - case Connection.TRANSACTION_REPEATABLE_READ: - case Connection.TRANSACTION_SERIALIZABLE: - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - default: - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - } - } - - @Override - public int getTransactionIsolation() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - //Connection.TRANSACTION_NONE specifies that transactions are not supported. 
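// [editor's note, illustrative only, not part of the patch] TDengine has no transaction
// support, so the connection always reports Connection.TRANSACTION_NONE and a permanent
// auto-commit of true; the boilerplate deleted here presumably moves into the new
// AbstractConnection base class, and callers can still rely on:
//
//     assert conn.getTransactionIsolation() == Connection.TRANSACTION_NONE;   // sketch
//     assert conn.getAutoCommit();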
- return Connection.TRANSACTION_NONE; - } - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - return null; - } - - @Override - public void clearWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - //nothing to do - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - return createStatement(); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); - - return this.prepareStatement(sql); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) - throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Map<String, Class<?>> getTypeMap() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - synchronized (RestfulConnection.class) { - if (this.typeMap == null) { - this.typeMap = new HashMap<>(); - } - return this.typeMap; - } - } - - @Override - public void setTypeMap(Map<String, Class<?>> map) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - - synchronized (RestfulConnection.class) { - this.typeMap = map; - } - } - - @Override - public void setHoldability(int holdability) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - - @Override - public Savepoint setSavepoint() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - if (isClosed()) - throw new 
SQLException(CONNECTION_IS_CLOSED); - if (getAutoCommit()) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - return createStatement(resultSetType, resultSetConcurrency); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT) - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - return prepareStatement(sql, resultSetType, resultSetConcurrency); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob createClob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob createBlob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob createNClob() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean isValid(int timeout) throws SQLException { - if (timeout < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - // TODO: - /* The driver shall submit a query on the connection or use some other mechanism that positively verifies - the connection is still valid when this method is called.*/ - return !isClosed(); - } - - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - if (isClosed) - throw new SQLClientInfoException(); - clientInfoProps.setProperty(name, value); - } - - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - if (isClosed) - throw new SQLClientInfoException(); - - for (Enumeration enumer = properties.keys(); 
enumer.hasMoreElements(); ) { - String name = (String) enumer.nextElement(); - clientInfoProps.put(name, properties.getProperty(name)); - } - } - - @Override - public String getClientInfo(String name) throws SQLException { - if (isClosed) - throw new SQLClientInfoException(); - - return clientInfoProps.getProperty(name); - } - - @Override - public Properties getClientInfo() throws SQLException { - if (isClosed) - throw new SQLClientInfoException(); - - return clientInfoProps; - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void setSchema(String schema) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - synchronized (RestfulConnection.class) { - this.database = schema; - } - } - - @Override - public String getSchema() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return this.database; - } - - @Override - public void abort(Executor executor) throws SQLException { - if (executor == null) { - throw new SQLException("Executor can not be null"); - } - - executor.execute(() -> { - try { - close(); - } catch (SQLException e) { - e.printStackTrace(); - } - }); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getNetworkTimeout() throws SQLException { - if (isClosed()) - throw new SQLException(CONNECTION_IS_CLOSED); - return 0; - } - - @Override - public <T> T unwrap(Class<T> iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class<?> iface) throws SQLException { - return iface.isInstance(this); - } - public String getHost() { return host; } @@ -474,10 +68,6 @@ public class RestfulConnection implements Connection { return port; } - public Properties getProps() { - return props; - } - public String getDatabase() { return database; } @@ -485,4 +75,4 @@ public class RestfulConnection implements Connection { public String getUrl() { return url; } -} +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java index 0ef64e4a9ee9e58f9e1010889b030a6c0be13304..d108f46a796bbae350f942a17f90efa29cc3021e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaData.java @@ -1,10 +1,12 @@ package com.taosdata.jdbc.rs; -import com.taosdata.jdbc.*; +import com.taosdata.jdbc.AbstractDatabaseMetaData; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; -import java.sql.*; -import java.util.ArrayList; -import java.util.List; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; public class RestfulDatabaseMetaData extends 
AbstractDatabaseMetaData { @@ -33,11 +35,10 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { return RestfulDriver.class.getName(); } - @Override public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { if (connection == null || connection.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } return super.getTables(catalog, schemaPattern, tableNamePattern, types, connection); } @@ -45,14 +46,14 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { @Override public ResultSet getCatalogs() throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getCatalogs(connection); } @Override public ResultSet getTableTypes() throws SQLException { if (connection == null || connection.isClosed()) { - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } return super.getTableTypes(); } @@ -60,21 +61,26 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData { @Override public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, connection); } @Override public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getPrimaryKeys(catalog, schema, table, connection); } + @Override + public Connection getConnection() throws SQLException { + return this.connection; + } + @Override public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { if (connection == null || connection.isClosed()) - throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); return super.getSuperTables(catalog, schemaPattern, tableNamePattern, connection); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index fca8847114529280bf70d4bf8e56508a572ef7e4..a8a92e412303b7e5ef871b86a9788c1fa774de0e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -2,7 +2,7 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; -import com.taosdata.jdbc.AbstractTaosDriver; +import com.taosdata.jdbc.AbstractDriver; import com.taosdata.jdbc.TSDBConstants; import com.taosdata.jdbc.TSDBDriver; import 
com.taosdata.jdbc.rs.util.HttpClientPoolUtil; @@ -11,7 +11,7 @@ import java.sql.*; import java.util.Properties; import java.util.logging.Logger; -public class RestfulDriver extends AbstractTaosDriver { +public class RestfulDriver extends AbstractDriver { private static final String URL_PREFIX = "jdbc:TAOS-RS://"; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 38d3f2b6aa21427647d108038fdd616927221520..ebeeded5b09bd3e92d8de931bf15a5259c71eec3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -2,19 +2,16 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.AbstractResultSet; import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; import java.sql.*; import java.util.ArrayList; -import java.util.Calendar; import java.util.List; -import java.util.Map; -public class RestfulResultSet implements ResultSet { +public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; private int pos = -1; @@ -144,7 +141,6 @@ public class RestfulResultSet implements ResultSet { return resultSet.get(pos).get(columnIndex).toString(); } - @Override public boolean getBoolean(int columnIndex) throws SQLException { if (isClosed()) @@ -155,14 +151,6 @@ public class RestfulResultSet implements ResultSet { return result == 0 ? false : true; } - @Override - public byte getByte(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public short getShort(int columnIndex) throws SQLException { if (isClosed()) @@ -217,40 +205,6 @@ public class RestfulResultSet implements ResultSet { return columnIndex - 1; } - /*******************************************************************************************************************/ - - @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public byte[] getBytes(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public Timestamp 
getTimestamp(int columnIndex) throws SQLException { if (isClosed()) @@ -262,136 +216,7 @@ public class RestfulResultSet implements ResultSet { return Timestamp.valueOf(strDate); } - @Override - public InputStream getAsciiStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public InputStream getBinaryStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /*************************************************************************************************************/ - - @Override - public String getString(String columnLabel) throws SQLException { - return getString(findColumn(columnLabel)); - } - - @Override - public boolean getBoolean(String columnLabel) throws SQLException { - return getBoolean(findColumn(columnLabel)); - } - - @Override - public byte getByte(String columnLabel) throws SQLException { - return getByte(findColumn(columnLabel)); - } - - @Override - public short getShort(String columnLabel) throws SQLException { - return getShort(findColumn(columnLabel)); - } - - @Override - public int getInt(String columnLabel) throws SQLException { - return getInt(findColumn(columnLabel)); - } - - @Override - public long getLong(String columnLabel) throws SQLException { - return getLong(findColumn(columnLabel)); - } - - @Override - public float getFloat(String columnLabel) throws SQLException { - return getFloat(findColumn(columnLabel)); - } - - @Override - public double getDouble(String columnLabel) throws SQLException { - return getDouble(findColumn(columnLabel)); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return getBigDecimal(findColumn(columnLabel)); - } - - @Override - public byte[] getBytes(String columnLabel) throws SQLException { - return getBytes(findColumn(columnLabel)); - } - - @Override - public Date getDate(String columnLabel) throws SQLException { - return getDate(findColumn(columnLabel)); - } - - @Override - public Time getTime(String columnLabel) throws SQLException { - return getTime(findColumn(columnLabel)); - } - - @Override - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return getTimestamp(findColumn(columnLabel)); - } - - @Override - public InputStream getAsciiStream(String columnLabel) throws SQLException { - return getAsciiStream(findColumn(columnLabel)); - } - - @Override - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - return getUnicodeStream(findColumn(columnLabel)); - } - - @Override - public InputStream getBinaryStream(String columnLabel) throws SQLException { - return getBinaryStream(findColumn(columnLabel)); - } - /*************************************************************************************************************/ - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw new 
SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return null; - } - - @Override - public void clearWarnings() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return; - } - - @Override - public String getCursorName() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public ResultSetMetaData getMetaData() throws SQLException { if (isClosed()) @@ -400,14 +225,6 @@ public class RestfulResultSet implements ResultSet { return this.metaData; } - @Override - public Object getObject(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public Object getObject(String columnLabel) throws SQLException { return getObject(findColumn(columnLabel)); @@ -424,38 +241,6 @@ public class RestfulResultSet implements ResultSet { return columnIndex + 1; } - @Override - public Reader getCharacterStream(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getCharacterStream(String columnLabel) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public boolean isBeforeFirst() throws SQLException { if (isClosed()) @@ -475,7 +260,6 @@ public class RestfulResultSet implements ResultSet { public boolean isFirst() throws SQLException { if (isClosed()) throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return this.pos == 0; } @@ -509,7 +293,6 @@ public class RestfulResultSet implements ResultSet { this.pos = this.resultSet.size(); } } - } @Override @@ -554,7 +337,7 @@ public class RestfulResultSet implements ResultSet { @Override public boolean absolute(int row) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); // if (this.resultSet.size() == 0) // return false; @@ -586,721 +369,43 @@ public class RestfulResultSet implements ResultSet { // } // } - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean relative(int rows) throws 
SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public boolean previous() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - if ((direction != ResultSet.FETCH_FORWARD) && (direction != ResultSet.FETCH_REVERSE) && (direction != ResultSet.FETCH_UNKNOWN)) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - - if (!(getType() == ResultSet.TYPE_FORWARD_ONLY && direction == ResultSet.FETCH_FORWARD)) - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getFetchDirection() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - return ResultSet.FETCH_FORWARD; + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override - public void setFetchSize(int rows) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - if (rows < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + public String getNString(int columnIndex) throws SQLException { + return getString(columnIndex); } @Override - public int getFetchSize() throws SQLException { + public Statement getStatement() throws SQLException { if (isClosed()) throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - return this.resultSet.size(); - } - - @Override - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - @Override - public int getConcurrency() throws SQLException { - return ResultSet.CONCUR_READ_ONLY; - } - - @Override - public boolean rowUpdated() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean rowInserted() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean rowDeleted() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNull(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateShort(int columnIndex, short x) throws 
SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNull(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return this.statement; } @Override - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + 
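// [editor's note, illustrative only, not part of the patch] getNString() and
// getStatement() added above give RestfulResultSet two small but useful behaviours:
// NCHAR values arrive over the REST interface as plain strings, so getNString() simply
// delegates to getString(), and getStatement() returns the statement that produced
// this result set, e.g.:
//
//     ResultSet rs = stmt.executeQuery("select name from tb");   // hypothetical usage sketch
//     String name = rs.getNString(1);                            // identical to rs.getString(1)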
public boolean isClosed() throws SQLException { + return isClosed; } - @Override - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override - public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void insertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void deleteRow() throws SQLException { - throw 
new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void refreshRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void cancelRowUpdates() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void moveToInsertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void moveToCurrentRow() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Statement getStatement() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return this.statement; - } - - @Override - public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Array getArray(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - /******************************************************************************************************************/ - @Override - public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Blob getBlob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Array getArray(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public URL getURL(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public URL getURL(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return ResultSet.HOLD_CURSORS_OVER_COMMIT; - } - - @Override - public boolean isClosed() throws SQLException { - return isClosed; - } - - @Override - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void 
updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String getNString(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public String getNString(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new 
SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - 
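The long run of deletions above and below removes identical not-supported stubs from the result set class, the same clean-up this patch applies to RestfulStatement further down, where "implements Statement" becomes "extends AbstractStatement" and the per-method boilerplate disappears. A minimal sketch of that centralization, assuming a shared base class on the ResultSet side (the class name UnsupportedByDefaultResultSet is illustrative and not taken from this patch; the message string is the one the driver's tests assert on):

import java.net.URL;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;

// Hypothetical base class: subclasses inherit throwing defaults and
// override only the JDBC calls the driver can actually serve.
abstract class UnsupportedByDefaultResultSet {

    // Same message the driver's tests check for.
    protected SQLFeatureNotSupportedException unsupported() {
        return new SQLFeatureNotSupportedException("this operation is NOT supported currently!");
    }

    public URL getURL(int columnIndex) throws SQLException {
        throw unsupported();
    }

    public URL getURL(String columnLabel) throws SQLException {
        throw unsupported();
    }
}

With the defaults kept in one place, each stub deleted in this hunk is inherited instead of hand-written.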
@Override - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public <T> T getObject(int columnIndex, Class<T> type) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public <T> T getObject(String columnLabel, Class<T> type) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public <T> T unwrap(Class<T> iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class<?> iface) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED)); - - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index 14ea3180924a8b23055ff610a8f7db5a70522039..f10d914859ad19c12e31eb5892d26fcfc592058e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -2,7 +2,10 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; +import com.taosdata.jdbc.AbstractStatement; import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; import com.taosdata.jdbc.rs.util.HttpClientPoolUtil; import com.taosdata.jdbc.utils.SqlSyntaxValidator; @@ -12,7 +15,7 @@ import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; -public class RestfulStatement implements Statement { +public class RestfulStatement extends AbstractStatement { private boolean closed; private String database; @@ -20,7 +23,6 @@ public class RestfulStatement implements Statement { private volatile RestfulResultSet resultSet; private volatile int affectedRows; - private volatile boolean closeOnCompletion; public RestfulStatement(RestfulConnection conn, String database) { this.conn = conn; @@ -63,9 +65,9 @@ public class RestfulStatement implements Statement { @Override public ResultSet executeQuery(String sql) throws SQLException { if (isClosed()) - throw new SQLException("statement already closed"); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); if
(!SqlSyntaxValidator.isValidForExecuteQuery(sql)) - throw new SQLException("not a valid sql for executeQuery: " + sql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { @@ -73,7 +75,7 @@ public class RestfulStatement implements Statement { } if (this.database == null || this.database.isEmpty()) - throw new SQLException("Database not specified or available"); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE); HttpClientPoolUtil.execute(url, "use " + this.database); return executeOneQuery(url, sql); } @@ -81,9 +83,9 @@ public class RestfulStatement implements Statement { @Override public int executeUpdate(String sql) throws SQLException { if (isClosed()) - throw new SQLException("statement already closed"); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) - throw new SQLException("not a valid sql for executeUpdate: " + sql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { @@ -91,7 +93,8 @@ public class RestfulStatement implements Statement { } if (this.database == null || this.database.isEmpty()) - throw new SQLException("Database not specified or available"); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_DATABASE_NOT_SPECIFIED_OR_AVAILABLE); + HttpClientPoolUtil.execute(url, "use " + this.database); return executeOneUpdate(url, sql); } @@ -104,91 +107,12 @@ public class RestfulStatement implements Statement { } } - @Override - public int getMaxFieldSize() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return TSDBConstants.maxFieldSize; - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (max < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - // nothing to do - } - - @Override - public int getMaxRows() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return 0; - } - - @Override - public void setMaxRows(int max) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (max < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - // nothing to do - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - } - - @Override - public int getQueryTimeout() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return 0; - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (seconds < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - } - - @Override - public void cancel() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - if (isClosed()) - throw 
new SQLException(TSDBConstants.STATEMENT_CLOSED); - return null; - } - - @Override - public void clearWarnings() throws SQLException { - // nothing to do - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - } - - @Override - public void setCursorName(String name) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - @Override public boolean execute(String sql) throws SQLException { if (isClosed()) - throw new SQLException("Invalid method call on a closed statement."); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); if (!SqlSyntaxValidator.isValidForExecute(sql)) - throw new SQLException("not a valid sql for execute: " + sql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql); // if a "use" statement was executed, the catalog of the current Statement should be switched to the new database final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; @@ -213,7 +137,7 @@ public class RestfulStatement implements Statement { private ResultSet executeOneQuery(String url, String sql) throws SQLException { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) - throw new SQLException("not a select sql for executeQuery: " + sql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); // row data String result = HttpClientPoolUtil.execute(url, sql); @@ -244,7 +168,7 @@ public class RestfulStatement implements Statement { private int executeOneUpdate(String url, String sql) throws SQLException { if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) - throw new SQLException("not a valid sql for executeUpdate: " + sql); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); String result = HttpClientPoolUtil.execute(url, sql); JSONObject jsonObject = JSON.parseObject(result); @@ -265,63 +189,16 @@ public class RestfulStatement implements Statement { @Override public int getUpdateCount() throws SQLException { - if (isClosed()) { - throw new SQLException("Invalid method call on a closed statement."); - } - return this.affectedRows; - } - - @Override - public boolean getMoreResults() throws SQLException { - return getMoreResults(CLOSE_CURRENT_RESULT); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - this.resultSet.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return this.resultSet.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (rows < 0) - throw new SQLException(TSDBConstants.INVALID_VARIABLES); - //nothing to do - } - - @Override - public int getFetchSize() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return 0; - } - - @Override - public int getResultSetConcurrency() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.resultSet.getConcurrency(); - } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); -
@Override - public int getResultSetType() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.resultSet.getType(); + return this.affectedRows; } @Override public void addBatch(String sql) throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); //TODO: } @@ -339,119 +216,14 @@ public class RestfulStatement implements Statement { @Override public Connection getConnection() throws SQLException { if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); return this.conn; } - @Override - public boolean getMoreResults(int current) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - if (resultSet == null) - return false; - -// switch (current) { -// case CLOSE_CURRENT_RESULT: -// resultSet.close(); -// break; -// case KEEP_CURRENT_RESULT: -// break; -// case CLOSE_ALL_RESULTS: -// resultSet.close(); -// break; -// default: -// throw new SQLException(TSDBConstants.INVALID_VARIABLES); -// } -// return next; - return false; - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - @Override - public int getResultSetHoldability() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.resultSet.getHoldability(); - } - @Override public boolean isClosed() throws SQLException { return closed; } - @Override - public void setPoolable(boolean poolable) throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - //nothing to do - } - @Override - public boolean isPoolable() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return false; - } - - @Override - public void closeOnCompletion() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - this.closeOnCompletion = true; - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - if (isClosed()) - throw new SQLException(TSDBConstants.STATEMENT_CLOSED); - return this.closeOnCompletion; - 
} - - @Override - public <T> T unwrap(Class<T> iface) throws SQLException { - try { - return iface.cast(this); - } catch (ClassCastException cce) { - throw new SQLException("Unable to unwrap to " + iface.toString()); - } - } - - @Override - public boolean isWrapperFor(Class<?> iface) throws SQLException { - return iface.isInstance(this); - } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java deleted file mode 100644 index 66d23f2ffafb8d266d0f24651676214b6c48d418..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ConnectionTest.java +++ /dev/null @@ -1,39 +0,0 @@ -package com.taosdata.jdbc; - -import org.junit.Assert; -import org.junit.Test; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Properties; - - -public class ConnectionTest { - private Connection connection; - private Statement statement; - private static String host = "127.0.0.1"; - - @Test - public void testConnection() { - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); - Assert.assertTrue(null != connection); - statement = connection.createStatement(); - Assert.assertTrue(null != statement); - statement.close(); - connection.close(); - } catch (ClassNotFoundException e) { - return; - } catch (SQLException e) { - e.printStackTrace(); - } - } -} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java index 3d80ff066cacfdf71f087942f11e2ee5e86b65ee..fb0053cb4b1be8c6aa72ed9ae6b1d70a073b7cff 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java @@ -7,6 +7,7 @@ import org.junit.Test; import javax.sql.rowset.serial.SerialBlob; import javax.sql.rowset.serial.SerialClob; +import java.io.UnsupportedEncodingException; import java.sql.*; import java.util.HashMap; import java.util.Properties; @@ -39,7 +40,6 @@ public class ResultSetTest { } catch (ClassNotFoundException | SQLException e) { return; } - } @Test @@ -54,51 +54,38 @@ public class ResultSetTest { short v6 = 12; boolean v7 = false; String v8 = "TDengine is powerful"; - sql = "insert into " + dbName + "." + tName + " values (" + ts + "," + v1 + "," + v2 + "," + v3 + "," + v4 + ",\"" + v5 + "\"," + v6 + "," + v7 + ",\"" + v8 + "\")"; - try { statement.executeUpdate(sql); assertEquals(1, statement.getUpdateCount()); } catch (SQLException e) { assert false : "insert error " + e.getMessage(); } - try { - statement.executeQuery("select * from " + dbName + "." + tName + " where ts = " + ts); + statement.execute("select * from " + dbName + "."
+ tName + " where ts = " + ts); resSet = statement.getResultSet(); System.out.println(((TSDBResultSet) resSet).getRowData()); while (resSet.next()) { assertEquals(ts, resSet.getLong(1)); assertEquals(ts, resSet.getLong("ts")); - System.out.println(resSet.getTimestamp(1)); - assertEquals(v1, resSet.getInt(2)); assertEquals(v1, resSet.getInt("k1")); - assertEquals(v2, resSet.getLong(3)); assertEquals(v2, resSet.getLong("k2")); - assertEquals(v3, resSet.getFloat(4), 7); assertEquals(v3, resSet.getFloat("k3"), 7); - assertEquals(v4, resSet.getDouble(5), 13); assertEquals(v4, resSet.getDouble("k4"), 13); - assertEquals(v5, resSet.getString(6)); assertEquals(v5, resSet.getString("k5")); - assertEquals(v6, resSet.getShort(7)); assertEquals(v6, resSet.getShort("k6")); - assertEquals(v7, resSet.getBoolean(8)); assertEquals(v7, resSet.getBoolean("k7")); - assertEquals(v8, resSet.getString(9)); assertEquals(v8, resSet.getString("k8")); - resSet.getBytes(9); resSet.getObject(6); resSet.getObject("k8"); @@ -111,684 +98,145 @@ public class ResultSetTest { } } - @Test - public void testUnsupport() throws SQLException { - statement.executeQuery("show databases"); + @Test(expected = SQLException.class) + public void testUnsupport() throws SQLException, UnsupportedEncodingException { + statement.execute("show databases"); resSet = statement.getResultSet(); Assert.assertNotNull(resSet.unwrap(TSDBResultSet.class)); Assert.assertTrue(resSet.isWrapperFor(TSDBResultSet.class)); - try { - resSet.getAsciiStream(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getUnicodeStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBinaryStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getAsciiStream(""); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getUnicodeStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBinaryStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getWarnings(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.clearWarnings(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCursorName(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isBeforeFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isAfterLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isFirst(); - } catch (SQLException 
e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.isLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.beforeFirst(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.afterLast(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.first(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.last(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.absolute(1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.relative(1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.previous(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.setFetchDirection(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getFetchDirection(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.setFetchSize(0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getFetchSize(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getConcurrency(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowUpdated(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowInserted(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.rowDeleted(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNull(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBoolean(0, true); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateByte(0, (byte) 2); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateShort(0, (short) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateInt(0, 0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateLong(0, 0l); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported 
currently!")); - } - try { - resSet.updateFloat(0, 3.14f); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDouble(0, 3.1415); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBigDecimal(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBytes(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNull(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBoolean("", false); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateByte("", (byte) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateShort("", (short) 1); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateInt("", 0); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateLong("", 0l); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateFloat("", 3.14f); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDouble("", 3.1415); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBigDecimal(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported 
currently!")); - } - try { - resSet.updateString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBytes(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateObject(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.insertRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.deleteRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.refreshRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.cancelRowUpdates(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.moveToInsertRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.moveToCurrentRow(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getStatement(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getObject(0, new HashMap<>()); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRef(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBlob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getArray(null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getObject("", new HashMap<>()); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRef(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getBlob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getArray(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getDate(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTime(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getTimestamp(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getURL(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getURL(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRef(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRef(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8"))); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBlob("", new SerialBlob("".getBytes("UTF8"))); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateClob("", new SerialClob("".toCharArray())); - } catch (Exception e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateClob(0, new SerialClob("".toCharArray())); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateArray(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateArray(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRowId(null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getRowId(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRowId(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateRowId(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getHoldability(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNString(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - - try { - resSet.getNClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNClob(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getSQLXML(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getSQLXML(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateSQLXML(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateSQLXML(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.getNCharacterStream(null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - 
assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateNCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateAsciiStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateBinaryStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } - try { - resSet.updateCharacterStream(null, null); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("this operation is NOT supported currently!")); - } + resSet.getUnicodeStream(null); + resSet.getBinaryStream(null); + resSet.getAsciiStream(""); + resSet.getUnicodeStream(null); + resSet.getBinaryStream(null); + resSet.getWarnings(); + resSet.clearWarnings(); + resSet.getCursorName(); + resSet.getCharacterStream(null); + resSet.getCharacterStream(null); + resSet.isBeforeFirst(); + resSet.isAfterLast(); + resSet.isFirst(); + resSet.isLast(); + resSet.beforeFirst(); + resSet.afterLast(); + resSet.first(); + resSet.last(); + resSet.getRow(); + resSet.absolute(1); + resSet.relative(1); + resSet.previous(); + resSet.setFetchDirection(0); + resSet.getFetchDirection(); + resSet.setFetchSize(0); + resSet.getFetchSize(); + resSet.getConcurrency(); + resSet.rowUpdated(); + resSet.rowInserted(); + resSet.rowDeleted(); + resSet.updateNull(null); + resSet.updateBoolean(0, true); + resSet.updateByte(0, (byte) 2); + resSet.updateShort(0, (short) 1); + resSet.updateInt(0, 0); + resSet.updateLong(0, 0l); + resSet.updateFloat(0, 3.14f); + resSet.updateDouble(0, 3.1415); + resSet.updateBigDecimal(null, null); + resSet.updateString(null, null); + resSet.updateBytes(null, null); + resSet.updateDate(null, null); + resSet.updateTime(null, null); + resSet.updateTimestamp(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateObject(null, null); + resSet.updateObject(null, null); + resSet.updateNull(null); + resSet.updateBoolean("", false); + resSet.updateByte("", (byte) 1); + resSet.updateShort("", (short) 1); + resSet.updateInt("", 0); + resSet.updateLong("", 0l); + resSet.updateFloat("", 3.14f); + resSet.updateDouble("", 3.1415); + resSet.updateBigDecimal(null, null); + resSet.updateString(null, null); + resSet.updateBytes(null, null); + resSet.updateDate(null, null); + resSet.updateTime(null, null); + resSet.updateTimestamp(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateObject(null, null); + resSet.updateObject(null, null); + resSet.insertRow(); + 
resSet.updateRow(); + resSet.deleteRow(); + resSet.refreshRow(); + resSet.cancelRowUpdates(); + resSet.moveToInsertRow(); + resSet.moveToCurrentRow(); + resSet.getStatement(); + resSet.getObject(0, new HashMap<>()); + resSet.getRef(null); + resSet.getBlob(null); + resSet.getClob(null); + resSet.getArray(null); + resSet.getObject("", new HashMap<>()); + resSet.getRef(null); + resSet.getBlob(null); + resSet.getClob(null); + resSet.getArray(null); + resSet.getDate(null, null); + resSet.getDate(null, null); + resSet.getTime(null, null); + resSet.getTime(null, null); + resSet.getTimestamp(null, null); + resSet.getTimestamp(null, null); + resSet.getURL(null); + resSet.getURL(null); + resSet.updateRef(null, null); + resSet.updateRef(null, null); + resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8"))); + resSet.updateBlob("", new SerialBlob("".getBytes("UTF8"))); + resSet.updateClob("", new SerialClob("".toCharArray())); + resSet.updateClob(0, new SerialClob("".toCharArray())); + resSet.updateArray(null, null); + resSet.updateArray(null, null); + resSet.getRowId(null); + resSet.getRowId(null); + resSet.updateRowId(null, null); + resSet.updateRowId(null, null); + resSet.getHoldability(); + resSet.updateNString(null, null); + resSet.updateNString(null, null); + resSet.getNClob(null); + resSet.getNClob(null); + resSet.getSQLXML(null); + resSet.getSQLXML(null); + resSet.updateSQLXML(null, null); + resSet.updateSQLXML(null, null); + resSet.getNCharacterStream(null); + resSet.getNCharacterStream(null); + resSet.updateNCharacterStream(null, null); + resSet.updateNCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateNCharacterStream(null, null); + resSet.updateNCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); + resSet.updateAsciiStream(null, null); + resSet.updateBinaryStream(null, null); + resSet.updateCharacterStream(null, null); } @Test @@ -816,5 +264,4 @@ public class ResultSetTest { e.printStackTrace(); } } - } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java index b09482fb03f643cd183468b18bf36e50c50bf0b4..73ceafa7299b256d7e83064b53bd638835a4b075 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java @@ -8,9 +8,6 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - public class StatementTest { static Connection connection = null; static Statement statement = null; @@ -58,12 +55,12 @@ public class StatementTest { statement.executeUpdate("create database if not exists " + dbName); statement.executeUpdate("create table if not exists " + dbName + "." + tName + "(ts timestamp, k1 int)"); statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)"); - statement.executeQuery("select * from " + dbName + "." + tName); + statement.execute("select * from " + dbName + "." 
+ tName); ResultSet resultSet = statement.getResultSet(); - assertTrue(null != resultSet); + Assert.assertNotNull(resultSet); boolean isClosed = statement.isClosed(); - assertEquals(false, isClosed); + Assert.assertEquals(false, isClosed); } @Test(expected = SQLException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java index 1d8ff08db6d5f177fe74de579e1b6bb26ee35750..685957d60af694ffb0327fa9acd580fa45eda39d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java @@ -9,13 +9,14 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import java.util.Properties; +import java.util.concurrent.TimeUnit; public class SubscribeTest { Connection connection; Statement statement; String dbName = "test"; String tName = "t0"; - String host = "localhost"; + String host = "127.0.0.1"; String topic = "test"; @Before @@ -23,15 +24,15 @@ public class SubscribeTest { try { Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); statement = connection.createStatement(); - statement.executeUpdate("create database if not exists " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); + statement.execute("drop database if exists " + dbName); + statement.execute("create database if not exists " + dbName); + statement.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); long ts = System.currentTimeMillis(); for (int i = 0; i < 2; i++) { ts += i; @@ -45,44 +46,40 @@ public class SubscribeTest { } @Test - public void subscribe() throws Exception { - TSDBSubscribe subscribe = null; + public void subscribe() { try { String rawSql = "select * from " + dbName + "." 
+ tName + ";"; System.out.println(rawSql); - subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); + TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false); int a = 0; while (true) { - Thread.sleep(900); + TimeUnit.MILLISECONDS.sleep(1000); TSDBResultSet resSet = subscribe.consume(); - while (resSet.next()) { for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) { System.out.printf(i + ": " + resSet.getString(i) + "\t"); } System.out.println("\n======" + a + "=========="); } - resSet.close(); a++; if (a >= 2) { break; } +// resSet.close(); } + + subscribe.close(true); } catch (Exception e) { e.printStackTrace(); - } finally { - if (null != subscribe) { - subscribe.close(true); - } } } @After public void close() { try { - statement.executeQuery("drop database " + dbName); + statement.execute("drop database " + dbName); if (statement != null) statement.close(); if (connection != null) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java new file mode 100644 index 0000000000000000000000000000000000000000..0a4ecb739cfd5a0abda01f7788d375ef95e0208a --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java @@ -0,0 +1,423 @@ +package com.taosdata.jdbc; + +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import javax.management.OperationsException; +import java.sql.*; +import java.util.Properties; + +public class TSDBConnectionTest { + + private static final String host = "127.0.0.1"; + private static Connection conn; + + @Test + public void getConnection() { + // already test in beforeClass method + } + + @Test + public void createStatement() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void subscribe() { + try { + TSDBConnection unwrap = conn.unwrap(TSDBConnection.class); + TSDBSubscribe subscribe = unwrap.subscribe("topic1", "select * from log.log", false); + TSDBResultSet rs = subscribe.consume(); + ResultSetMetaData metaData = rs.getMetaData(); + for (int count = 0; count < 10 && rs.next(); count++) { + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String value = rs.getString(i); + System.out.print(metaData.getColumnLabel(i) + ":" + value + "\t"); + } + System.out.println(); + } + Assert.assertNotNull(rs); + subscribe.close(false); + } catch (SQLException e) { + e.printStackTrace(); + } + + } + + @Test + public void prepareStatement() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()"); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void prepareCall() throws SQLException { + conn.prepareCall("select server_status()"); + } + + @Test + public void nativeSQL() throws SQLException { + String nativeSQL = conn.nativeSQL("select * from log.log"); + Assert.assertEquals("select * from log.log", nativeSQL); + } + + @Test + public void setAutoCommit() throws SQLException { + conn.setAutoCommit(true); + conn.setAutoCommit(false); + } + + @Test + public void getAutoCommit() throws 
SQLException { + Assert.assertTrue(conn.getAutoCommit()); + } + + @Test + public void commit() throws SQLException { + conn.commit(); + } + + @Test + public void rollback() throws SQLException { + conn.rollback(); + } + + @Test + public void close() { + // connection will close in afterClass method + } + + @Test + public void isClosed() throws SQLException { + Assert.assertFalse(conn.isClosed()); + } + + @Test + public void getMetaData() throws SQLException { + DatabaseMetaData meta = conn.getMetaData(); + Assert.assertNotNull(meta); + Assert.assertEquals("com.taosdata.jdbc.TSDBDriver", meta.getDriverName()); + } + + @Test + public void setReadOnly() throws SQLException { + conn.setReadOnly(true); + } + + @Test + public void isReadOnly() throws SQLException { + Assert.assertTrue(conn.isReadOnly()); + } + + @Test + public void setCatalog() throws SQLException { + conn.setCatalog("test"); + Assert.assertEquals("test", conn.getCatalog()); + } + + @Test + public void getCatalog() throws SQLException { + conn.setCatalog("log"); + Assert.assertEquals("log", conn.getCatalog()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setTransactionIsolation() throws SQLException { + conn.setTransactionIsolation(Connection.TRANSACTION_NONE); + Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + } + + @Test + public void getTransactionIsolation() throws SQLException { + Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation()); + } + + @Test + public void getWarnings() throws SQLException { + Assert.assertNull(conn.getWarnings()); + } + + @Test + public void clearWarnings() throws SQLException { + conn.clearWarnings(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testCreateStatement() throws SQLException { + Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()", + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareCall() throws SQLException { + conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getTypeMap() throws SQLException { + conn.getTypeMap(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setTypeMap() throws SQLException { + conn.setTypeMap(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setHoldability() throws SQLException { + conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT); + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); + 
conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT); + } + + @Test + public void getHoldability() throws SQLException { + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setSavepoint() throws SQLException { + conn.setSavepoint(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testSetSavepoint() throws SQLException { + conn.setSavepoint(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testRollback() throws SQLException { + conn.rollback(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void releaseSavepoint() throws SQLException { + conn.releaseSavepoint(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testCreateStatement1() throws SQLException { + Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement1() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()", + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareCall1() throws SQLException { + conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement2() throws SQLException { + Assert.assertNotNull("", Statement.NO_GENERATED_KEYS); + conn.prepareStatement("", Statement.RETURN_GENERATED_KEYS); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement3() throws SQLException { + conn.prepareStatement("", new int[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement4() throws SQLException { + conn.prepareStatement("", new String[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createClob() throws SQLException { + conn.createClob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createBlob() throws SQLException { + conn.createBlob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createNClob() throws SQLException { + conn.createNClob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createSQLXML() throws SQLException { + conn.createSQLXML(); + } + + @Test(expected = SQLException.class) + public void isValid() throws SQLException { + Assert.assertTrue(conn.isValid(10)); + Assert.assertTrue(conn.isValid(0)); + conn.isValid(-1); + } + + @Test + public void setClientInfo() throws SQLClientInfoException { + 
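// set the charset, locale and time zone client properties one key at a time + 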
conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + } + + @Test + public void testSetClientInfo() throws SQLClientInfoException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn.setClientInfo(properties); + } + + @Test + public void getClientInfo() throws SQLException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + Properties info = conn.getClientInfo(); + String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET); + Assert.assertEquals("UTF-8", charset); + String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE); + Assert.assertEquals("en_US.UTF-8", locale); + String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE); + Assert.assertEquals("UTC-8", timezone); + } + + @Test + public void testGetClientInfo() throws SQLException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET); + Assert.assertEquals("UTF-8", charset); + String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE); + Assert.assertEquals("en_US.UTF-8", locale); + String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE); + Assert.assertEquals("UTC-8", timezone); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createArrayOf() throws SQLException { + conn.createArrayOf("", null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createStruct() throws SQLException { + conn.createStruct("", null); + } + + @Test + public void setSchema() throws SQLException { + conn.setSchema("test"); + } + + @Test + public void getSchema() throws SQLException { + Assert.assertNull(conn.getSchema()); + } + + @Test + public void abort() throws SQLException { + conn.abort(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setNetworkTimeout() throws SQLException { + conn.setNetworkTimeout(null, 1000); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getNetworkTimeout() throws SQLException { + conn.getNetworkTimeout(); + } + + @Test + public void unwrap() { + try { + TSDBConnection tsdbConnection = conn.unwrap(TSDBConnection.class); + Assert.assertNotNull(tsdbConnection); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(conn.isWrapperFor(TSDBConnection.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/log?user=root&password=taosdata", properties); + // create test database for 
test cases + try (Statement stmt = conn.createStatement()) { + stmt.execute("create database if not exists test"); + } + + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java index 445723f501a85f7c697afffb59b273bcf6a1a630..671ecd723d6fea8d6b9b8ccf94cba06689ce26b8 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBDriverTest.java @@ -24,11 +24,10 @@ public class TSDBDriverTest { "jdbc:TAOS://:/test", "jdbc:TAOS://localhost:0/?user=root&password=taosdata" }; - private Connection conn; @Test - public void testConnectWithJdbcURL() { + public void connectWithJdbcURL() { final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata"; try { conn = DriverManager.getConnection(url); @@ -40,7 +39,7 @@ public class TSDBDriverTest { } @Test - public void testConnectWithProperties() { + public void connectWithProperties() { final String jdbcUrl = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); @@ -56,7 +55,7 @@ public class TSDBDriverTest { } @Test - public void testConnectWithConfigFile() { + public void connectWithConfigFile() { String jdbcUrl = "jdbc:TAOS://:/log?user=root&password=taosdata"; Properties connProps = new Properties(); connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); @@ -71,16 +70,6 @@ public class TSDBDriverTest { } } - @Test(expected = SQLException.class) - public void testAcceptsURL() throws SQLException { - Driver driver = new TSDBDriver(); - for (String url : validURLs) { - assertTrue("failure - acceptsURL(\" " + url + " \") should be true", driver.acceptsURL(url)); - } - driver.acceptsURL(null); - fail("acceptsURL throws exception when parameter is null"); - } - @Test public void testParseURL() { TSDBDriver driver = new TSDBDriver(); @@ -121,8 +110,19 @@ public class TSDBDriverTest { assertNull("failure - dbname should be null", actual.getProperty("dbname")); } + + @Test(expected = SQLException.class) + public void acceptsURL() throws SQLException { + Driver driver = new TSDBDriver(); + for (String url : validURLs) { + assertTrue("failure - acceptsURL(\" " + url + " \") should be true", driver.acceptsURL(url)); + } + driver.acceptsURL(null); + fail("acceptsURL throws exception when parameter is null"); + } + @Test - public void testGetPropertyInfo() throws SQLException { + public void getPropertyInfo() throws SQLException { Driver driver = new TSDBDriver(); final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata"; Properties connProps = new Properties(); @@ -142,23 +142,23 @@ public class TSDBDriverTest { } @Test - public void testGetMajorVersion() { - assertEquals("failure - getMajorVersion should be 2", 2, new TSDBDriver().getMajorVersion()); + public void getMajorVersion() { + assertEquals(2, new TSDBDriver().getMajorVersion()); } @Test - public void testGetMinorVersion() { - assertEquals("failure - getMinorVersion should be 0", 0, new TSDBDriver().getMinorVersion()); + public void getMinorVersion() { + assertEquals(0, new 
TSDBDriver().getMinorVersion()); } @Test - public void testJdbcCompliant() { - assertFalse("failure - jdbcCompliant should be false", new TSDBDriver().jdbcCompliant()); + public void jdbcCompliant() { + assertFalse(new TSDBDriver().jdbcCompliant()); } @Test - public void testGetParentLogger() throws SQLFeatureNotSupportedException { - assertNull("failure - getParentLogger should be be null", new TSDBDriver().getParentLogger()); + public void getParentLogger() throws SQLFeatureNotSupportedException { + assertNull(new TSDBDriver().getParentLogger()); } @BeforeClass @@ -169,6 +169,4 @@ public class TSDBDriverTest { e.printStackTrace(); } } - - } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4794fc61f167e6dd5c3fcab4c789358eb87c4554 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java @@ -0,0 +1,234 @@ +package com.taosdata.jdbc; + +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.util.UUID; + +public class TSDBStatementTest { + private static final String host = "127.0.0.1"; + private static Connection conn; + private static Statement stmt; + + @Test + public void executeQuery() { + try { + ResultSet rs = stmt.executeQuery("show databases"); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void executeUpdate() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + int affectRows = stmt.executeUpdate("create database " + dbName); + Assert.assertEquals(0, affectRows); + affectRows = stmt.executeUpdate("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(0, affectRows); + affectRows = stmt.executeUpdate("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + Assert.assertEquals(1, affectRows); + affectRows = stmt.executeUpdate("drop database " + dbName); + Assert.assertEquals(0, affectRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void close() { + } + + @Test + public void execute() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + boolean isSelect = stmt.execute("create database " + dbName); + Assert.assertEquals(false, isSelect); + int affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(1, affectedRows); + + isSelect = stmt.execute("select * from " + dbName + ".weather"); + Assert.assertEquals(true, isSelect); + 
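// dropping the database is DDL as well: execute() returns false and the update count is 0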
+ isSelect = stmt.execute("drop database " + dbName); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getResultSet() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + boolean isSelect = stmt.execute("create database " + dbName); + Assert.assertEquals(false, isSelect); + int affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + + isSelect = stmt.execute("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(1, affectedRows); + + isSelect = stmt.execute("select * from " + dbName + ".weather"); + Assert.assertEquals(true, isSelect); + ResultSet rs = stmt.getResultSet(); + Assert.assertNotNull(rs); + ResultSetMetaData meta = rs.getMetaData(); + Assert.assertEquals(3, meta.getColumnCount()); + int count = 0; + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + count++; + } + Assert.assertEquals(1, count); + + isSelect = stmt.execute("drop database " + dbName); + Assert.assertEquals(false, isSelect); + affectedRows = stmt.getUpdateCount(); + Assert.assertEquals(0, affectedRows); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getUpdateCount() { + execute(); + } + + @Test + public void addBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void clearBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.clearBatch(); + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + stmt.clearBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void executeBatch() { + final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); + try { + stmt.addBatch("create database " + dbName); + stmt.addBatch("create table " + dbName + ".weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.addBatch("insert into " + dbName + ".t1 using " + dbName + ".weather tags('北京') values(now, 22.33)"); + stmt.addBatch("select * from " + dbName + ".weather"); + stmt.addBatch("drop database " + dbName); + int[] results = 
stmt.executeBatch(); + Assert.assertEquals(0, results[0]); + Assert.assertEquals(0, results[1]); + Assert.assertEquals(1, results[2]); + Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[3]); + Assert.assertEquals(0, results[4]); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void getConnection() { + try { + Connection connection = stmt.getConnection(); + Assert.assertNotNull(connection); + Assert.assertTrue(this.conn == connection); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void isClosed() { + try { + Assert.assertEquals(false, stmt.isClosed()); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties); + stmt = conn.createStatement(); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (stmt != null) + stmt.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java similarity index 98% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java index 9d1884e6c2b44eb5269bcb5ad40fdfe9dd759f67..b5f5c7b58997c106c4c8282c80fbea5bdcc129a0 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ImportTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java @@ -1,5 +1,6 @@ -package com.taosdata.jdbc; +package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.TSDBDriver; import org.junit.*; import org.junit.runners.MethodSorters; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f3d79b1df1594edc4fffd626244bc742ea13ec75 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java @@ -0,0 +1,187 @@ +package com.taosdata.jdbc.cases; + +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class InvalidResultSetPointerTest { + + private static String host = "127.0.0.1"; + private static final String dbName = "test"; + private static final String stbName = "stb"; + private static final String tbName = "tb"; + private static Connection connection; + private static int numOfSTb = 30000; + private static int numOfTb = 3; + private static int numOfThreads = 100; + + @Test + public void test() throws SQLException { + execute("drop database if exists " + dbName); + execute("create database if not exists " + dbName); + execute("use " + dbName); + createSTable(); + createTable(); + insert(); + selectMultiThreading(); + close(); + } + + private void insert() { + for (int i = 0; 
i < numOfSTb; i++) { + for (int j = 0; j < numOfTb; j++) { + final String sql = "INSERT INTO " + dbName + "." + tbName + i + "_" + j + " (ts, temperature, humidity, name) values(now, 20.5, 34, \"" + i + "\")"; + System.out.println(sql); + execute(sql); + } + } + } + + private void createSTable() { + for (int i = 0; i < numOfSTb; i++) { + final String sql = "create table if not exists " + dbName + "." + stbName + i + " (ts timestamp, temperature float, humidity int, name BINARY(" + (i % 73 + 10) + ")) TAGS (tag1 INT)"; + execute(sql); + } + } + + private void createTable() { + for (int i = 0; i < numOfSTb; i++) { + for (int j = 0; j < numOfTb; j++) { + final String sql = "create table if not exists " + dbName + "." + tbName + i + "_" + j + " USING " + stbName + i + " TAGS(" + j + ")"; + execute(sql); + } + } + } + + private void close() throws SQLException { + if (connection != null) { + this.connection.close(); + System.out.println("connection closed."); + } + } + + private void selectMultiThreading() { + int a = numOfSTb / numOfThreads; + if (a < 1) { + numOfThreads = numOfSTb; + a = 1; + } + + int b = 0; + if (numOfThreads != 0) { + b = numOfSTb % numOfThreads; + } + + multiThreadingClass instance[] = new multiThreadingClass[numOfThreads]; + + int last = 0; + for (int i = 0; i < numOfThreads; i++) { + instance[i] = new multiThreadingClass(); + instance[i].id = i; + instance[i].from = last; + if (i < b) { + instance[i].to = last + a; + } else { + instance[i].to = last + a - 1; + } + + last = instance[i].to + 1; + instance[i].numOfTb = numOfTb; + instance[i].connection = connection; + instance[i].dbName = dbName; + instance[i].tbName = tbName; + instance[i].start(); + } + + for (int i = 0; i < numOfThreads; i++) { + try { + instance[i].join(); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + } + } + + @BeforeClass + public static void beforeClass() { + try { + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + Class.forName("com.taosdata.jdbc.TSDBDriver"); + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + connection = DriverManager.getConnection(url, properties); + if (connection != null) + System.out.println("[ OK ] Connection established."); + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + private void execute(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + statement.execute(sql); + long end = System.currentTimeMillis(); + printSql(sql, (end - start)); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void printSql(String sql, long cost) { + System.out.println("time cost: " + cost + " ms, execute statement ====> " + sql); + } + + private void executeQuery(String sql) { + try (Statement statement = connection.createStatement()) { + long start = System.currentTimeMillis(); + ResultSet resultSet = statement.executeQuery(sql); + long end = System.currentTimeMillis(); + printSql(sql, (end - start)); +// printResult(resultSet); + resultSet.close(); + statement.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + class multiThreadingClass extends Thread { + public int id; + public int from, to; + public int numOfTb; + public Connection connection; + public String dbName; + public String tbName; + + public void 
run() { + System.out.println("ID: " + id + " from: " + from + " to: " + to); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + System.out.println("Thread " + id + " interrupted."); + } + + for (int i = from; i < to; i++) { + for (int j = 0; j < numOfTb; j++) { + if (j % 1000 == 0) { + try { + System.out.print(id + "s."); + Thread.sleep(1); + } catch (InterruptedException e) { + System.out.println("Thread " + id + " interrupted."); + } + } + final String sql = "select last_row(humidity) from " + dbName + "." + tbName + i + "_" + j; + executeQuery(sql); + } + } + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java similarity index 96% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java index 37fbc284877dbb27ecb8c9da1b752cfaff029023..d0ba113b7a4a8f99e22eb8143905d0b086583e1d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/QueryDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java @@ -1,5 +1,6 @@ -package com.taosdata.jdbc; +package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.TSDBDriver; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java similarity index 97% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java index 7db37beaf4b355b8d256b5d63f4eedd8e7fd0e0e..38c8cbb98c48342f131f4f5f0fee885bb446e83c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SelectTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java @@ -1,5 +1,6 @@ -package com.taosdata.jdbc; +package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.TSDBDriver; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java similarity index 98% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java index 0a1c548baa330edada8c393f79cc89583bea7b18..4575cb73a05fbbc19d6eaf2ba5be0ed27b61804c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StableTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java @@ -1,5 +1,6 @@ -package com.taosdata.jdbc; +package com.taosdata.jdbc.cases; +import com.taosdata.jdbc.TSDBDriver; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.FixMethodOrder; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java new file mode 100644 index 0000000000000000000000000000000000000000..68eccd876ef27b2eb99c34b215836cd1575b2cb4 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java @@ -0,0 +1,406 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBConnection; +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.TSDBResultSet; +import 
com.taosdata.jdbc.TSDBSubscribe; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import javax.management.OperationsException; +import java.sql.*; +import java.util.Properties; + +public class RestfulConnectionTest { + + // private static final String host = "master"; + private static final String host = "127.0.0.1"; + private static Connection conn; + + @Test + public void getConnection() { + // already test in beforeClass method + } + + @Test + public void createStatement() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void prepareStatement() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()"); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void prepareCall() throws SQLException { + conn.prepareCall("select server_status()"); + } + + @Test + public void nativeSQL() throws SQLException { + String nativeSQL = conn.nativeSQL("select * from log.log"); + Assert.assertEquals("select * from log.log", nativeSQL); + } + + @Test + public void setAutoCommit() throws SQLException { + conn.setAutoCommit(true); + conn.setAutoCommit(false); + } + + @Test + public void getAutoCommit() throws SQLException { + Assert.assertTrue(conn.getAutoCommit()); + } + + @Test + public void commit() throws SQLException { + conn.commit(); + } + + @Test + public void rollback() throws SQLException { + conn.rollback(); + } + + @Test + public void close() { + // connection will close in afterClass method + } + + @Test + public void isClosed() throws SQLException { + Assert.assertFalse(conn.isClosed()); + } + + @Test + public void getMetaData() throws SQLException { + DatabaseMetaData meta = conn.getMetaData(); + Assert.assertNotNull(meta); + Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName()); + } + + @Test + public void setReadOnly() throws SQLException { + conn.setReadOnly(true); + } + + @Test + public void isReadOnly() throws SQLException { + Assert.assertTrue(conn.isReadOnly()); + } + + @Test + public void setCatalog() throws SQLException { + conn.setCatalog("test"); + Assert.assertEquals("test", conn.getCatalog()); + } + + @Test + public void getCatalog() throws SQLException { + conn.setCatalog("log"); + Assert.assertEquals("log", conn.getCatalog()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setTransactionIsolation() throws SQLException { + conn.setTransactionIsolation(Connection.TRANSACTION_NONE); + Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation()); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + } + + @Test + public void getTransactionIsolation() throws SQLException { + Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation()); + } + + @Test + public void getWarnings() throws SQLException { + Assert.assertNull(conn.getWarnings()); + } + + @Test + public void clearWarnings() throws SQLException { + conn.clearWarnings(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testCreateStatement() throws SQLException { + Statement stmt = 
conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()", + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareCall() throws SQLException { + conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getTypeMap() throws SQLException { + conn.getTypeMap(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setTypeMap() throws SQLException { + conn.setTypeMap(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setHoldability() throws SQLException { + conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT); + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); + conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT); + } + + @Test + public void getHoldability() throws SQLException { + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setSavepoint() throws SQLException { + conn.setSavepoint(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testSetSavepoint() throws SQLException { + conn.setSavepoint(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testRollback() throws SQLException { + conn.rollback(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void releaseSavepoint() throws SQLException { + conn.releaseSavepoint(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testCreateStatement1() throws SQLException { + Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + ResultSet rs = stmt.executeQuery("select server_status()"); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement1() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("select server_status()", + ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + int status = rs.getInt("server_status()"); + Assert.assertEquals(1, status); + + conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + 
public void testPrepareCall1() throws SQLException { + conn.prepareCall("", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement2() throws SQLException { + Assert.assertNotNull("", Statement.NO_GENERATED_KEYS); + conn.prepareStatement("", Statement.RETURN_GENERATED_KEYS); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement3() throws SQLException { + conn.prepareStatement("", new int[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void testPrepareStatement4() throws SQLException { + conn.prepareStatement("", new String[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createClob() throws SQLException { + conn.createClob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createBlob() throws SQLException { + conn.createBlob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createNClob() throws SQLException { + conn.createNClob(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createSQLXML() throws SQLException { + conn.createSQLXML(); + } + + @Test(expected = SQLException.class) + public void isValid() throws SQLException { + Assert.assertTrue(conn.isValid(10)); + Assert.assertTrue(conn.isValid(0)); + conn.isValid(-1); + } + + @Test + public void setClientInfo() throws SQLClientInfoException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + } + + @Test + public void testSetClientInfo() throws SQLClientInfoException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn.setClientInfo(properties); + } + + @Test + public void getClientInfo() throws SQLException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + Properties info = conn.getClientInfo(); + String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET); + Assert.assertEquals("UTF-8", charset); + String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE); + Assert.assertEquals("en_US.UTF-8", locale); + String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE); + Assert.assertEquals("UTC-8", timezone); + } + + @Test + public void testGetClientInfo() throws SQLException { + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET); + Assert.assertEquals("UTF-8", charset); + String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE); + Assert.assertEquals("en_US.UTF-8", locale); + String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE); + Assert.assertEquals("UTC-8", timezone); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void createArrayOf() throws SQLException { + conn.createArrayOf("", null); + } + + 
@Test(expected = SQLFeatureNotSupportedException.class) + public void createStruct() throws SQLException { + conn.createStruct("", null); + } + + @Test + public void setSchema() throws SQLException { + conn.setSchema("test"); + } + + @Test + public void getSchema() throws SQLException { + Assert.assertNull(conn.getSchema()); + } + + @Test + public void abort() throws SQLException { + conn.abort(null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void setNetworkTimeout() throws SQLException { + conn.setNetworkTimeout(null, 1000); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getNetworkTimeout() throws SQLException { + conn.getNetworkTimeout(); + } + + @Test + public void unwrap() { + try { + RestfulConnection restfulConnection = conn.unwrap(RestfulConnection.class); + Assert.assertNotNull(restfulConnection); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(conn.isWrapperFor(RestfulConnection.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/log?user=root&password=taosdata", properties); + // create test database for test cases + try (Statement stmt = conn.createStatement()) { + stmt.execute("create database if not exists test"); + } + + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java new file mode 100644 index 0000000000000000000000000000000000000000..1991c17065a34c16fe7758486bd10b83f3241a07 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java @@ -0,0 +1,984 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class RestfulDatabaseMetaDataTest { + // private static final String host = "master"; + private static final String host = "127.0.0.1"; + private static final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + private static Connection connection; + private static RestfulDatabaseMetaData metaData; + + @Test + public void unwrap() throws SQLException { + RestfulDatabaseMetaData unwrap = metaData.unwrap(RestfulDatabaseMetaData.class); + Assert.assertNotNull(unwrap); + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(metaData.isWrapperFor(RestfulDatabaseMetaData.class)); + } + + @Test + public void allProceduresAreCallable() throws SQLException { + Assert.assertFalse(metaData.allProceduresAreCallable()); + } + + @Test + public void allTablesAreSelectable() throws SQLException { + 
Assert.assertFalse(metaData.allTablesAreSelectable()); + } + + @Test + public void getURL() throws SQLException { + Assert.assertEquals(url, metaData.getURL()); + } + + @Test + public void getUserName() throws SQLException { + Assert.assertEquals("root", metaData.getUserName()); + } + + @Test + public void isReadOnly() throws SQLException { + Assert.assertFalse(metaData.isReadOnly()); + } + + @Test + public void nullsAreSortedHigh() throws SQLException { + Assert.assertFalse(metaData.nullsAreSortedHigh()); + } + + @Test + public void nullsAreSortedLow() throws SQLException { + Assert.assertTrue(metaData.nullsAreSortedLow()); + } + + @Test + public void nullsAreSortedAtStart() throws SQLException { + Assert.assertTrue(metaData.nullsAreSortedAtStart()); + } + + @Test + public void nullsAreSortedAtEnd() throws SQLException { + Assert.assertFalse(metaData.nullsAreSortedAtEnd()); + } + + @Test + public void getDatabaseProductName() throws SQLException { + Assert.assertEquals("TDengine", metaData.getDatabaseProductName()); + } + + @Test + public void getDatabaseProductVersion() throws SQLException { + Assert.assertEquals("2.0.x.x", metaData.getDatabaseProductVersion()); + } + + @Test + public void getDriverName() throws SQLException { + Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", metaData.getDriverName()); + } + + @Test + public void getDriverVersion() throws SQLException { + Assert.assertEquals("2.0.x", metaData.getDriverVersion()); + } + + @Test + public void getDriverMajorVersion() { + Assert.assertEquals(2, metaData.getDriverMajorVersion()); + } + + @Test + public void getDriverMinorVersion() { + Assert.assertEquals(0, metaData.getDriverMinorVersion()); + } + + @Test + public void usesLocalFiles() throws SQLException { + Assert.assertFalse(metaData.usesLocalFiles()); + } + + @Test + public void usesLocalFilePerTable() throws SQLException { + Assert.assertFalse(metaData.usesLocalFilePerTable()); + } + + @Test + public void supportsMixedCaseIdentifiers() throws SQLException { + Assert.assertFalse(metaData.supportsMixedCaseIdentifiers()); + } + + @Test + public void storesUpperCaseIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesUpperCaseIdentifiers()); + } + + @Test + public void storesLowerCaseIdentifiers() throws SQLException { + Assert.assertTrue(metaData.storesLowerCaseIdentifiers()); + } + + @Test + public void storesMixedCaseIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesMixedCaseIdentifiers()); + } + + @Test + public void supportsMixedCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.supportsMixedCaseQuotedIdentifiers()); + } + + @Test + public void storesUpperCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesUpperCaseQuotedIdentifiers()); + } + + @Test + public void storesLowerCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesLowerCaseQuotedIdentifiers()); + } + + @Test + public void storesMixedCaseQuotedIdentifiers() throws SQLException { + Assert.assertFalse(metaData.storesMixedCaseQuotedIdentifiers()); + } + + @Test + public void getIdentifierQuoteString() throws SQLException { + Assert.assertEquals(" ", metaData.getIdentifierQuoteString()); + } + + @Test + public void getSQLKeywords() throws SQLException { + Assert.assertEquals(null, metaData.getSQLKeywords()); + } + + @Test + public void getNumericFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getNumericFunctions()); + } + + @Test + public void 
getStringFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getStringFunctions()); + } + + @Test + public void getSystemFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getSystemFunctions()); + } + + @Test + public void getTimeDateFunctions() throws SQLException { + Assert.assertEquals(null, metaData.getTimeDateFunctions()); + } + + @Test + public void getSearchStringEscape() throws SQLException { + Assert.assertEquals(null, metaData.getSearchStringEscape()); + } + + @Test + public void getExtraNameCharacters() throws SQLException { + Assert.assertEquals(null, metaData.getExtraNameCharacters()); + } + + @Test + public void supportsAlterTableWithAddColumn() throws SQLException { + Assert.assertTrue(metaData.supportsAlterTableWithAddColumn()); + } + + @Test + public void supportsAlterTableWithDropColumn() throws SQLException { + Assert.assertTrue(metaData.supportsAlterTableWithDropColumn()); + } + + @Test + public void supportsColumnAliasing() throws SQLException { + Assert.assertTrue(metaData.supportsColumnAliasing()); + } + + @Test + public void nullPlusNonNullIsNull() throws SQLException { + Assert.assertFalse(metaData.nullPlusNonNullIsNull()); + } + + @Test + public void supportsConvert() throws SQLException { + Assert.assertFalse(metaData.supportsConvert()); + } + + @Test + public void testSupportsConvert() throws SQLException { + Assert.assertFalse(metaData.supportsConvert(1, 1)); + } + + @Test + public void supportsTableCorrelationNames() throws SQLException { + Assert.assertFalse(metaData.supportsTableCorrelationNames()); + } + + @Test + public void supportsDifferentTableCorrelationNames() throws SQLException { + Assert.assertFalse(metaData.supportsDifferentTableCorrelationNames()); + } + + @Test + public void supportsExpressionsInOrderBy() throws SQLException { + Assert.assertFalse(metaData.supportsExpressionsInOrderBy()); + } + + @Test + public void supportsOrderByUnrelated() throws SQLException { + Assert.assertFalse(metaData.supportsOrderByUnrelated()); + } + + @Test + public void supportsGroupBy() throws SQLException { + Assert.assertTrue(metaData.supportsGroupBy()); + } + + @Test + public void supportsGroupByUnrelated() throws SQLException { + Assert.assertFalse(metaData.supportsGroupByUnrelated()); + } + + @Test + public void supportsGroupByBeyondSelect() throws SQLException { + Assert.assertFalse(metaData.supportsGroupByBeyondSelect()); + } + + @Test + public void supportsLikeEscapeClause() throws SQLException { + Assert.assertFalse(metaData.supportsLikeEscapeClause()); + } + + @Test + public void supportsMultipleResultSets() throws SQLException { + Assert.assertFalse(metaData.supportsMultipleResultSets()); + } + + @Test + public void supportsMultipleTransactions() throws SQLException { + Assert.assertFalse(metaData.supportsMultipleTransactions()); + } + + @Test + public void supportsNonNullableColumns() throws SQLException { + Assert.assertFalse(metaData.supportsNonNullableColumns()); + } + + @Test + public void supportsMinimumSQLGrammar() throws SQLException { + Assert.assertFalse(metaData.supportsMinimumSQLGrammar()); + } + + @Test + public void supportsCoreSQLGrammar() throws SQLException { + Assert.assertFalse(metaData.supportsCoreSQLGrammar()); + } + + @Test + public void supportsExtendedSQLGrammar() throws SQLException { + Assert.assertFalse(metaData.supportsExtendedSQLGrammar()); + } + + @Test + public void supportsANSI92EntryLevelSQL() throws SQLException { + Assert.assertFalse(metaData.supportsANSI92EntryLevelSQL()); + } + + 
@Test + public void supportsANSI92IntermediateSQL() throws SQLException { + Assert.assertFalse(metaData.supportsANSI92IntermediateSQL()); + } + + @Test + public void supportsANSI92FullSQL() throws SQLException { + Assert.assertFalse(metaData.supportsANSI92FullSQL()); + } + + @Test + public void supportsIntegrityEnhancementFacility() throws SQLException { + Assert.assertFalse(metaData.supportsIntegrityEnhancementFacility()); + } + + @Test + public void supportsOuterJoins() throws SQLException { + Assert.assertFalse(metaData.supportsOuterJoins()); + } + + @Test + public void supportsFullOuterJoins() throws SQLException { + Assert.assertFalse(metaData.supportsFullOuterJoins()); + } + + @Test + public void supportsLimitedOuterJoins() throws SQLException { + Assert.assertFalse(metaData.supportsLimitedOuterJoins()); + } + + @Test + public void getSchemaTerm() throws SQLException { + Assert.assertNull(metaData.getSchemaTerm()); + } + + @Test + public void getProcedureTerm() throws SQLException { + Assert.assertNull(metaData.getProcedureTerm()); + } + + @Test + public void getCatalogTerm() throws SQLException { + Assert.assertEquals("database", metaData.getCatalogTerm()); + } + + @Test + public void isCatalogAtStart() throws SQLException { + Assert.assertTrue(metaData.isCatalogAtStart()); + } + + @Test + public void getCatalogSeparator() throws SQLException { + Assert.assertEquals(".", metaData.getCatalogSeparator()); + } + + @Test + public void supportsSchemasInDataManipulation() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInDataManipulation()); + } + + @Test + public void supportsSchemasInProcedureCalls() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInProcedureCalls()); + } + + @Test + public void supportsSchemasInTableDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInTableDefinitions()); + } + + @Test + public void supportsSchemasInIndexDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInIndexDefinitions()); + } + + @Test + public void supportsSchemasInPrivilegeDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsSchemasInPrivilegeDefinitions()); + } + + @Test + public void supportsCatalogsInDataManipulation() throws SQLException { + Assert.assertTrue(metaData.supportsCatalogsInDataManipulation()); + } + + @Test + public void supportsCatalogsInProcedureCalls() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInProcedureCalls()); + } + + @Test + public void supportsCatalogsInTableDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInTableDefinitions()); + } + + @Test + public void supportsCatalogsInIndexDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInIndexDefinitions()); + } + + @Test + public void supportsCatalogsInPrivilegeDefinitions() throws SQLException { + Assert.assertFalse(metaData.supportsCatalogsInPrivilegeDefinitions()); + } + + @Test + public void supportsPositionedDelete() throws SQLException { + Assert.assertFalse(metaData.supportsPositionedDelete()); + } + + @Test + public void supportsPositionedUpdate() throws SQLException { + Assert.assertFalse(metaData.supportsPositionedUpdate()); + } + + @Test + public void supportsSelectForUpdate() throws SQLException { + Assert.assertFalse(metaData.supportsSelectForUpdate()); + } + + @Test + public void supportsStoredProcedures() throws SQLException { + Assert.assertFalse(metaData.supportsStoredProcedures()); + } + + @Test + 
public void supportsSubqueriesInComparisons() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInComparisons()); + } + + @Test + public void supportsSubqueriesInExists() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInExists()); + } + + @Test + public void supportsSubqueriesInIns() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInIns()); + } + + @Test + public void supportsSubqueriesInQuantifieds() throws SQLException { + Assert.assertFalse(metaData.supportsSubqueriesInQuantifieds()); + } + + @Test + public void supportsCorrelatedSubqueries() throws SQLException { + Assert.assertFalse(metaData.supportsCorrelatedSubqueries()); + } + + @Test + public void supportsUnion() throws SQLException { + Assert.assertFalse(metaData.supportsUnion()); + } + + @Test + public void supportsUnionAll() throws SQLException { + Assert.assertFalse(metaData.supportsUnionAll()); + } + + @Test + public void supportsOpenCursorsAcrossCommit() throws SQLException { + Assert.assertFalse(metaData.supportsOpenCursorsAcrossCommit()); + } + + @Test + public void supportsOpenCursorsAcrossRollback() throws SQLException { + Assert.assertFalse(metaData.supportsOpenCursorsAcrossRollback()); + } + + @Test + public void supportsOpenStatementsAcrossCommit() throws SQLException { + Assert.assertFalse(metaData.supportsOpenStatementsAcrossCommit()); + } + + @Test + public void supportsOpenStatementsAcrossRollback() throws SQLException { + Assert.assertFalse(metaData.supportsOpenStatementsAcrossRollback()); + } + + @Test + public void getMaxBinaryLiteralLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxBinaryLiteralLength()); + } + + @Test + public void getMaxCharLiteralLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxCharLiteralLength()); + } + + @Test + public void getMaxColumnNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnNameLength()); + } + + @Test + public void getMaxColumnsInGroupBy() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInGroupBy()); + } + + @Test + public void getMaxColumnsInIndex() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInIndex()); + } + + @Test + public void getMaxColumnsInOrderBy() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInOrderBy()); + } + + @Test + public void getMaxColumnsInSelect() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInSelect()); + } + + @Test + public void getMaxColumnsInTable() throws SQLException { + Assert.assertEquals(0, metaData.getMaxColumnsInTable()); + } + + @Test + public void getMaxConnections() throws SQLException { + Assert.assertEquals(0, metaData.getMaxConnections()); + } + + @Test + public void getMaxCursorNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxCursorNameLength()); + } + + @Test + public void getMaxIndexLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxIndexLength()); + } + + @Test + public void getMaxSchemaNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxSchemaNameLength()); + } + + @Test + public void getMaxProcedureNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxProcedureNameLength()); + } + + @Test + public void getMaxCatalogNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxCatalogNameLength()); + } + + @Test + public void getMaxRowSize() throws SQLException { + Assert.assertEquals(0, 
metaData.getMaxRowSize()); + } + + @Test + public void doesMaxRowSizeIncludeBlobs() throws SQLException { + Assert.assertFalse(metaData.doesMaxRowSizeIncludeBlobs()); + } + + @Test + public void getMaxStatementLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxStatementLength()); + } + + @Test + public void getMaxStatements() throws SQLException { + Assert.assertEquals(0, metaData.getMaxStatements()); + } + + @Test + public void getMaxTableNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxTableNameLength()); + } + + @Test + public void getMaxTablesInSelect() throws SQLException { + Assert.assertEquals(0, metaData.getMaxTablesInSelect()); + } + + @Test + public void getMaxUserNameLength() throws SQLException { + Assert.assertEquals(0, metaData.getMaxUserNameLength()); + } + + @Test + public void getDefaultTransactionIsolation() throws SQLException { + Assert.assertEquals(Connection.TRANSACTION_NONE, metaData.getDefaultTransactionIsolation()); + } + + @Test + public void supportsTransactions() throws SQLException { + Assert.assertFalse(metaData.supportsTransactions()); + } + + @Test + public void supportsTransactionIsolationLevel() throws SQLException { + Assert.assertTrue(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_NONE)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_UNCOMMITTED)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ)); + Assert.assertFalse(metaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE)); + } + + @Test + public void supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + Assert.assertFalse(metaData.supportsDataDefinitionAndDataManipulationTransactions()); + } + + @Test + public void supportsDataManipulationTransactionsOnly() throws SQLException { + Assert.assertFalse(metaData.supportsDataManipulationTransactionsOnly()); + } + + @Test + public void dataDefinitionCausesTransactionCommit() throws SQLException { + Assert.assertFalse(metaData.dataDefinitionCausesTransactionCommit()); + } + + @Test + public void dataDefinitionIgnoredInTransactions() throws SQLException { + Assert.assertFalse(metaData.dataDefinitionIgnoredInTransactions()); + } + + @Test + public void getProcedures() throws SQLException { + Assert.assertNull(metaData.getProcedures("*", "*", "*")); + } + + @Test + public void getProcedureColumns() throws SQLException { + Assert.assertNull(metaData.getProcedureColumns("*", "*", "*", "*")); + } + + @Test + public void getTables() throws SQLException { + System.out.println("****************************************************"); + ResultSet tables = metaData.getTables("log", "", null, null); + ResultSetMetaData metaData = tables.getMetaData(); + while (tables.next()) { + System.out.print(metaData.getColumnLabel(1) + ":" + tables.getString(1) + "\t"); + System.out.print(metaData.getColumnLabel(3) + ":" + tables.getString(3) + "\t"); + System.out.print(metaData.getColumnLabel(4) + ":" + tables.getString(4) + "\t"); + System.out.print(metaData.getColumnLabel(5) + ":" + tables.getString(5) + "\n"); + } + System.out.println(); + Assert.assertNotNull(tables); + } + + @Test + public void getSchemas() throws SQLException { + Assert.assertNotNull(metaData.getSchemas()); + } + + @Test + public void getCatalogs() throws SQLException { + 
System.out.println("****************************************************"); + + ResultSet catalogs = metaData.getCatalogs(); + ResultSetMetaData meta = catalogs.getMetaData(); + while (catalogs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + catalogs.getString(i)); + } + System.out.println(); + } + } + + @Test + public void getTableTypes() throws SQLException { + System.out.println("****************************************************"); + + ResultSet tableTypes = metaData.getTableTypes(); + while (tableTypes.next()) { + System.out.println(tableTypes.getString("TABLE_TYPE")); + } + Assert.assertNotNull(metaData.getTableTypes()); + } + + @Test + public void getColumns() throws SQLException { + System.out.println("****************************************************"); + + ResultSet columns = metaData.getColumns("log", "", "dn", ""); + ResultSetMetaData meta = columns.getMetaData(); + while (columns.next()) { + System.out.print(meta.getColumnLabel(1) + ": " + columns.getString(1) + "\t"); + System.out.print(meta.getColumnLabel(3) + ": " + columns.getString(3) + "\t"); + System.out.print(meta.getColumnLabel(4) + ": " + columns.getString(4) + "\t"); + System.out.print(meta.getColumnLabel(5) + ": " + columns.getString(5) + "\t"); + System.out.print(meta.getColumnLabel(6) + ": " + columns.getString(6) + "\t"); + System.out.print(meta.getColumnLabel(7) + ": " + columns.getString(7) + "\t"); + System.out.print(meta.getColumnLabel(9) + ": " + columns.getString(9) + "\t"); + System.out.print(meta.getColumnLabel(10) + ": " + columns.getString(10) + "\t"); + System.out.print(meta.getColumnLabel(11) + ": " + columns.getString(11) + "\n"); + System.out.print(meta.getColumnLabel(12) + ": " + columns.getString(12) + "\n"); + } + } + + @Test + public void getColumnPrivileges() throws SQLException { + Assert.assertNotNull(metaData.getColumnPrivileges("", "", "", "")); + } + + @Test + public void getTablePrivileges() throws SQLException { + Assert.assertNotNull(metaData.getTablePrivileges("", "", "")); + } + + @Test + public void getBestRowIdentifier() throws SQLException { + Assert.assertNotNull(metaData.getBestRowIdentifier("", "", "", 0, false)); + } + + @Test + public void getVersionColumns() throws SQLException { + Assert.assertNotNull(metaData.getVersionColumns("", "", "")); + } + + @Test + public void getPrimaryKeys() throws SQLException { + System.out.println("****************************************************"); + + ResultSet rs = metaData.getPrimaryKeys("log", "", "dn1"); + while (rs.next()) { + System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME")); + System.out.println("COLUMN_NAME: " + rs.getString("COLUMN_NAME")); + System.out.println("KEY_SEQ: " + rs.getString("KEY_SEQ")); + System.out.println("PK_NAME: " + rs.getString("PK_NAME")); + } + + Assert.assertNotNull(rs); + } + + @Test + public void getImportedKeys() throws SQLException { + Assert.assertNotNull(metaData.getImportedKeys("", "", "")); + } + + @Test + public void getExportedKeys() throws SQLException { + Assert.assertNotNull(metaData.getExportedKeys("", "", "")); + } + + @Test + public void getCrossReference() throws SQLException { + Assert.assertNotNull(metaData.getCrossReference("", "", "", "", "", "")); + } + + @Test + public void getTypeInfo() throws SQLException { + Assert.assertNotNull(metaData.getTypeInfo()); + } + + @Test + public void getIndexInfo() throws SQLException { + Assert.assertNotNull(metaData.getIndexInfo("", "", "", false, false)); + } + + 
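The catalog lookups above (getTables, getColumns, getPrimaryKeys) all go through the RESTful DatabaseMetaData and read their result sets positionally. A minimal standalone sketch of the same kind of lookup could look like the following; it assumes a local TDengine instance with the REST service on its default port 6041, the default root/taosdata credentials seen elsewhere in this patch, and the "log" database with table "dn" that these tests query. The class name is illustrative, and the COLUMN_NAME/TYPE_NAME labels assume the driver populates the standard JDBC getColumns() layout:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class RestfulMetaDataSketch {
        public static void main(String[] args) throws Exception {
            // Load the RESTful driver explicitly, as beforeClass() does further below.
            Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
            String url = "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata";
            try (Connection conn = DriverManager.getConnection(url)) {
                DatabaseMetaData meta = conn.getMetaData();
                // Same lookup as the getColumns() test: columns of table "dn" in database "log".
                try (ResultSet columns = meta.getColumns("log", "", "dn", "")) {
                    while (columns.next()) {
                        System.out.println(columns.getString("COLUMN_NAME")
                                + "\t" + columns.getString("TYPE_NAME"));
                    }
                }
            }
        }
    }
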
@Test + public void supportsResultSetType() throws SQLException { + Assert.assertFalse(metaData.supportsResultSetType(0)); + } + + @Test + public void supportsResultSetConcurrency() throws SQLException { + Assert.assertFalse(metaData.supportsResultSetConcurrency(0, 0)); + } + + @Test + public void ownUpdatesAreVisible() throws SQLException { + Assert.assertFalse(metaData.ownUpdatesAreVisible(0)); + } + + @Test + public void ownDeletesAreVisible() throws SQLException { + Assert.assertFalse(metaData.ownDeletesAreVisible(0)); + } + + @Test + public void ownInsertsAreVisible() throws SQLException { + Assert.assertFalse(metaData.ownInsertsAreVisible(0)); + } + + @Test + public void othersUpdatesAreVisible() throws SQLException { + Assert.assertFalse(metaData.othersUpdatesAreVisible(0)); + } + + @Test + public void othersDeletesAreVisible() throws SQLException { + Assert.assertFalse(metaData.othersDeletesAreVisible(0)); + } + + @Test + public void othersInsertsAreVisible() throws SQLException { + Assert.assertFalse(metaData.othersInsertsAreVisible(0)); + } + + @Test + public void updatesAreDetected() throws SQLException { + Assert.assertFalse(metaData.updatesAreDetected(0)); + } + + @Test + public void deletesAreDetected() throws SQLException { + Assert.assertFalse(metaData.deletesAreDetected(0)); + } + + @Test + public void insertsAreDetected() throws SQLException { + Assert.assertFalse(metaData.insertsAreDetected(0)); + } + + @Test + public void supportsBatchUpdates() throws SQLException { + Assert.assertFalse(metaData.supportsBatchUpdates()); + } + + @Test + public void getUDTs() throws SQLException { + Assert.assertNotNull(metaData.getUDTs("", "", "", null)); + } + + @Test + public void getConnection() throws SQLException { + Assert.assertNotNull(metaData.getConnection()); + } + + @Test + public void supportsSavepoints() throws SQLException { + Assert.assertFalse(metaData.supportsSavepoints()); + } + + @Test + public void supportsNamedParameters() throws SQLException { + Assert.assertFalse(metaData.supportsNamedParameters()); + } + + @Test + public void supportsMultipleOpenResults() throws SQLException { + Assert.assertFalse(metaData.supportsMultipleOpenResults()); + } + + @Test + public void supportsGetGeneratedKeys() throws SQLException { + Assert.assertFalse(metaData.supportsGetGeneratedKeys()); + } + + @Test + public void getSuperTypes() throws SQLException { + Assert.assertNotNull(metaData.getSuperTypes("", "", "")); + } + + @Test + public void getSuperTables() throws SQLException { + System.out.println("****************************************************"); + + ResultSet rs = metaData.getSuperTables("log", "", "dn1"); + while (rs.next()) { + System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME")); + System.out.println("SUPERTABLE_NAME: " + rs.getString("SUPERTABLE_NAME")); + } + Assert.assertNotNull(rs); + } + + @Test + public void getAttributes() throws SQLException { + Assert.assertNotNull(metaData.getAttributes("", "", "", "")); + } + + @Test + public void supportsResultSetHoldability() throws SQLException { + Assert.assertTrue(metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT)); + Assert.assertFalse(metaData.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT)); + } + + @Test + public void getResultSetHoldability() throws SQLException { + Assert.assertEquals(1, metaData.getResultSetHoldability()); + } + + @Test + public void getDatabaseMajorVersion() throws SQLException { + Assert.assertEquals(2, metaData.getDatabaseMajorVersion()); + } + 
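For reference, the holdability assertions above pin the driver to HOLD_CURSORS_OVER_COMMIT, whose constant value in java.sql.ResultSet is 1; that is why getResultSetHoldability() is compared against the literal 1. An equivalent, self-documenting form of the same check (test name illustrative) would be:

    @Test
    public void getResultSetHoldabilityByConstant() throws SQLException {
        // ResultSet.HOLD_CURSORS_OVER_COMMIT is defined as 1, so this is
        // the same assertion as the literal comparison above.
        Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, metaData.getResultSetHoldability());
    }
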
+ @Test + public void getDatabaseMinorVersion() throws SQLException { + Assert.assertEquals(0, metaData.getDatabaseMinorVersion()); + } + + @Test + public void getJDBCMajorVersion() throws SQLException { + Assert.assertEquals(2, metaData.getJDBCMajorVersion()); + } + + @Test + public void getJDBCMinorVersion() throws SQLException { + Assert.assertEquals(0, metaData.getJDBCMinorVersion()); + } + + @Test + public void getSQLStateType() throws SQLException { + Assert.assertEquals(0, metaData.getSQLStateType()); + } + + @Test + public void locatorsUpdateCopy() throws SQLException { + Assert.assertFalse(metaData.locatorsUpdateCopy()); + } + + @Test + public void supportsStatementPooling() throws SQLException { + Assert.assertFalse(metaData.supportsStatementPooling()); + } + + @Test + public void getRowIdLifetime() throws SQLException { + Assert.assertNull(metaData.getRowIdLifetime()); + } + + @Test + public void supportsStoredFunctionsUsingCallSyntax() throws SQLException { + Assert.assertFalse(metaData.supportsStoredFunctionsUsingCallSyntax()); + } + + @Test + public void autoCommitFailureClosesAllResultSets() throws SQLException { + Assert.assertFalse(metaData.autoCommitFailureClosesAllResultSets()); + } + + @Test + public void getClientInfoProperties() throws SQLException { + Assert.assertNotNull(metaData.getClientInfoProperties()); + } + + @Test + public void getFunctions() throws SQLException { + Assert.assertNotNull(metaData.getFunctions("", "", "")); + } + + @Test + public void getFunctionColumns() throws SQLException { + Assert.assertNotNull(metaData.getFunctionColumns("", "", "", "")); + } + + @Test + public void getPseudoColumns() throws SQLException { + Assert.assertNotNull(metaData.getPseudoColumns("", "", "", "")); + } + + @Test + public void generatedKeyAlwaysReturned() throws SQLException { + Assert.assertFalse(metaData.generatedKeyAlwaysReturned()); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection(url, properties); + metaData = connection.getMetaData().unwrap(RestfulDatabaseMetaData.class); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (connection != null) + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index a14d09588d21f5d405cbb2456c9833d2a411ea96..b199eff1baab53d9c4a8c65f7e0bb58157657d33 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -259,10 +259,12 @@ public class RestfulResultSetTest { rs.previous(); } - @Test(expected = SQLException.class) + @Test public void setFetchDirection() throws SQLException { rs.setFetchDirection(ResultSet.FETCH_FORWARD); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); rs.setFetchDirection(ResultSet.FETCH_UNKNOWN); + 
Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); } @Test @@ -270,14 +272,15 @@ public class RestfulResultSetTest { Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); } - @Test(expected = SQLException.class) + @Test public void setFetchSize() throws SQLException { rs.setFetchSize(0); + Assert.assertEquals(0, rs.getFetchSize()); } @Test public void getFetchSize() throws SQLException { - Assert.assertEquals(1, rs.getFetchSize()); + Assert.assertEquals(0, rs.getFetchSize()); } @Test @@ -526,9 +529,12 @@ public class RestfulResultSetTest { rs.updateSQLXML(1, null); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getNString() throws SQLException { - rs.getNString("f1"); + String f10 = rs.getNString("f10"); + Assert.assertEquals("涛思数据", f10); + f10 = rs.getNString(10); + Assert.assertEquals("涛思数据", f10); } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt index 2e1e39ef123c901b36050a9e239fe58d26869e8e..0d8c07041aa741793b7a1b8db20c3a3b470cf193 100644 --- a/src/connector/odbc/CMakeLists.txt +++ b/src/connector/odbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX_64) diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt index 67357cb4698b2885b563fd54133f36aace38c54b..2699e1bc90e162c80d27d690e1f7163747616526 100644 --- a/src/connector/odbc/src/CMakeLists.txt +++ b/src/connector/odbc/src/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX_64) diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py index dba234d7a4360f2cd5167261f513ddcc1c31094d..4a829f36c4bf0d6e680ed923573509cc1fad39db 100644 --- a/src/connector/python/linux/python2/setup.py +++ b/src/connector/python/linux/python2/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.5", + version="2.0.6", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/linux/python2/taos/__init__.py index 62e0536b6fca63a0aca0b6be3673104c27c631c6..973263573808232e4e71dc0158585624a8e7d2ab 100644 --- a/src/connector/python/linux/python2/taos/__init__.py +++ b/src/connector/python/linux/python2/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object @@ -21,4 +21,4 @@ def connect(*args, **kwargs): @rtype: TDengineConnector """ - return TDengineConnection(*args, **kwargs) \ No newline at end of file + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 2b1b29ee31af56499d626fc56d4950e2e25f9978..555cc3435bcbea302b34cbde09772ac5f6fe32b2 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return 
datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -18,168 +21,309 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, 
ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele 
for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) - return res + return res + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, 
micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.CDLL('libtaos.so') @@ -216,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -227,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, 
user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -236,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -255,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -268,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -293,12 +443,13 @@ class CTaosInterface(object): @rtype: 0 on success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: # CTaosInterface.libtaos.close(connection) - + @staticmethod def affectedRows(result): """The affected rows after runing query @@ -308,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. 
@topic string, name of this subscription """ @@ -360,38 +511,53 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) + @staticmethod def freeResult(result): CTaosInterface.libtaos.taos_free_result(result) diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/linux/python2/taos/connection.py index 552250f1164ced467cd29b5084524985aca8848b..f6c395342c9c39a24bda6022f0ed36cb7bfe045b 100644 --- a/src/connector/python/linux/python2/taos/connection.py +++ b/src/connector/python/linux/python2/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -29,7 +31,7 @@ class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -43,7 +45,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, 
+ self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -55,7 +62,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -80,7 +88,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/linux/python2/taos/constants.py index feb7050a40b67f88a6d7ca859764fbbc6b36af1c..93466f5184a6bf37c2e1c915a00aa5c5e91d1801 100644 --- a/src/connector/python/linux/python2/taos/constants.py +++ b/src/connector/python/linux/python2/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py index bc3b6c65d80cb2f025213f57a6eb728a182ff4a0..8f9aab82da64d24645311d1263f9abb006c737eb 100644 --- a/src/connector/python/linux/python2/taos/cursor.py +++ b/src/connector/python/linux/python2/taos/cursor.py @@ -128,8 +128,8 @@ class TDengineCursor(object): if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: self._affected_rows += CTaosInterface.affectedRows( - self._result ) - return CTaosInterface.affectedRows(self._result ) + self._result) + return CTaosInterface.affectedRows(self._result) else: self._fields = CTaosInterface.useResult( self._result) @@ -148,6 +148,7 @@ class TDengineCursor(object): """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
""" pass + def fetchmany(self): pass @@ -158,11 +159,26 @@ class TDengineCursor(object): if (dataType.upper() == "TINYINT"): if (self._description[col][1] == FieldType.C_TINYINT): return True + if (dataType.upper() == "TINYINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): + return True + if (dataType.upper() == "SMALLINT"): + if (self._description[col][1] == FieldType.C_SMALLINT): + return True + if (dataType.upper() == "SMALLINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): + return True if (dataType.upper() == "INT"): if (self._description[col][1] == FieldType.C_INT): return True + if (dataType.upper() == "INT UNSIGNED"): + if (self._description[col][1] == FieldType.C_INT_UNSIGNED): + return True if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_INT): + if (self._description[col][1] == FieldType.C_BIGINT): + return True + if (dataType.upper() == "BIGINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): return True if (dataType.upper() == "FLOAT"): if (self._description[col][1] == FieldType.C_FLOAT): @@ -191,16 +207,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) + def fetchall(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetchall") @@ -208,16 +228,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) + def nextset(self): """ """ diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/linux/python2/taos/dbapi.py index f1c22bdb512224ac712b78b15ec00207587e65c5..594681ada953abf388e503c23199043cf686e1a3 100644 --- a/src/connector/python/linux/python2/taos/dbapi.py +++ b/src/connector/python/linux/python2/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = 
DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = DBAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/linux/python2/taos/error.py index 24508a72ed78bb6231187bb6de34d57182e31b22..c584badce8320cd35dc81e8f6b613c56163b1a29 100644 --- a/src/connector/python/linux/python2/taos/error.py +++ b/src/connector/python/linux/python2/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database. """ - pass \ No newline at end of file + pass diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/linux/python2/taos/subscription.py index 2d01395532820c3bd0e068ef7eb3d425eaaa6d78..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/linux/python2/taos/subscription.py +++ b/src/connector/python/linux/python2/taos/subscription.py @@ -1,52 +1,57 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. 
""" if self._sub is None: return False - + CTaosInterface.unsubscribe(self._sub, keepProgress) return True if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py index e238372cd3e4cffa5d5d2d33660364b9addd2ee5..a865f5df856d1b416d7f78da8b1f857a967f5e61 100644 --- a/src/connector/python/linux/python3/setup.py +++ b/src/connector/python/linux/python3/setup.py @@ -5,7 +5,7 @@ with open("README.md", "r") as fh: setuptools.setup( name="taos", - version="2.0.4", + version="2.0.5", author="Taosdata Inc.", author_email="support@taosdata.com", description="TDengine python client package", diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py index 8cf095ea68dda55a071403734d39e9198a71d8a1..973263573808232e4e71dc0158585624a8e7d2ab 100644 --- a/src/connector/python/linux/python3/taos/__init__.py +++ b/src/connector/python/linux/python3/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index fdebe349fe9597260f231adb52022a2ecc9e1063..555cc3435bcbea302b34cbde09772ac5f6fe32b2 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -18,168 +21,309 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, 
ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, 
ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * 
nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) - return res + return res + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, 
+ FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.CDLL('libtaos.so') @@ -216,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -227,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -236,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -255,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -268,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -293,7 +443,8 @@ class CTaosInterface(object): @rtype: 0 on 
success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: @@ -308,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. @topic string, name of this subscription """ @@ -360,35 +511,49 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py index 552250f1164ced467cd29b5084524985aca8848b..f6c395342c9c39a24bda6022f0ed36cb7bfe045b 100644 --- a/src/connector/python/linux/python3/taos/connection.py +++ b/src/connector/python/linux/python3/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -29,7 +31,7 @@ 
class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -43,7 +45,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -55,7 +62,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -80,7 +88,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py index feb7050a40b67f88a6d7ca859764fbbc6b36af1c..93466f5184a6bf37c2e1c915a00aa5c5e91d1801 100644 --- a/src/connector/python/linux/python3/taos/constants.py +++ b/src/connector/python/linux/python3/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py index f972d2ff07f8e7c964839102a4af8b41f98d4622..2e7c362d547973bc3e78f2eb57a33b7fa2d0635e 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/linux/python3/taos/cursor.py @@ -5,6 +5,7 @@ import threading # querySeqNum = 0 + class TDengineCursor(object): """Database cursor which is used to manage the context of a fetch operation. 
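The unsigned NULL sentinels added to FieldType above are what the new *_unsigned_to_python converters compare against when mapping raw cells to None. A self-contained sketch of that mapping (the buffer is fabricated with ctypes purely for illustration):

import ctypes

C_TINYINT_UNSIGNED_NULL = 255  # mirrors FieldType.C_TINYINT_UNSIGNED_NULL above

raw = (ctypes.c_ubyte * 3)(7, C_TINYINT_UNSIGNED_NULL, 42)
data = ctypes.addressof(raw)

# same expression shape as _crow_tinyint_unsigned_to_python
cells = ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))[:3]
print([None if ele == C_TINYINT_UNSIGNED_NULL else ele for ele in cells])
# -> [7, None, 42]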
@@ -107,8 +108,8 @@ class TDengineCursor(object): # if threading.get_ident() != self._threadId: # info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) # raise OperationalError(info) - # print(info) - # return None + # print(info) + # return None if not operation: return None @@ -137,8 +138,8 @@ class TDengineCursor(object): if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: self._affected_rows += CTaosInterface.affectedRows( - self._result ) - return CTaosInterface.affectedRows(self._result ) + self._result) + return CTaosInterface.affectedRows(self._result) else: self._fields = CTaosInterface.useResult( self._result) @@ -168,11 +169,26 @@ class TDengineCursor(object): if (dataType.upper() == "TINYINT"): if (self._description[col][1] == FieldType.C_TINYINT): return True + if (dataType.upper() == "TINYINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): + return True + if (dataType.upper() == "SMALLINT"): + if (self._description[col][1] == FieldType.C_SMALLINT): + return True + if (dataType.upper() == "SMALLINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): + return True if (dataType.upper() == "INT"): if (self._description[col][1] == FieldType.C_INT): return True + if (dataType.upper() == "INT UNSIGNED"): + if (self._description[col][1] == FieldType.C_INT_UNSIGNED): + return True if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_INT): + if (self._description[col][1] == FieldType.C_BIGINT): + return True + if (dataType.upper() == "BIGINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): return True if (dataType.upper() == "FLOAT"): if (self._description[col][1] == FieldType.C_FLOAT): @@ -201,10 +217,13 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields @@ -219,15 +238,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - if num_of_fields == 0: break + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) + def nextset(self): """ """ @@ -259,8 +283,8 @@ class TDengineCursor(object): # if threading.get_ident() != self._threadId: # info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) # raise OperationalError(info) - # print(info) - # return None + # print(info) + # return None self._description = [] for ele in self._fields: @@ -268,4 +292,3 @@ class TDengineCursor(object): (ele['name'], ele['type'], None, None, None, None, 
False)) return self._result - diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py index f1c22bdb512224ac712b78b15ec00207587e65c5..594681ada953abf388e503c23199043cf686e1a3 100644 --- a/src/connector/python/linux/python3/taos/dbapi.py +++ b/src/connector/python/linux/python3/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py index 24508a72ed78bb6231187bb6de34d57182e31b22..c584badce8320cd35dc81e8f6b613c56163b1a29 100644 --- a/src/connector/python/linux/python3/taos/error.py +++ b/src/connector/python/linux/python3/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database,. 
""" - pass \ No newline at end of file + pass diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py index d3cf10d5ada578687689b94454378dd543368e3e..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/linux/python3/taos/subscription.py +++ b/src/connector/python/linux/python3/taos/subscription.py @@ -1,32 +1,33 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. """ if self._sub is None: @@ -38,15 +39,19 @@ class TDengineSubscription(object): if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..79a9d730868bfe5d3fa01d679a4abfe9ee7811f0 --- /dev/null +++ b/src/connector/python/osx/python3/LICENSE @@ -0,0 +1,12 @@ + Copyright (c) 2019 TAOS Data, Inc. + +This program is free software: you can use, redistribute, and/or modify +it under the terms of the GNU Affero General Public License, version 3 +or later ("AGPL"), as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..70db6bba13a8b52b9f707400b80d1302542dbc34 --- /dev/null +++ b/src/connector/python/osx/python3/README.md @@ -0,0 +1 @@ +# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b97f753c7ee3bc303be0db2217e87e889ef4df --- /dev/null +++ b/src/connector/python/osx/python3/setup.py @@ -0,0 +1,20 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="2.0.5", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/pypa/sampleproject", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "Operating System :: MacOS X", + ], +) diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..973263573808232e4e71dc0158585624a8e7d2ab --- /dev/null +++ b/src/connector/python/osx/python3/taos/__init__.py @@ -0,0 +1,24 @@ + +from .connection import TDengineConnection +from .cursor import TDengineCursor + +# Globals +threadsafety = 0 +paramstyle = 'pyformat' + +__all__ = ['connection', 'cursor'] + + +def connect(*args, **kwargs): + """ Function to return a TDengine connector object + + Current supporting keyword parameters: + @dsn: Data source name as string + @user: Username as string(optional) + @password: Password as string(optional) + @host: Hostname(optional) + @database: Database name(optional) + + @rtype: TDengineConnector + """ + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py new file mode 100644 index 0000000000000000000000000000000000000000..6f56cf0c5e09c14fdc9d1296c80e434ab672ef44 --- /dev/null +++ b/src/connector/python/osx/python3/taos/cinterface.py @@ -0,0 +1,642 @@ +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli / 1000.0) + + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro / 1000000.0) + + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + else: + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == 
FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert 
C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + else: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + else: + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + assert(nbytes is not None) + if num_of_rows > 0: + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + else: + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + res = [] + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + tmpstr = ctypes.c_char_p(data) + res.append(tmpstr.value.decode()) + else: + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) + except ValueError: + res.append(None) + + return res + + +def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + assert(nbytes is not None) + res = [] + if num_of_rows > 0: + for i in range(abs(num_of_rows)): + try: + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) + except ValueError: + res.append(None) + else: + for i in range(abs(num_of_rows)): + try: + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) + except ValueError: + res.append(None) + return res + + +def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + res = [] + if num_of_rows >= 0: + for i in range(abs(num_of_rows)): + try: + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) + except ValueError: + res.append(None) + else: + for i in range(abs(num_of_rows)): + try: + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 
4))))[0].value) + except ValueError: + res.append(None) + return res + + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python +} + +_CONVERT_FUNC_BLOCK = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python_block, + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python +} + +# Corresponding TAOS_FIELD structure in C + + +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 65), + ('type', ctypes.c_char), + ('bytes', ctypes.c_short)] + +# C interface class + + +class CTaosInterface(object): + + libtaos = ctypes.CDLL('libtaos.dylib') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + #libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p + libtaos.taos_fetch_lengths.restype = ctypes.c_void_p + libtaos.taos_free_result.restype = None + libtaos.taos_errno.restype = ctypes.c_int + libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config is not None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host is not None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is 
expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value is None: + print('connect to TDengine failed') + raise ConnectionError("connect to TDengine failed") + # sys.exit(1) + # else: + # print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + #print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + # finally: + # CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(result): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(result) + + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + + @staticmethod + def useResult(result): + '''Use result after calling self.query + ''' + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + if num_of_rows == 0: + return None, 0 + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: + raise DatabaseError("Invalid data type returned from database") + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def fetchRow(result, fields): + pblock = ctypes.c_void_p(0) + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: + num_of_rows = 1 + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError( + "Invalid data type returned from database") + if data is None: + blocks[i] = [None] + else: + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) + else: + return None, 0 + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(result): + return CTaosInterface.libtaos.taos_field_count(result) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # 
@staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(result): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(result) + + @staticmethod + def errStr(result): + """Return the error string + """ + return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + result = cinter.query(conn, 'show databases') + + print('Query Affected rows: {}'.format(cinter.affectedRows(result))) + + fields = CTaosInterface.useResult(result) + + data, num_of_rows = CTaosInterface.fetchBlock(result, fields) + + print(data) + + cinter.freeResult(result) + cinter.close(conn) diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f6c395342c9c39a24bda6022f0ed36cb7bfe045b --- /dev/null +++ b/src/connector/python/osx/python3/taos/connection.py @@ -0,0 +1,95 @@ +from .cursor import TDengineCursor +from .subscription import TDengineSubscription +from .cinterface import CTaosInterface + + +class TDengineConnection(object): + """ TDengine connection object + """ + + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) + + def close(self): + """Close current connection.
+ """ + return CTaosInterface.close(self._conn) + + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. + """ + pass + + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() + print("Hello world") diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..93466f5184a6bf37c2e1c915a00aa5c5e91d1801 --- /dev/null +++ b/src/connector/python/osx/python3/taos/constants.py @@ -0,0 +1,42 @@ +"""Constants in TDengine python +""" + +from .dbapi import * + + +class FieldType(object): + """TDengine Field Types + """ + # type_code + C_NULL = 0 + C_BOOL = 1 + C_TINYINT = 2 + C_SMALLINT = 3 + C_INT = 4 + C_BIGINT = 5 + C_FLOAT = 6 + C_DOUBLE = 7 + C_BINARY = 8 + C_TIMESTAMP = 9 + C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 + # NULL value definition + # NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL = 0x02 + C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 + C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 + C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 + C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 + C_FLOAT_NULL = float('nan') + C_DOUBLE_NULL = float('nan') + C_BINARY_NULL = bytearray([int('0xff', 16)]) + # Timestamp precision definition + C_TIMESTAMP_MILLI = 0 + C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..2e7c362d547973bc3e78f2eb57a33b7fa2d0635e --- /dev/null +++ b/src/connector/python/osx/python3/taos/cursor.py @@ -0,0 +1,294 @@ +from .cinterface import CTaosInterface +from .error import * +from .constants import FieldType +import threading + +# querySeqNum = 0 + + +class TDengineCursor(object): + """Database cursor which is used to manage the context of a fetch operation. + + Attributes: + .description: Read-only attribute consists of 7-item sequences: + + > name (mondatory) + > type_code (mondatory) + > display_size + > internal_size + > precision + > scale + > null_ok + + This attribute will be None for operations that do not return rows or + if the cursor has not had an operation invoked via the .execute*() method yet. 
+ + .rowcount:This read-only attribute specifies the number of rows that the last + .execute*() produced (for DQL statements like SELECT) or affected + """ + + def __init__(self, connection=None): + self._description = [] + self._rowcount = -1 + self._connection = None + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + self._affected_rows = 0 + self._logfile = "" + self._threadId = threading.get_ident() + + if connection is not None: + self._connection = connection + + def __iter__(self): + return self + + def __next__(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetch iterator") + + if self._block_rows <= self._block_iter: + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) + if self._block_rows == 0: + raise StopIteration + self._block = list(map(tuple, zip(*block))) + self._block_iter = 0 + + data = self._block[self._block_iter] + self._block_iter += 1 + + return data + + @property + def description(self): + """Return the description of the object. + """ + return self._description + + @property + def rowcount(self): + """Return the rowcount of the object + """ + return self._rowcount + + @property + def affected_rows(self): + """Return the rowcount of insertion + """ + return self._affected_rows + + def callproc(self, procname, *args): + """Call a stored database procedure with the given name. + + Void functionality since no stored procedures. + """ + pass + + def log(self, logfile): + self._logfile = logfile + + def close(self): + """Close the cursor. + """ + if self._connection is None: + return False + + self._reset_result() + self._connection = None + + return True + + def execute(self, operation, params=None): + """Prepare and execute a database operation (query or command). + """ + # if threading.get_ident() != self._threadId: + # info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + + if not operation: + return None + + if not self._connection: + # TODO : change the exception raised here + raise ProgrammingError("Cursor is not connected") + + self._reset_result() + + stmt = operation + if params is not None: + pass + + # global querySeqNum + # querySeqNum += 1 + # localSeqNum = querySeqNum # avoid raice condition + # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) + self._result = CTaosInterface.query(self._connection._conn, stmt) + # print(" << Query ({}) Exec Done".format(localSeqNum)) + if (self._logfile): + with open(self._logfile, "a") as logfile: + logfile.write("%s;\n" % operation) + + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno == 0: + if CTaosInterface.fieldsCount(self._result) == 0: + self._affected_rows += CTaosInterface.affectedRows( + self._result) + return CTaosInterface.affectedRows(self._result) + else: + self._fields = CTaosInterface.useResult( + self._result) + return self._handle_result() + else: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + """ + pass + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
+ """ + pass + + def fetchmany(self): + pass + + def istype(self, col, dataType): + if (dataType.upper() == "BOOL"): + if (self._description[col][1] == FieldType.C_BOOL): + return True + if (dataType.upper() == "TINYINT"): + if (self._description[col][1] == FieldType.C_TINYINT): + return True + if (dataType.upper() == "TINYINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): + return True + if (dataType.upper() == "SMALLINT"): + if (self._description[col][1] == FieldType.C_SMALLINT): + return True + if (dataType.upper() == "SMALLINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): + return True + if (dataType.upper() == "INT"): + if (self._description[col][1] == FieldType.C_INT): + return True + if (dataType.upper() == "INT UNSIGNED"): + if (self._description[col][1] == FieldType.C_INT_UNSIGNED): + return True + if (dataType.upper() == "BIGINT"): + if (self._description[col][1] == FieldType.C_BIGINT): + return True + if (dataType.upper() == "BIGINT UNSIGNED"): + if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): + return True + if (dataType.upper() == "FLOAT"): + if (self._description[col][1] == FieldType.C_FLOAT): + return True + if (dataType.upper() == "DOUBLE"): + if (self._description[col][1] == FieldType.C_DOUBLE): + return True + if (dataType.upper() == "BINARY"): + if (self._description[col][1] == FieldType.C_BINARY): + return True + if (dataType.upper() == "TIMESTAMP"): + if (self._description[col][1] == FieldType.C_TIMESTAMP): + return True + if (dataType.upper() == "NCHAR"): + if (self._description[col][1] == FieldType.C_NCHAR): + return True + + return False + + def fetchall_row(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. + """ + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + return list(map(tuple, zip(*buffer))) + + def fetchall(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) + errno = CTaosInterface.libtaos.taos_errno(self._result) + if errno != 0: + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + return list(map(tuple, zip(*buffer))) + + def nextset(self): + """ + """ + pass + + def setinputsize(self, sizes): + pass + + def setutputsize(self, size, column=None): + pass + + def _reset_result(self): + """Reset the result to unused version. 
+ """ + self._description = [] + self._rowcount = -1 + if self._result is not None: + CTaosInterface.freeResult(self._result) + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + self._affected_rows = 0 + + def _handle_result(self): + """Handle the return result from query. + """ + # if threading.get_ident() != self._threadId: + # info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident()) + # raise OperationalError(info) + # print(info) + # return None + + self._description = [] + for ele in self._fields: + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + + return self._result diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py new file mode 100644 index 0000000000000000000000000000000000000000..594681ada953abf388e503c23199043cf686e1a3 --- /dev/null +++ b/src/connector/python/osx/python3/taos/dbapi.py @@ -0,0 +1,44 @@ +"""Type Objects and Constructors. +""" + +import time +import datetime + + +class DBAPITypeObject(object): + def __init__(self, *values): + self.values = values + + def __com__(self, other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + + +Date = datetime.date +Time = datetime.time +Timestamp = datetime.datetime + + +def DataFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + + +Binary = bytes + +# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) +# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) +# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) +# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py new file mode 100644 index 0000000000000000000000000000000000000000..c584badce8320cd35dc81e8f6b613c56163b1a29 --- /dev/null +++ b/src/connector/python/osx/python3/taos/error.py @@ -0,0 +1,66 @@ +"""Python exceptions +""" + + +class Error(Exception): + def __init__(self, msg=None, errno=None): + self.msg = msg + self._full_msg = self.msg + self.errno = errno + + def __str__(self): + return self._full_msg + + +class Warning(Exception): + """Exception raised for important warnings like data truncations while inserting. + """ + pass + + +class InterfaceError(Error): + """Exception raised for errors that are related to the database interface rather than the database itself. + """ + pass + + +class DatabaseError(Error): + """Exception raised for errors that are related to the database. + """ + pass + + +class DataError(DatabaseError): + """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + """ + pass + + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + """ + pass + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database is affected. + """ + pass + + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal error. 
+ """ + pass + + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors. + """ + pass + + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used which is not supported by the database,. + """ + pass diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..270d9de09217fc58a389981a3542698dd1c0428a --- /dev/null +++ b/src/connector/python/osx/python3/taos/subscription.py @@ -0,0 +1,57 @@ +from .cinterface import CTaosInterface +from .error import * + + +class TDengineSubscription(object): + """TDengine subscription object + """ + + def __init__(self, sub): + self._sub = sub + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: + break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + def close(self, keepProgress=True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0, 10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py index 62e0536b6fca63a0aca0b6be3673104c27c631c6..973263573808232e4e71dc0158585624a8e7d2ab 100644 --- a/src/connector/python/windows/python2/taos/__init__.py +++ b/src/connector/python/windows/python2/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object @@ -21,4 +21,4 @@ def connect(*args, **kwargs): @rtype: TDengineConnector """ - return TDengineConnection(*args, **kwargs) \ No newline at end of file + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index 14f4f49be8ae719da16dbed7e79631db7ef9689e..d8cdce2ad138c34db5193e3972ba51d46c693254 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ 
-18,168 +21,309 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + 
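[Editorial aside: the unsigned converters added above reserve the maximum value of each width as the NULL sentinel and map it to None; 255 for unsigned tinyint matches C_TINYINT_UNSIGNED_NULL in the constants.py hunk later in this diff. A self-contained sketch of that pattern:]

import ctypes

C_TINYINT_UNSIGNED_NULL = 255  # sentinel from constants.py
raw = (ctypes.c_ubyte * 4)(1, 255, 7, 255)  # fabricated column buffer
print([None if ele == C_TINYINT_UNSIGNED_NULL else ele for ele in raw[:4]])
# -> [1, None, 7, None]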
:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return 
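[Editorial note on the c_longlong to c_long change in the bigint and timestamp converters above: ctypes.c_long is 8 bytes only on LP64 platforms; on Windows and on 32-bit Linux it is 4 bytes, so casting 64-bit columns through c_long would truncate there, which matters for these windows/ connector files. A quick check:]

import ctypes

print(ctypes.sizeof(ctypes.c_long))      # 4 on Windows, 8 on 64-bit Linux/macOS
print(ctypes.sizeof(ctypes.c_longlong))  # always 8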
[ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) - return res + return res + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: 
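[Editorial aside: the *_to_python_block converters above read each variable-length cell as a 2-byte length prefix followed by the payload; that layout is inferred from the code, not from TDengine documentation. A self-contained sketch using a fabricated buffer:]

import ctypes
import struct

payload = b"hello"
cell = ctypes.create_string_buffer(struct.pack("<h", len(payload)) + payload)
addr = ctypes.addressof(cell)
rbyte = ctypes.cast(addr, ctypes.POINTER(ctypes.c_short))[0]   # same cast as rbyte above
print(ctypes.string_at(addr + 2, rbyte).decode("utf-8"))        # -> hello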
res.append(None) else: for i in range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.windll.LoadLibrary('taos') @@ -216,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -227,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -236,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host 
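[Editorial aside: the _CONVERT_FUNC tables above are plain dispatch maps; fetch code looks a converter up by the column's type id and passes it the raw block plus row count, byte width, and precision flag. A stub sketch of the lookup, with a hypothetical converter and assuming C_INT's type id of 4 from constants.py:]

def _fake_int(data, num_of_rows, nbytes=None, micro=False):
    # stand-in for a _crow_*_to_python converter
    return list(data)[:num_of_rows]

_CONVERT = {4: _fake_int}            # 4 assumed to be FieldType.C_INT
field = {'name': 'v', 'type': 4}
print(_CONVERT[field['type']]([1, 2, 3], 3))  # -> [1, 2, 3]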
is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -255,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -268,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -293,12 +443,13 @@ class CTaosInterface(object): @rtype: 0 on success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: # CTaosInterface.libtaos.close(connection) - + @staticmethod def affectedRows(result): """The affected rows after runing query @@ -308,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. @topic string, name of this subscription """ @@ -360,35 +511,49 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + 
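[Editorial aside: fetchRow and fetchBlock above return column-major data, one Python list per field; the cursor and subscription code turn that into row tuples with zip, as sketched here:]

buffer = [[1, 2, 3], ["a", "b", "c"]]   # two columns, three rows
rows = list(map(tuple, zip(*buffer)))    # same transform as in fetchall()
print(rows)  # -> [(1, 'a'), (2, 'b'), (3, 'c')]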
raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py index d9576a553b810a975429b2cefc03e5e60f240a88..5729d01c6df8c0e58086726c4001467811e9fee5 100644 --- a/src/connector/python/windows/python2/taos/connection.py +++ b/src/connector/python/windows/python2/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -30,7 +32,7 @@ class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -44,7 +46,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -56,7 +63,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -81,7 +89,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py index a994bceaf61894ac0bf9a719a574d00a09c584a5..8a8011c3e36c52993e9d03228c2a50e2af6a7c9e 100644 --- a/src/connector/python/windows/python2/taos/constants.py +++ b/src/connector/python/windows/python2/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py index 958466985ef050df64ceecdabd994e112716ccf0..0656b6326e173b111eb8293c6e3b76678eccc0e2 100644 --- a/src/connector/python/windows/python2/taos/cursor.py +++ b/src/connector/python/windows/python2/taos/cursor.py @@ -5,6 +5,7 @@ import threading # 
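[Editorial usage sketch for the connection class above, assuming a reachable TDengine server; host and credentials are placeholders:]

import taos

conn = taos.connect(host="127.0.0.1", user="root",
                    password="taosdata", database="test")
cursor = conn.cursor()   # TDengineCursor bound to this connection
cursor.close()
conn.close()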
querySeqNum = 0 + class TDengineCursor(object): """Database cursor which is used to manage the context of a fetch operation. @@ -23,7 +24,7 @@ class TDengineCursor(object): if the cursor has not had an operation invoked via the .execute*() method yet. .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected + .execute*() produced (for DQL statements like SELECT) or affected """ def __init__(self, connection=None): @@ -50,13 +51,14 @@ class TDengineCursor(object): raise OperationalError("Invalid use of fetch iterator") if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow(self._result, self._fields) + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) if self._block_rows == 0: raise StopIteration self._block = list(map(tuple, zip(*block))) self._block_iter = 0 - data = self._block[self._block_iter] + data = self._block[self._block_iter] self._block_iter += 1 return data @@ -91,7 +93,7 @@ class TDengineCursor(object): """ if self._connection is None: return False - + self._reset_result() self._connection = None @@ -106,19 +108,20 @@ class TDengineCursor(object): if not self._connection: # TODO : change the exception raised here raise ProgrammingError("Cursor is not connected") - + self._reset_result() stmt = operation if params is not None: pass - + self._result = CTaosInterface.query(self._connection._conn, stmt) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows(self._result) - return CTaosInterface.affectedRows(self._result ) + self._affected_rows += CTaosInterface.affectedRows( + self._result) + return CTaosInterface.affectedRows(self._result) else: self._fields = CTaosInterface.useResult(self._result) return self._handle_result() @@ -147,17 +150,20 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) return list(map(tuple, zip(*buffer))) - + def fetchall(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetchall") @@ -165,20 +171,21 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - if num_of_fields == 0: break + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): """ """ @@ -209,6 +216,7 @@ class TDengineCursor(object): """ self._description = [] for ele in 
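[Editorial sketch of the execute() contract implemented above: DML returns the affected-row count, while a query keeps the fields for fetching and fills description. Assumes a connected cursor c and a hypothetical table demo.t:]

affected = c.execute("INSERT INTO demo.t VALUES (now, 1)")  # DML -> row count
c.execute("SELECT * FROM demo.t")   # query -> result set kept on the cursor
print(c.description)                # [(name, type, None, None, None, None, False), ...]
print(c.fetchall())                 # drains the result via fetchBlock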
self._fields: - self._description.append((ele['name'], ele['type'], None, None, None, None, False)) - + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + return self._result diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py index f1c22bdb512224ac712b78b15ec00207587e65c5..594681ada953abf388e503c23199043cf686e1a3 100644 --- a/src/connector/python/windows/python2/taos/dbapi.py +++ b/src/connector/python/windows/python2/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py index 24508a72ed78bb6231187bb6de34d57182e31b22..c584badce8320cd35dc81e8f6b613c56163b1a29 100644 --- a/src/connector/python/windows/python2/taos/error.py +++ b/src/connector/python/windows/python2/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database,. 
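[Editorial aside: all concrete exceptions above derive from Error, which carries msg and errno, so callers can branch on the DB-API hierarchy. A sketch, assuming a connected cursor c:]

from taos.error import ProgrammingError, OperationalError

try:
    c.execute("SELEC 1")        # malformed SQL raises ProgrammingError
except ProgrammingError as e:
    print(e.errno, str(e))      # errno and message set by Error.__init__
except OperationalError as e:
    print("server-side failure:", e)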
""" - pass \ No newline at end of file + pass diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py index d3cf10d5ada578687689b94454378dd543368e3e..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/windows/python2/taos/subscription.py +++ b/src/connector/python/windows/python2/taos/subscription.py @@ -1,32 +1,33 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. """ if self._sub is None: @@ -38,15 +39,19 @@ class TDengineSubscription(object): if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py index c6dd929a6a5110300ecd2e1042d48a5ad6a4bf46..b57e25fd2c320956e46b190d9f0a1139db1cced0 100644 --- a/src/connector/python/windows/python3/taos/__init__.py +++ b/src/connector/python/windows/python3/taos/__init__.py @@ -3,12 +3,12 @@ from .connection import TDengineConnection from .cursor import TDengineCursor # Globals -apilevel = '2.0.3' threadsafety = 0 paramstyle = 'pyformat' __all__ = ['connection', 'cursor'] + def connect(*args, **kwargs): """ Function to return a TDengine connector object @@ -21,4 +21,4 @@ def connect(*args, **kwargs): @rtype: TDengineConnector """ - return TDengineConnection(*args, **kwargs) \ No newline at end of file + return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index 42b820ca80feda4a269e5e80f288bd1f5e87adcd..d8cdce2ad138c34db5193e3972ba51d46c693254 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -4,11 +4,14 @@ from .error import * import math import datetime + def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) + return datetime.datetime.fromtimestamp(milli / 1000.0) + def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) + return datetime.datetime.fromtimestamp(micro / 1000000.0) + def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -18,170 +21,309 @@ def 
_crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_bool))[ + :abs(num_of_rows)]] + def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + + +def _crow_tinyint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] + + def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] + + +def _crow_smallint_unsigned_to_python( + data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == 
FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] + def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + else: + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + + +def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] + def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]] + + +def _crow_bigint_unsigned_to_python( + data, + num_of_rows, + nbytes=None, + micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + else: + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ulong))[ + :abs(num_of_rows)]] + def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C double row to python row """ if num_of_rows > 0: - return [ None if math.isnan(ele) 
else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): try: if num_of_rows >= 0: tmpstr = ctypes.c_char_p(data) - res.append( tmpstr.value.decode() ) + res.append(tmpstr.value.decode()) else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res - + + def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows > 0: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) else: for i in range(abs(num_of_rows)): try: - rbyte=ctypes.cast(data+nbytes*i,ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode()[0:rbyte] ) + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) except ValueError: res.append(None) return res + def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """Function to convert C nchar row to python row """ assert(nbytes is not None) - res=[] + res = [] if num_of_rows >= 0: for i in range(abs(num_of_rows)): try: - tmpstr = ctypes.c_char_p(data+nbytes*i+2) - res.append( tmpstr.value.decode() ) + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) except ValueError: res.append(None) else: for i in 
range(abs(num_of_rows)): try: - res.append( (ctypes.cast(data+nbytes*i+2, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + res.append((ctypes.cast(data + nbytes * i + 2, + ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) except ValueError: res.append(None) return res + _CONVERT_FUNC = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } _CONVERT_FUNC_BLOCK = { FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_TINYINT: _crow_tinyint_to_python, + FieldType.C_SMALLINT: _crow_smallint_to_python, + FieldType.C_INT: _crow_int_to_python, + FieldType.C_BIGINT: _crow_bigint_to_python, + FieldType.C_FLOAT: _crow_float_to_python, + FieldType.C_DOUBLE: _crow_double_to_python, FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python_block + FieldType.C_TIMESTAMP: _crow_timestamp_to_python, + FieldType.C_NCHAR: _crow_nchar_to_python_block, + FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, + FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, + FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, + FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python } # Corresponding TAOS_FIELD structure in C + + class TaosField(ctypes.Structure): _fields_ = [('name', ctypes.c_char * 65), ('type', ctypes.c_char), ('bytes', ctypes.c_short)] # C interface class + + class CTaosInterface(object): libtaos = ctypes.windll.LoadLibrary('taos') @@ -218,7 +360,7 @@ class CTaosInterface(object): except AttributeError: raise AttributeError("config is expected as a str") - if config != None: + if config is not None: CTaosInterface.libtaos.taos_options(3, self._config) CTaosInterface.libtaos.taos_init() @@ -229,7 +371,13 @@ class CTaosInterface(object): """ return self._config - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + def connect( + self, + host=None, + user="root", + password="taosdata", + db=None, + port=0): ''' Function to connect to server @@ -238,7 +386,7 @@ class CTaosInterface(object): # host try: _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) + "utf-8")) if host is not None else 
ctypes.c_char_p(None) except AttributeError: raise AttributeError("host is expected as a str") @@ -257,7 +405,7 @@ class CTaosInterface(object): # db try: _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) except AttributeError: raise AttributeError("db is expected as a str") @@ -270,11 +418,11 @@ class CTaosInterface(object): connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( _host, _user, _password, _db, _port)) - if connection.value == None: + if connection.value is None: print('connect to TDengine failed') raise ConnectionError("connect to TDengine failed") # sys.exit(1) - #else: + # else: # print('connect to TDengine success') return connection @@ -295,7 +443,8 @@ class CTaosInterface(object): @rtype: 0 on success and -1 on failure ''' try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + return CTaosInterface.libtaos.taos_query( + connection, ctypes.c_char_p(sql.encode('utf-8'))) except AttributeError: raise AttributeError("sql is expected as a string") # finally: @@ -310,7 +459,7 @@ class CTaosInterface(object): @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription - @restart boolean, + @restart boolean, @sql string, sql statement for data query, must be a 'select' statement. @topic string, name of this subscription """ @@ -362,35 +511,49 @@ class CTaosInterface(object): result, ctypes.byref(pblock)) if num_of_rows == 0: return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) return blocks, abs(num_of_rows) + @staticmethod def fetchRow(result, fields): pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock : + pblock = CTaosInterface.libtaos.taos_fetch_row(result) + if pblock: num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + isMicro = (CTaosInterface.libtaos.taos_result_precision( + result) == FieldType.C_TIMESTAMP_MICRO) blocks = [None] * len(fields) fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ele for ele in ctypes.cast(fieldL, ctypes.POINTER(ctypes.c_int))[:len(fields)]] + fieldLen = [ + ele for ele in ctypes.cast( + fieldL, ctypes.POINTER( + ctypes.c_int))[ + :len(fields)]] for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") + raise DatabaseError( + "Invalid data type returned from database") if data is None: blocks[i] = [None] else: - blocks[i] = 
_CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fieldLen[i], isMicro) + blocks[i] = _CONVERT_FUNC[fields[i]['type']]( + data, num_of_rows, fieldLen[i], isMicro) else: return None, 0 return blocks, abs(num_of_rows) @@ -476,4 +639,4 @@ if __name__ == '__main__': print(data) cinter.freeResult(result) - cinter.close(conn) \ No newline at end of file + cinter.close(conn) diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py index d9576a553b810a975429b2cefc03e5e60f240a88..5729d01c6df8c0e58086726c4001467811e9fee5 100644 --- a/src/connector/python/windows/python3/taos/connection.py +++ b/src/connector/python/windows/python3/taos/connection.py @@ -2,9 +2,11 @@ from .cursor import TDengineCursor from .subscription import TDengineSubscription from .cinterface import CTaosInterface + class TDengineConnection(object): """ TDengine connection object """ + def __init__(self, *args, **kwargs): self._conn = None self._host = None @@ -30,7 +32,7 @@ class TDengineConnection(object): # password if 'password' in kwargs: self._password = kwargs['password'] - + # database if 'database' in kwargs: self._database = kwargs['database'] @@ -44,7 +46,12 @@ class TDengineConnection(object): self._config = kwargs['config'] self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + self._conn = self._chandle.connect( + self._host, + self._user, + self._password, + self._database, + self._port) def close(self): """Close current connection. @@ -56,7 +63,8 @@ class TDengineConnection(object): """ if self._conn is None: return None - sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + sub = CTaosInterface.subscribe( + self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) def cursor(self): @@ -81,7 +89,8 @@ class TDengineConnection(object): """ pass + if __name__ == "__main__": conn = TDengineConnection(host='192.168.1.107') conn.close() - print("Hello world") \ No newline at end of file + print("Hello world") diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py index def2bbc0a8c063e85214a634b60c5db7a5fd1259..49fc17b2fb98a6684e74e4a044651fdc6237518e 100644 --- a/src/connector/python/windows/python3/taos/constants.py +++ b/src/connector/python/windows/python3/taos/constants.py @@ -3,6 +3,7 @@ from .dbapi import * + class FieldType(object): """TDengine Field Types """ @@ -18,13 +19,21 @@ class FieldType(object): C_BINARY = 8 C_TIMESTAMP = 9 C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 # NULL value definition # NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL = 0x02 C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 C_FLOAT_NULL = float('nan') C_DOUBLE_NULL = float('nan') C_BINARY_NULL = bytearray([int('0xff', 16)]) diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py index bbac1b1dd5158a5f074325a72f15f6993e97e9da..769cb7cf0f61fe850c16315bf552162f33536502 100644 --- a/src/connector/python/windows/python3/taos/cursor.py +++ b/src/connector/python/windows/python3/taos/cursor.py @@ 
-24,7 +24,7 @@ class TDengineCursor(object): if the cursor has not had an operation invoked via the .execute*() method yet. .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected + .execute*() produced (for DQL statements like SELECT) or affected """ def __init__(self, connection=None): @@ -51,13 +51,14 @@ class TDengineCursor(object): raise OperationalError("Invalid use of fetch iterator") if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow(self._result, self._fields) + block, self._block_rows = CTaosInterface.fetchRow( + self._result, self._fields) if self._block_rows == 0: raise StopIteration self._block = list(map(tuple, zip(*block))) self._block_iter = 0 - data = self._block[self._block_iter] + data = self._block[self._block_iter] self._block_iter += 1 return data @@ -92,7 +93,7 @@ class TDengineCursor(object): """ if self._connection is None: return False - + self._reset_result() self._connection = None @@ -107,24 +108,25 @@ class TDengineCursor(object): if not self._connection: # TODO : change the exception raised here raise ProgrammingError("Cursor is not connected") - + self._reset_result() stmt = operation if params is not None: pass - + self._result = CTaosInterface.query(self._connection._conn, stmt) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno == 0: if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows(self._result ) - return CTaosInterface.affectedRows(self._result ) + self._affected_rows += CTaosInterface.affectedRows( + self._result) + return CTaosInterface.affectedRows(self._result) else: - self._fields = CTaosInterface.useResult(self._result ) + self._fields = CTaosInterface.useResult(self._result) return self._handle_result() else: - raise ProgrammingError(CTaosInterface.errStr(self._result ), errno) + raise ProgrammingError(CTaosInterface.errStr(self._result), errno) def executemany(self, operation, seq_of_parameters): """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. 
@@ -148,10 +150,13 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchRow(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchRow( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) if num_of_fields == 0: break self._rowcount += num_of_fields @@ -166,20 +171,21 @@ class TDengineCursor(object): buffer = [[] for i in range(len(self._fields))] self._rowcount = 0 while True: - block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + block, num_of_fields = CTaosInterface.fetchBlock( + self._result, self._fields) errno = CTaosInterface.libtaos.taos_errno(self._result) if errno != 0: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - if num_of_fields == 0: break + raise ProgrammingError( + CTaosInterface.errStr( + self._result), errno) + if num_of_fields == 0: + break self._rowcount += num_of_fields for i in range(len(self._fields)): buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): """ """ @@ -204,12 +210,13 @@ class TDengineCursor(object): self._block_rows = -1 self._block_iter = 0 self._affected_rows = 0 - + def _handle_result(self): """Handle the return result from query. """ self._description = [] for ele in self._fields: - self._description.append((ele['name'], ele['type'], None, None, None, None, False)) - + self._description.append( + (ele['name'], ele['type'], None, None, None, None, False)) + return self._result diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py index 9b1cb1321c14619782c801e9381010f1f67fbc2e..a29621f7a3594a618b59b30bdc96197c4222a619 100644 --- a/src/connector/python/windows/python3/taos/dbapi.py +++ b/src/connector/python/windows/python3/taos/dbapi.py @@ -4,6 +4,7 @@ import time import datetime + class DBAPITypeObject(object): def __init__(self, *values): self.values = values @@ -16,23 +17,28 @@ class DBAPITypeObject(object): else: return -1 + Date = datetime.date Time = datetime.time Timestamp = datetime.datetime + def DataFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) + def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) + def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) + Binary = bytes # STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) # BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) # NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) # DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() \ No newline at end of file +# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py index ccc0e61d84597a7cae9b360b7fa2434b9bc47401..238b293a0b609570e7b5d536648c6ada3ca2f209 100644 --- a/src/connector/python/windows/python3/taos/error.py +++ b/src/connector/python/windows/python3/taos/error.py @@ -1,35 +1,41 @@ """Python exceptions """ + class Error(Exception): def __init__(self, msg=None, errno=None): self.msg = msg self._full_msg = self.msg self.errno = errno - + def __str__(self): return self._full_msg + class Warning(Exception): """Exception raised for important warnings like data 
truncations while inserting. """ pass + class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. + """Exception raised for errors that are related to the database interface rather than the database itself. """ pass + class DatabaseError(Error): - """Exception raised for errors that are related to the database. + """Exception raised for errors that are related to the database. """ pass + class DataError(DatabaseError): """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. """ pass + class OperationalError(DatabaseError): """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer """ @@ -41,17 +47,20 @@ class IntegrityError(DatabaseError): """ pass + class InternalError(DatabaseError): """Exception raised when the database encounters an internal error. """ pass + class ProgrammingError(DatabaseError): """Exception raised for programming errors. """ pass + class NotSupportedError(DatabaseError): """Exception raised in case a method or database API was used which is not supported by the database,. """ - pass \ No newline at end of file + pass diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py index d3cf10d5ada578687689b94454378dd543368e3e..270d9de09217fc58a389981a3542698dd1c0428a 100644 --- a/src/connector/python/windows/python3/taos/subscription.py +++ b/src/connector/python/windows/python3/taos/subscription.py @@ -1,32 +1,33 @@ from .cinterface import CTaosInterface from .error import * + class TDengineSubscription(object): """TDengine subscription object """ + def __init__(self, sub): self._sub = sub - def consume(self): """Consume rows of a subscription """ if self._sub is None: raise OperationalError("Invalid use of consume") - + result, fields = CTaosInterface.consume(self._sub) buffer = [[] for i in range(len(fields))] while True: block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: break + if num_of_fields == 0: + break for i in range(len(fields)): buffer[i].extend(block[i]) self.fields = fields return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress = True): + def close(self, keepProgress=True): """Close the Subscription. 
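        If keepProgress is True, the consume position is presumably retained on
        the server (mirroring the keepProgress flag of the C client's
        taos_unsubscribe), so a later subscribe with restart=False can resume
        where this subscription stopped (a hedged note, not part of the patch).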
""" if self._sub is None: @@ -38,15 +39,19 @@ class TDengineSubscription(object): if __name__ == '__main__': from .connection import TDengineConnection - conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + conn = TDengineConnection( + host="127.0.0.1", + user="root", + password="taosdata", + database="test") # Generate a cursor object to run SQL commands sub = conn.subscribe(True, "test", "select * from meters;", 1000) - for i in range(0,10): + for i in range(0, 10): data = sub.consume() for d in data: print(d) sub.close() - conn.close() \ No newline at end of file + conn.close() diff --git a/src/cq/CMakeLists.txt b/src/cq/CMakeLists.txt index 73d5eebd6d3d20d8ed4e0c150d8873c27c217d51..e9ed2996c74e2c59d56245e6fc1e932ebb07dfb0 100644 --- a/src/cq/CMakeLists.txt +++ b/src/cq/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index 0dc330091157ddf74d620969e3977d89b7c73859..fb0c1508cb051c577fed9bde534d55e297822fe2 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -97,7 +97,7 @@ static void freeSCqContext(void *handle) { } SCqContext *pContext = handle; pthread_mutex_destroy(&pContext->mutex); - + taosTmrCleanUp(pContext->tmrCtrl); pContext->tmrCtrl = NULL; cDebug("vgId:%d, CQ is closed", pContext->vgId); @@ -203,7 +203,7 @@ void cqClose(void *handle) { pContext->delete = 1; int32_t hasCq = 0; int32_t existLoop = 0; - + // stop all CQs cqStop(pContext); @@ -223,7 +223,7 @@ void cqClose(void *handle) { if (pContext->pHead == NULL) { existLoop = 1; } - } else { + } else { pthread_mutex_unlock(&pContext->mutex); break; } @@ -267,6 +267,7 @@ void cqStop(void *handle) { if (tsEnableStream == 0) { return; } + SCqContext *pContext = handle; cDebug("vgId:%d, stop all CQs", pContext->vgId); if (pContext->dbConn == NULL || pContext->master == 0) return; diff --git a/src/cq/test/CMakeLists.txt b/src/cq/test/CMakeLists.txt index fc3a1ea93a72a9ab7791e596688a7a4a1dd8b77f..cd124567afd8766173cf07e7a6191ab473be1714 100644 --- a/src/cq/test/CMakeLists.txt +++ b/src/cq/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) LIST(APPEND CQTEST_SRC ./cqtest.c) diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index b010c0c36353636ffb9081583a3ed808f0d21719..644a4e875d62622c07034639a4e08e584e99fdfb 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) diff --git a/src/inc/taos.h b/src/inc/taos.h index 05d390ffd0cdf4bf0acab82714c867777d9593d4..cd8e116053bd9adabda9a1eeeb20c6d92679d99d 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -68,7 +68,7 @@ typedef struct taosField { #define DLL_EXPORT #endif -DLL_EXPORT void taos_init(); +DLL_EXPORT int taos_init(); DLL_EXPORT void taos_cleanup(void); DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 031857df094384bfbc433dea33e00668b0885c03..8bb9cde935cdad39aa7318517487a46a108aaa23 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -122,8 +122,8 @@ #define TK_UNSIGNED 103 #define TK_TAGS 104 #define TK_USING 
105 -#define TK_AS 106 -#define TK_COMMA 107 +#define TK_COMMA 106 +#define TK_AS 107 #define TK_NULL 108 #define TK_SELECT 109 #define TK_UNION 110 @@ -228,6 +228,11 @@ #define TK_VALUES 209 + + + + + #define TK_SPACE 300 #define TK_COMMENT 301 #define TK_ILLEGAL 302 diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index bf77d856f9f772aeffb42f7f85d51a5841943076..66e8cf73988ab25db7544b9a52215d2279630c63 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -1,7 +1,6 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(shell) ADD_SUBDIRECTORY(taosdemo) -ADD_SUBDIRECTORY(taosdemox) ADD_SUBDIRECTORY(taosdump) diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index c4f3cc5696a1442b38d7511ff69ddd7ff557b896..b6babc5bc53aa254e0372dbbfd235bdd4cef878a 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 716a317fca03f4a3d8c1067e1eba781208689901..1f3eb7927cc3554db132138253d66f5a01868e9b 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -76,7 +76,11 @@ TAOS *shellInit(SShellArguments *args) { args->user = TSDB_DEFAULT_USER; } - taos_init(); + if (taos_init()) { + printf("failed to init taos\n"); + fflush(stdout); + return NULL; + } // Connect to the database. TAOS *con = NULL; @@ -337,7 +341,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } else { int num_rows_affacted = taos_affected_rows(pSql); et = taosGetTimestampUs(); - printf("Query OK, %d row(s) affected (%.6fs)\n", num_rows_affacted, (et - st) / 1E6); + printf("Query OK, %d of %d row(s) in database (%.6fs)\n", num_rows_affacted, num_rows_affacted, (et - st) / 1E6); } printf("\n"); @@ -387,10 +391,13 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } time_t tt; + int32_t ms = 0; if (precision == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)(val / 1000000); + ms = val % 1000000; } else { tt = (time_t)(val / 1000); + ms = val % 1000; } /* comment out as it make testcases like select_with_tags.sim fail. 
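(Hedged worked example, not part of the patch, of the `ms` remainder introduced above and the negative-timestamp fix in the hunk below; the value is illustrative.)

    int64_t val = -1;                            /* 1 ms before the Unix epoch       */
    time_t  tt  = (time_t)(val / 1000);          /* 0, since C truncates toward zero */
    int32_t ms  = val % 1000;                    /* -1                               */
    if (tt < 0 && ms != 0) { tt--; ms += 1000; } /* fix below: tt = -1, ms = 999     */
    /* strftime/sprintf then render 1969-12-31 23:59:59.999 rather than a negative fraction */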
@@ -404,14 +411,22 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { #ifdef WINDOWS if (tt < 0) tt = 0; #endif + if (tt < 0 && ms != 0) { + tt--; + if (precision == TSDB_TIME_PRECISION_MICRO) { + ms += 1000000; + } else { + ms += 1000; + } + } struct tm* ptm = localtime(&tt); size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); + sprintf(buf + pos, ".%06d", ms); } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); + sprintf(buf + pos, ".%03d", ms); } return buf; diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 49de42796caf31f426c60945d7d252fd10372f28..4c7e550760cecb7c045cb8c94fc431cb5f91812b 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -110,7 +110,10 @@ int main(int argc, char* argv[]) { } if (args.netTestRole && args.netTestRole[0] != 0) { - taos_init(); + if (taos_init()) { + printf("Failed to init taos"); + exit(EXIT_FAILURE); + } taosNetTest(args.netTestRole, args.host, args.port, args.pktLen); exit(0); } diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index f74dbc2de4c045e209fe0510207d1f38a89d7a6c..7e85ec6dacc7bb33a1599d28e44d4f42bcb0a9bf 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -1,29 +1,55 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) -INCLUDE_DIRECTORIES(inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include) IF (TD_LINUX) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdemo ${SRC}) + #find_program(HAVE_CURL NAMES curl) + IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32)) + ADD_DEFINITIONS(-DTD_LOWA_CURL) + LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib) + ADD_LIBRARY(curl STATIC IMPORTED) + SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a) + TARGET_LINK_LIBRARIES(taosdemo curl) + ENDIF () + IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static) + TARGET_LINK_LIBRARIES(taosdemo taos_static cJson) ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos) + TARGET_LINK_LIBRARIES(taosdemo taos cJson) ENDIF () ELSEIF (TD_WINDOWS) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdemo ${SRC}) - TARGET_LINK_LIBRARIES(taosdemo taos_static) -ELSEIF (TD_DARWIN) - AUX_SOURCE_DIRECTORY(. SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - + SET_SOURCE_FILES_PROPERTIES(./taosdemo.c PROPERTIES COMPILE_FLAGS -w) + find_library(LIBCURL_A libcurl_a HINTS ${TD_COMMUNITY_DIR}/deps/libcurl/lib/win64) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static) + TARGET_LINK_LIBRARIES(taosdemo taos_static cJson ${LIBCURL_A}) ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos) + TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LIBCURL_A}) ENDIF () +ELSEIF (TD_DARWIN) + # missing a few dependencies, such as + # AUX_SOURCE_DIRECTORY(. 
SRC) + # ADD_EXECUTABLE(taosdemo ${SRC}) + # + # #find_program(HAVE_CURL NAMES curl) + # IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32)) + # ADD_DEFINITIONS(-DTD_LOWA_CURL) + # LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib) + # ADD_LIBRARY(curl STATIC IMPORTED) + # SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a) + # TARGET_LINK_LIBRARIES(taosdemo curl) + # ENDIF () + # + # IF (TD_SOMODE_STATIC) + # TARGET_LINK_LIBRARIES(taosdemo taos_static cJson) + # ELSE () + # TARGET_LINK_LIBRARIES(taosdemo taos cJson) + # ENDIF () ENDIF () + diff --git a/src/kit/taosdemox/insert.json b/src/kit/taosdemo/insert.json similarity index 96% rename from src/kit/taosdemox/insert.json rename to src/kit/taosdemo/insert.json index aa071c115d60d78797b4c36456adfacd0d345af7..56a64b7b8561877cb26b4ef2336ab8b98f26c02c 100644 --- a/src/kit/taosdemox/insert.json +++ b/src/kit/taosdemo/insert.json @@ -8,6 +8,7 @@ "thread_count": 4, "thread_count_create_tbl": 1, "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", "databases": [{ "dbinfo": { "name": "db", diff --git a/src/kit/taosdemox/query.json b/src/kit/taosdemo/query.json similarity index 92% rename from src/kit/taosdemox/query.json rename to src/kit/taosdemo/query.json index b7b08edfc912bdccc12bc6b6672d62a8ee4ad417..4a5403a55d520681569b17adbd96c69bf411a15b 100644 --- a/src/kit/taosdemox/query.json +++ b/src/kit/taosdemo/query.json @@ -5,6 +5,7 @@ "port": 6030, "user": "root", "password": "taosdata", + "confirm_parameter_prompt": "yes", "databases": "db01", "specified_table_query": {"query_interval":1, "concurrent":1, diff --git a/src/kit/taosdemox/subscribe.json b/src/kit/taosdemo/subscribe.json similarity index 100% rename from src/kit/taosdemox/subscribe.json rename to src/kit/taosdemo/subscribe.json diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index abcadd64b10e21fb6fe14767d374bfacbf744101..f5f2a02fb39fabd70a93a7d740b3a4b3d86bd411 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -13,7 +13,17 @@ * along with this program. If not, see . */ + +/* + when a query returns an error in some thread, that thread must not exit but simply return; otherwise the other threads may core dump.
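+
+   A hedged illustration of that policy (the worker function and the `taos`/`sql`
+   variables are made up for this sketch, not taken from the patch):
+
+     static void *queryWorker(void *arg) {
+       TAOS_RES *res = taos_query(taos, sql);     // same client API used throughout this file
+       if (res == NULL || taos_errno(res) != 0) {
+         taos_free_result(res);                   // release the failed result
+         return NULL;                             // return, do not exit the process
+       }
+       // ... consume rows, then taos_free_result(res) ...
+       return NULL;
+     }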
+*/ + #define _GNU_SOURCE +#define CURL_STATICLIB + +#ifdef TD_LOWA_CURL +#include "curl/curl.h" +#endif #ifdef LINUX #include "os.h" @@ -39,24 +49,178 @@ #include #include #include "os.h" + + #pragma comment ( lib, "libcurl_a.lib" ) + #pragma comment ( lib, "ws2_32.lib" ) + #pragma comment ( lib, "winmm.lib" ) + #pragma comment ( lib, "wldap32.lib" ) #endif +#include "cJSON.h" + #include "taos.h" #include "tutil.h" +#ifdef WINDOWS +#include +// Some old MinGW/CYGWIN distributions don't define this: +#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#endif +static HANDLE g_stdoutHandle; +static DWORD g_consoleMode; + +void setupForAnsiEscape(void) { + DWORD mode = 0; + g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); + + if(g_stdoutHandle == INVALID_HANDLE_VALUE) { + exit(GetLastError()); + } + + if(!GetConsoleMode(g_stdoutHandle, &mode)) { + exit(GetLastError()); + } + + g_consoleMode = mode; + + // Enable ANSI escape codes + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + + if(!SetConsoleMode(g_stdoutHandle, mode)) { + exit(GetLastError()); + } +} + +void resetAfterAnsiEscape(void) { + // Reset colors + printf("\x1b[0m"); + + // Reset console mode + if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { + exit(GetLastError()); + } +} +#else +void setupForAnsiEscape(void) {} + +void resetAfterAnsiEscape(void) { + // Reset colors + printf("\x1b[0m"); +} +#endif + extern char configDir[]; -#define BUFFER_SIZE 65536 -#define MAX_DB_NAME_SIZE 64 -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE 16000 -#define MAX_NUM_DATATYPE 10 -#define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 -#define MAX_PREPARED_RAND 1000000 +#define INSERT_JSON_NAME "insert.json" +#define QUERY_JSON_NAME "query.json" +#define SUBSCRIBE_JSON_NAME "subscribe.json" + +#define INSERT_MODE 0 +#define QUERY_MODE 1 +#define SUBSCRIBE_MODE 2 + +#define MAX_SQL_SIZE 65536 +#define BUFFER_SIZE (65536*2) +#define MAX_DB_NAME_SIZE 64 +#define MAX_TB_NAME_SIZE 64 +#define MAX_DATA_SIZE 16000 +#define MAX_NUM_DATATYPE 10 +#define OPT_ABORT 1 /* –abort */ +#define STRING_LEN 60000 +#define MAX_PREPARED_RAND 1000000 +//#define MAX_SQL_SIZE 65536 +#define MAX_FILE_NAME_LEN 256 + +#define MAX_SAMPLES_ONCE_FROM_FILE 10000 +#define MAX_NUM_DATATYPE 10 + +#define MAX_DB_COUNT 8 +#define MAX_SUPER_TABLE_COUNT 8 +#define MAX_COLUMN_COUNT 1024 +#define MAX_TAG_COUNT 128 + +#define MAX_QUERY_SQL_COUNT 10 +#define MAX_QUERY_SQL_LENGTH 256 + +#define MAX_DATABASE_COUNT 256 + +typedef enum CREATE_SUB_TALBE_MOD_EN { + PRE_CREATE_SUBTBL, + AUTO_CREATE_SUBTBL, + NO_CREATE_SUBTBL +} CREATE_SUB_TALBE_MOD_EN; + +typedef enum TALBE_EXISTS_EN { + TBL_ALREADY_EXISTS, + TBL_NO_EXISTS, + TBL_EXISTS_BUTT +} TALBE_EXISTS_EN; + +enum MODE { + SYNC, + ASYNC, + MODE_BUT +}; + +enum QUERY_TYPE { + NO_INSERT_TYPE, + INSERT_TYPE, + QUERY_TYPE_BUT +} ; + +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES 
CONFIGURE ------------------------------------- +enum _show_stables_index { + TSDB_SHOW_STABLES_NAME_INDEX, + TSDB_SHOW_STABLES_CREATED_TIME_INDEX, + TSDB_SHOW_STABLES_COLUMNS_INDEX, + TSDB_SHOW_STABLES_METRIC_INDEX, + TSDB_SHOW_STABLES_UID_INDEX, + TSDB_SHOW_STABLES_TID_INDEX, + TSDB_SHOW_STABLES_VGID_INDEX, + TSDB_MAX_SHOW_STABLES +}; +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +typedef struct { + char field[TSDB_COL_NAME_LEN + 1]; + char type[16]; + int length; + char note[128]; +} SColDes; /* Used by main to communicate with parse_opt. */ -typedef struct DemoArguments { +typedef struct SArguments_S { + char * metaFile; char * host; uint16_t port; char * user; @@ -78,370 +242,428 @@ typedef struct DemoArguments { int num_of_tables; int num_of_DPT; int abort; - int order; - int rate; + int disorderRatio; + int disorderRange; int method_of_delete; char ** arg_list; -} SDemoArguments; +} SArguments; + +typedef struct SColumn_S { + char field[TSDB_COL_NAME_LEN + 1]; + char dataType[MAX_TB_NAME_SIZE]; + int dataLen; + char note[128]; +} StrColumn; + +typedef struct SSuperTable_S { + char sTblName[MAX_TB_NAME_SIZE]; + int childTblCount; + bool superTblExists; // 0: no, 1: yes + bool childTblExists; // 0: no, 1: yes + int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table + char childTblPrefix[MAX_TB_NAME_SIZE]; + char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample + char insertMode[MAX_TB_NAME_SIZE]; // taosc, restful + int insertRate; // 0: unlimit > 0 rows/s + + int multiThreadWriteOneTbl; // 0: no, 1: yes + int numberOfTblInOneSql; // 0/1: one table, > 1: number of tbl + int rowsPerTbl; // + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms or us by database precision + int maxSqlLen; // + + int64_t insertRows; // 0: no limit + int timeStampStep; + char startTimestamp[MAX_TB_NAME_SIZE]; // + char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json + char sampleFile[MAX_FILE_NAME_LEN]; + char tagsFile[MAX_FILE_NAME_LEN]; + + int columnCount; + StrColumn columns[MAX_COLUMN_COUNT]; + int tagCount; + StrColumn tags[MAX_TAG_COUNT]; + + char* childTblName; + char* colsOfCreatChildTable; + int lenOfOneRow; + int lenOfTagOfOneRow; + + char* sampleDataBuf; + int sampleDataBufSize; + //int sampleRowCount; + //int sampleUsePos; + + int tagSource; // 0: rand, 1: tag sample + char* tagDataBuf; + int tagSampleCount; + int tagUsePos; + + // statistics + int64_t totalRowsInserted; + int64_t totalAffectedRows; +} SSuperTable; + +typedef struct { + char name[TSDB_DB_NAME_LEN + 1]; + char create_time[32]; + int32_t ntables; + int32_t vgroups; + int16_t replica; + int16_t quorum; + int16_t days; + char keeplist[32]; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + int8_t cachelast; + char precision[8]; // time resolution + int8_t update; + char status[16]; +} SDbInfo; + +typedef struct SDbCfg_S { +// int maxtablesPerVnode; + int minRows; + int maxRows; + int comp; + int walLevel; + int fsync; + int replica; + int update; + int keep; + int days; + int cache; + int blocks; + int quorum; + char precision[MAX_TB_NAME_SIZE]; +} SDbCfg; + +typedef struct SDataBase_S { + char dbName[MAX_DB_NAME_SIZE]; + int drop; // 0: use exists, 1: if exists, drop then new 
create + SDbCfg dbCfg; + int superTblCount; + SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; +} SDataBase; + +typedef struct SDbs_S { + char cfgDir[MAX_FILE_NAME_LEN]; + char host[MAX_DB_NAME_SIZE]; + uint16_t port; + char user[MAX_DB_NAME_SIZE]; + char password[MAX_DB_NAME_SIZE]; + char resultFile[MAX_FILE_NAME_LEN]; + bool use_metric; + bool insert_only; + bool do_aggreFunc; + bool queryMode; + + int threadCount; + int threadCountByCreateTbl; + int dbCount; + SDataBase db[MAX_DB_COUNT]; + + // statistics + int64_t totalRowsInserted; + int64_t totalAffectedRows; + +} SDbs; + +typedef struct SuperQueryInfo_S { + int rate; // 0: unlimit > 0 loop/s + int concurrent; + int sqlCount; + int subscribeMode; // 0: sync, 1: async + int subscribeInterval; // ms + int subscribeRestart; + int subscribeKeepProgress; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; +} SuperQueryInfo; + +typedef struct SubQueryInfo_S { + char sTblName[MAX_TB_NAME_SIZE]; + int rate; // 0: unlimit > 0 loop/s + int threadCnt; + int subscribeMode; // 0: sync, 1: async + int subscribeInterval; // ms + int subscribeRestart; + int subscribeKeepProgress; + int childTblCount; + char childTblPrefix[MAX_TB_NAME_SIZE]; + int sqlCount; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + + char* childTblName; +} SubQueryInfo; + +typedef struct SQueryMetaInfo_S { + char cfgDir[MAX_FILE_NAME_LEN]; + char host[MAX_DB_NAME_SIZE]; + uint16_t port; + char user[MAX_DB_NAME_SIZE]; + char password[MAX_DB_NAME_SIZE]; + char dbName[MAX_DB_NAME_SIZE]; + char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful + + SuperQueryInfo superQueryInfo; + SubQueryInfo subQueryInfo; +} SQueryMetaInfo; + +typedef struct SThreadInfo_S { + TAOS *taos; + #ifdef TD_LOWA_CURL + CURL *curl_handle; + #endif + int threadID; + char db_name[MAX_DB_NAME_SIZE]; + char fp[4096]; + char tb_prefix[MAX_TB_NAME_SIZE]; + int start_table_id; + int end_table_id; + int data_of_rate; + int64_t start_time; + char* cols; + bool use_metric; + SSuperTable* superTblInfo; + + // for async insert + tsem_t lock_sem; + int64_t counter; + int64_t st; + int64_t et; + int64_t lastTs; + int nrecords_per_request; + + // statistics + int64_t totalRowsInserted; + int64_t totalAffectedRows; + + // insert delay statistics + int64_t cntDelay; + int64_t totalDelay; + int64_t avgDelay; + int64_t maxDelay; + int64_t minDelay; + +} threadInfo; + +typedef struct curlMemInfo_S { + char *buf; + size_t sizeleft; + } curlMemInfo; + + #ifdef LINUX /* The options we understand. */ static struct argp_option options[] = { - {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 0}, - {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1}, - {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2}, - #ifdef _TD_POWER_ - {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 3}, - #else - {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3}, - #endif - {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3}, - {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3}, - {0, 'm', "table_prefix", 0, "Table prefix name. 
Default is 't'.", 3}, - {0, 's', "sql file", 0, "The select sql file.", 3}, - {0, 'M', 0, 0, "Use metric flag.", 13}, - {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14}, - {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6}, - {0, 'b', "type_of_cols", 0, "The data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'.", 7}, - {0, 'w', "length_of_binary", 0, "The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8", 8}, - {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 3.", 8}, - {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 9}, - {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10}, - {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11}, - {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12}, + {0, 'f', "meta file", 0, "The meta file that drives the execution; if -f is used, all other options are ignored. Default is NULL.", 0}, #ifdef _TD_POWER_ - {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 14}, + {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 1}, + {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 2}, #else - {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14}, - #endif - {0, 'x', 0, 0, "Insert only flag.", 13}, - {0, 'y', 0, 0, "Default input yes for prompt", 13}, - {0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14}, - {0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14}, - {0, 'D', "delete table", 0, "Delete data methods——0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database", 14}, + {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 1}, + {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 2}, + #endif + {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 2}, + {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 2}, + {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2}, + {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3}, + {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 4}, + {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 4}, + {0, 's', "sql file", 0, "The select sql file.", 6}, + {0, 'M', 0, 0, "Use metric flag.", 4}, + {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 6}, + {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 4}, + {0, 'b', "type_of_cols", 0, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.", 4}, + {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16.", 4}, + {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4}, + {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4}, + // {0, 'r', "num_of_records_per_req", 0, "The number of records per request.
Default is 100.", 4}, + {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4}, + {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4}, + {0, 'x', 0, 0, "Not insert only flag.", 4}, + {0, 'y', 0, 0, "Default input yes for prompt.", 4}, + {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4}, + {0, 'R', "disorderRang", 0, "Out of order data's range, ms, default is 1000.", 4}, + //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5}, {0}}; - /* Parse a single option. */ - static error_t parse_opt(int key, char *arg, struct argp_state *state) { - /* Get the input argument from argp_parse, which we - know is a pointer to our arguments structure. */ - SDemoArguments *arguments = state->input; - wordexp_t full_path; - char **sptr; - switch (key) { - case 'h': - arguments->host = arg; - break; - case 'p': - arguments->port = atoi(arg); - break; - case 'u': - arguments->user = arg; - break; - case 'P': - arguments->password = arg; - break; - case 'o': - arguments->output_file = arg; - break; - case 's': - arguments->sqlFile = arg; - break; - case 'q': - arguments->mode = atoi(arg); - break; - case 'T': - arguments->num_of_threads = atoi(arg); - break; - case 'r': - arguments->num_of_RPR = atoi(arg); - break; - case 't': - arguments->num_of_tables = atoi(arg); - break; - case 'n': - arguments->num_of_DPT = atoi(arg); - break; - case 'd': - arguments->database = arg; - break; - case 'l': - arguments->num_of_CPR = atoi(arg); - break; - case 'b': - sptr = arguments->datatype; - if (strstr(arg, ",") == NULL) { - if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 && - strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 && - strcasecmp(arg, "SMALLINT") != 0 && - strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 && - strcasecmp(arg, "BINARY") && strcasecmp(arg, "NCHAR")) { +/* Parse a single option. */ +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + // Get the input argument from argp_parse, which we know is a pointer to our arguments structure. 
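+ // (hedged sketch, not part of this patch) how this callback is reached: argp_parse
+ // walks argv and calls parse_opt once per recognized option, passing whatever
+ // pointer was given as the last argument of argp_parse through state->input, e.g.
+ //
+ //   SArguments args = g_args;                    // g_args defaults appear later in this file
+ //   argp_parse(&argp, argc, argv, 0, 0, &args);  // each flag then arrives here as `key`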
+ SArguments *arguments = state->input; + wordexp_t full_path; + char **sptr; + switch (key) { + case 'f': + arguments->metaFile = arg; + break; + case 'h': + arguments->host = arg; + break; + case 'p': + arguments->port = atoi(arg); + break; + case 'u': + arguments->user = arg; + break; + case 'P': + arguments->password = arg; + break; + case 'o': + arguments->output_file = arg; + break; + case 's': + arguments->sqlFile = arg; + break; + case 'q': + arguments->mode = atoi(arg); + break; + case 'T': + arguments->num_of_threads = atoi(arg); + break; + //case 'r': + // arguments->num_of_RPR = atoi(arg); + // break; + case 't': + arguments->num_of_tables = atoi(arg); + break; + case 'n': + arguments->num_of_DPT = atoi(arg); + break; + case 'd': + arguments->database = arg; + break; + case 'l': + arguments->num_of_CPR = atoi(arg); + break; + case 'b': + sptr = arguments->datatype; + if (strstr(arg, ",") == NULL) { + if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 && + strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 && + strcasecmp(arg, "SMALLINT") != 0 && strcasecmp(arg, "TIMESTAMP") != 0 && + strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 && + strcasecmp(arg, "BINARY") != 0 && strcasecmp(arg, "NCHAR") != 0) { + argp_error(state, "Invalid data_type!"); + } + sptr[0] = arg; + } else { + int index = 0; + char *dupstr = strdup(arg); + char *running = dupstr; + char *token = strsep(&running, ","); + while (token != NULL) { + if (strcasecmp(token, "INT") != 0 && strcasecmp(token, "FLOAT") != 0 && + strcasecmp(token, "TINYINT") != 0 && strcasecmp(token, "BOOL") != 0 && + strcasecmp(token, "SMALLINT") != 0 && strcasecmp(token, "TIMESTAMP") != 0 && + strcasecmp(token, "BIGINT") != 0 && strcasecmp(token, "DOUBLE") != 0 && + strcasecmp(token, "BINARY") != 0 && strcasecmp(token, "NCHAR") != 0) { argp_error(state, "Invalid data_type!"); } - sptr[0] = arg; - } else { - int index = 0; - char *dupstr = strdup(arg); - char *running = dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") != 0 && - strcasecmp(token, "FLOAT") != 0 && - strcasecmp(token, "TINYINT") != 0 && - strcasecmp(token, "BOOL") != 0 && - strcasecmp(token, "SMALLINT") != 0 && - strcasecmp(token, "BIGINT") != 0 && - strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { - argp_error(state, "Invalid data_type!"); - } - sptr[index++] = token; - token = strsep(&running, ","); - if (index >= MAX_NUM_DATATYPE) break; - } - } - break; - case 'w': - arguments->len_of_binary = atoi(arg); - break; - case 'm': - arguments->tb_prefix = arg; - break; - case 'M': - arguments->use_metric = true; - break; - case 'x': - arguments->insert_only = true; - break; - case 'y': - arguments->answer_yes = true; - break; - case 'c': - if (wordexp(arg, &full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", arg); - return -1; + sptr[index++] = token; + token = strsep(&running, ","); + if (index >= MAX_NUM_DATATYPE) break; } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - break; - case 'O': - arguments->order = atoi(arg); - if (arguments->order > 1 || arguments->order < 0) - { - arguments->order = 0; - } else if (arguments->order == 1) - { - arguments->rate = 10; - } - break; - case 'R': - arguments->rate = atoi(arg); - if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) - { - arguments->rate = 10; - } - break; - case 'a': - arguments->replica = atoi(arg); - 
if (arguments->replica > 3 || arguments->replica < 1) - { - arguments->replica = 1; - } - break; - case 'D': - arguments->method_of_delete = atoi(arg); - if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) - { - arguments->method_of_delete = 0; - } - break; - case OPT_ABORT: - arguments->abort = 1; - break; - case ARGP_KEY_ARG: - /*arguments->arg_list = &state->argv[state->next-1]; - state->next = state->argc;*/ - argp_usage(state); - break; + } + break; + case 'w': + arguments->len_of_binary = atoi(arg); + break; + case 'm': + arguments->tb_prefix = arg; + break; + case 'M': + arguments->use_metric = true; + break; + case 'x': + arguments->insert_only = true; + case 'y': + arguments->answer_yes = true; + break; + case 'c': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case 'O': + arguments->disorderRatio = atoi(arg); + if (arguments->disorderRatio < 0 || arguments->disorderRatio > 100) + { + argp_error(state, "Invalid disorder ratio, should 1 ~ 100!"); + } + break; + case 'R': + arguments->disorderRange = atoi(arg); + break; + case 'a': + arguments->replica = atoi(arg); + if (arguments->replica > 3 || arguments->replica < 1) + { + arguments->replica = 1; + } + break; + //case 'D': + // arguments->method_of_delete = atoi(arg); + // break; + case OPT_ABORT: + arguments->abort = 1; + break; + case ARGP_KEY_ARG: + /*arguments->arg_list = &state->argv[state->next-1]; + state->next = state->argc;*/ + argp_usage(state); + break; - default: - return ARGP_ERR_UNKNOWN; - } - return 0; + default: + return ARGP_ERR_UNKNOWN; } + return 0; +} - static struct argp argp = {options, parse_opt, 0, 0}; +static struct argp argp = {options, parse_opt, 0, 0}; - void parse_args(int argc, char *argv[], SDemoArguments *arguments) { - argp_parse(&argp, argc, argv, 0, 0, arguments); - if (arguments->abort) { - #ifndef _ALPINE - error(10, 0, "ABORTED"); - #else - abort(); - #endif - } +void parse_args(int argc, char *argv[], SArguments *arguments) { + argp_parse(&argp, argc, argv, 0, 0, arguments); + if (arguments->abort) { + #ifndef _ALPINE + error(10, 0, "ABORTED"); + #else + abort(); + #endif } +} #else void printHelp() { char indent[10] = " "; - printf("%s%s\n", indent, "-h"); - printf("%s%s%s\n", indent, indent, "host, The host to connect to TDengine. Default is localhost."); - printf("%s%s\n", indent, "-p"); - printf("%s%s%s\n", indent, indent, "port, The TCP/IP port number to use for the connection. Default is 0."); - printf("%s%s\n", indent, "-u"); - printf("%s%s%s\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'."); - printf("%s%s\n", indent, "-p"); - #ifdef _TD_POWER_ - printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'powerdb'."); - #else - printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'."); - #endif - printf("%s%s\n", indent, "-d"); - printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'."); - printf("%s%s\n", indent, "-a"); - printf("%s%s%s\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 3."); - printf("%s%s\n", indent, "-m"); - printf("%s%s%s\n", indent, indent, "table_prefix, Table prefix name. 
Default is 't'."); - printf("%s%s\n", indent, "-s"); - printf("%s%s%s\n", indent, indent, "sql file, The select sql file."); - printf("%s%s\n", indent, "-M"); - printf("%s%s%s\n", indent, indent, "meteric, Use metric flag."); - printf("%s%s\n", indent, "-o"); - printf("%s%s%s\n", indent, indent, "outputfile, Direct output to the named file. Default is './output.txt'."); - printf("%s%s\n", indent, "-q"); - printf("%s%s%s\n", indent, indent, "query_mode, Query mode--0: SYNC, 1: ASYNC. Default is SYNC."); - printf("%s%s\n", indent, "-b"); - printf("%s%s%s\n", indent, indent, "type_of_cols, data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'."); - printf("%s%s\n", indent, "-w"); - printf("%s%s%s\n", indent, indent, "length_of_binary, The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8"); - printf("%s%s\n", indent, "-l"); - printf("%s%s%s\n", indent, indent, "num_of_cols_per_record, The number of columns per record. Default is 3."); - printf("%s%s\n", indent, "-T"); - printf("%s%s%s\n", indent, indent, "num_of_threads, The number of threads. Default is 10."); - printf("%s%s\n", indent, "-r"); - printf("%s%s%s\n", indent, indent, "num_of_records_per_req, The number of records per request. Default is 1000."); - printf("%s%s\n", indent, "-t"); - printf("%s%s%s\n", indent, indent, "num_of_tables, The number of tables. Default is 10000."); - printf("%s%s\n", indent, "-n"); - printf("%s%s%s\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 100000."); + printf("%s%s\n", indent, "-f"); + printf("%s%s%s\n", indent, indent, "The meta file to the execution procedure. Default is './meta.json'."); printf("%s%s\n", indent, "-c"); - #ifdef _TD_POWER_ - printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/power/'."); - #else printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'."); - #endif - printf("%s%s\n", indent, "-x"); - printf("%s%s%s\n", indent, indent, "flag, Insert only flag."); - printf("%s%s\n", indent, "-y"); - printf("%s%s%s\n", indent, indent, "flag, Anser Yes for prompt."); - printf("%s%s\n", indent, "-O"); - printf("%s%s%s\n", indent, indent, "order, Insert mode--0: In order, 1: Out of order. 
Default is in order."); - printf("%s%s\n", indent, "-R"); - printf("%s%s%s\n", indent, indent, "rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50."); - printf("%s%s\n", indent, "-D"); - printf("%s%s%s\n", indent, indent, "Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database."); - } - - void parse_args(int argc, char *argv[], SDemoArguments *arguments) { - char **sptr; + } + + void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "-h") == 0) { - arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { - arguments->port = atoi(argv[++i]); - } else if (strcmp(argv[i], "-u") == 0) { - arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - arguments->password = argv[++i]; - } else if (strcmp(argv[i], "-o") == 0) { - arguments->output_file = argv[++i]; - } else if (strcmp(argv[i], "-s") == 0) { - arguments->sqlFile = argv[++i]; - } else if (strcmp(argv[i], "-q") == 0) { - arguments->mode = atoi(argv[++i]); - } else if (strcmp(argv[i], "-T") == 0) { - arguments->num_of_threads = atoi(argv[++i]); - } else if (strcmp(argv[i], "-r") == 0) { - arguments->num_of_RPR = atoi(argv[++i]); - } else if (strcmp(argv[i], "-t") == 0) { - arguments->num_of_tables = atoi(argv[++i]); - } else if (strcmp(argv[i], "-n") == 0) { - arguments->num_of_DPT = atoi(argv[++i]); - } else if (strcmp(argv[i], "-d") == 0) { - arguments->database = argv[++i]; - } else if (strcmp(argv[i], "-l") == 0) { - arguments->num_of_CPR = atoi(argv[++i]); - } else if (strcmp(argv[i], "-b") == 0) { - sptr = arguments->datatype; - ++i; - if (strstr(argv[i], ",") == NULL) { - if (strcasecmp(argv[i], "INT") != 0 && strcasecmp(argv[i], "FLOAT") != 0 && - strcasecmp(argv[i], "TINYINT") != 0 && strcasecmp(argv[i], "BOOL") != 0 && - strcasecmp(argv[i], "SMALLINT") != 0 && - strcasecmp(argv[i], "BIGINT") != 0 && strcasecmp(argv[i], "DOUBLE") != 0 && - strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { - fprintf(stderr, "Invalid data_type!\n"); - printHelp(); - exit(EXIT_FAILURE); - } - sptr[0] = argv[i]; - } else { - int index = 0; - char *dupstr = strdup(argv[i]); - char *running = dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") != 0 && - strcasecmp(token, "FLOAT") != 0 && - strcasecmp(token, "TINYINT") != 0 && - strcasecmp(token, "BOOL") != 0 && - strcasecmp(token, "SMALLINT") != 0 && - strcasecmp(token, "BIGINT") != 0 && - strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { - fprintf(stderr, "Invalid data_type!\n"); - printHelp(); - exit(EXIT_FAILURE); - } - sptr[index++] = token; - token = strsep(&running, ","); - if (index >= MAX_NUM_DATATYPE) break; - } - } - } else if (strcmp(argv[i], "-w") == 0) { - arguments->len_of_binary = atoi(argv[++i]); - } else if (strcmp(argv[i], "-m") == 0) { - arguments->tb_prefix = argv[++i]; - } else if (strcmp(argv[i], "-M") == 0) { - arguments->use_metric = true; - } else if (strcmp(argv[i], "-x") == 0) { - arguments->insert_only = true; - } else if (strcmp(argv[i], "-y") == 0) { - arguments->answer_yes = true; + if (strcmp(argv[i], "-f") == 0) { + arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { strcpy(configDir, argv[++i]); - } else if (strcmp(argv[i], "-O") == 0) { - arguments->order = atoi(argv[++i]); - if (arguments->order > 1 || arguments->order < 0) { - arguments->order = 0; - } else if 
(arguments->order == 1) { - arguments->rate = 10; - } - } else if (strcmp(argv[i], "-R") == 0) { - arguments->rate = atoi(argv[++i]); - if (arguments->order == 1 && (arguments->rate > 50 || arguments->rate <= 0)) { - arguments->rate = 10; - } - } else if (strcmp(argv[i], "-a") == 0) { - arguments->replica = atoi(argv[++i]); - if (arguments->rate > 3 || arguments->rate < 1) { - arguments->rate = 1; - } - } else if (strcmp(argv[i], "-D") == 0) { - arguments->method_of_delete = atoi(argv[++i]); - if (arguments->method_of_delete < 0 || arguments->method_of_delete > 3) { - arguments->method_of_delete = 0; - } } else if (strcmp(argv[i], "--help") == 0) { printHelp(); exit(EXIT_FAILURE); @@ -452,627 +674,3480 @@ typedef struct DemoArguments { } } } - #endif -/* ******************************* Structure - * definition******************************* */ -enum MODE { - SYNC, ASYNC -}; -typedef struct { - TAOS *taos; - int threadID; - char db_name[MAX_DB_NAME_SIZE]; - char fp[4096]; - char **datatype; - int len_of_binary; - char tb_prefix[MAX_TB_NAME_SIZE]; - int start_table_id; - int end_table_id; - int ncols_per_record; - int nrecords_per_table; - int nrecords_per_request; - int data_of_order; - int data_of_rate; - int64_t start_time; - bool do_aggreFunc; - - char* cols; - bool use_metric; - - tsem_t mutex_sem; - int notFinished; - tsem_t lock_sem; - int counter; - - // insert delay statitics - int64_t cntDelay; - int64_t totalDelay; - int64_t avgDelay; - int64_t maxDelay; - int64_t minDelay; - -} info; - -typedef struct { - TAOS *taos; - - char tb_name[MAX_TB_NAME_SIZE]; - int64_t timestamp; - int target; - int counter; - int nrecords_per_request; - int ncols_per_record; - char **data_type; - int len_of_binary; - int data_of_order; - int data_of_rate; - - tsem_t *mutex_sem; - int *notFinished; - tsem_t *lock_sem; -} sTable; - -/* ******************************* Global - * variables******************************* */ -char *aggreFunc[] = {"*", "count(*)", "avg(f1)", "sum(f1)", "max(f1)", "min(f1)", "first(f1)", "last(f1)"}; +static bool getInfoFromJsonFile(char* file); +//static int generateOneRowDataForStb(SSuperTable* stbInfo); +//static int getDataIntoMemForStb(SSuperTable* stbInfo); +static void init_rand_data(); +static int createDatabases(); +static void createChildTables(); +static int queryDbExec(TAOS *taos, char *command, int type); +/* ************ Global variables ************ */ -void queryDB(TAOS *taos, char *command); +int32_t randint[MAX_PREPARED_RAND]; +int64_t randbigint[MAX_PREPARED_RAND]; +float randfloat[MAX_PREPARED_RAND]; +double randdouble[MAX_PREPARED_RAND]; +char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; + +SArguments g_args = {NULL, + "127.0.0.1", // host + 6030, // port + "root", // user + #ifdef _TD_POWER_ + "powerdb", // password + #else + "taosdata", // password + #endif + "test", // database + 1, // replica + "t", // tb_prefix + NULL, // sqlFile + false, // use_metric + false, // insert_only + false, // answer_yes; + "./output.txt", // output_file + 0, // mode : sync or async + { + "TINYINT", // datatype + "SMALLINT", + "INT", + "BIGINT", + "FLOAT", + "DOUBLE", + "BINARY", + "NCHAR", + "BOOL", + "TIMESTAMP" + }, + 16, // len_of_binary + 10, // num_of_CPR + 10, // num_of_connections/thread + 100, // num_of_RPR + 10000, // num_of_tables + 10000, // num_of_DPT + 0, // abort + 0, // disorderRatio + 1000, // disorderRange + 1, // method_of_delete + NULL // arg_list +}; -void *readTable(void *sarg); 
-void *readMetric(void *sarg); +static int g_jsonType = 0; +static SDbs g_Dbs; +static int g_totalChildTables = 0; +static SQueryMetaInfo g_queryInfo; +static FILE * g_fpOfInsertResult = NULL; -void *syncWrite(void *sarg); -void *deleteTable(); +void tmfclose(FILE *fp) { + if (NULL != fp) { + fclose(fp); + } +} -void *asyncWrite(void *sarg); +void tmfree(char *buf) { + if (NULL != buf) { + free(buf); + } +} -int generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary); +static int queryDbExec(TAOS *taos, char *command, int type) { + int i; + TAOS_RES *res = NULL; + int32_t code = -1; -void rand_string(char *str, int size); + for (i = 0; i < 5; i++) { + if (NULL != res) { + taos_free_result(res); + res = NULL; + } + + res = taos_query(taos, command); + code = taos_errno(res); + if (0 == code) { + break; + } + } -void init_rand_data(); + if (code != 0) { + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res)); + taos_free_result(res); + //taos_close(taos); + return -1; + } -double getCurrentTime(); + if (INSERT_TYPE == type) { + int affectedRows = taos_affected_rows(res); + taos_free_result(res); + return affectedRows; + } + + taos_free_result(res); + return 0; +} -void callBack(void *param, TAOS_RES *res, int code); -void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass); -void querySqlFile(TAOS* taos, char* sqlFile); +static void getResult(TAOS_RES *res, char* resultFileName) { + TAOS_ROW row = NULL; + int num_rows = 0; + int num_fields = taos_field_count(res); + TAOS_FIELD *fields = taos_fetch_fields(res); + + FILE *fp = NULL; + if (resultFileName[0] != 0) { + fp = fopen(resultFileName, "at"); + if (fp == NULL) { + fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName); + } + } + + char* databuf = (char*) calloc(1, 100*1024*1024); + if (databuf == NULL) { + fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n"); + return ; + } -int main(int argc, char *argv[]) { - SDemoArguments arguments = { NULL, // host - 0, // port - "root", // user - #ifdef _TD_POWER_ - "powerdb", // password - #else - "taosdata", // password - #endif - "test", // database - 1, // replica - "t", // tb_prefix - NULL, - false, // use_metric - false, // insert_only - false, // answer_yes - "./output.txt", // output_file - 0, // mode - { - "int", // datatype - "int", - "int", - "int", - "int", - "int", - "int", - "float" - }, - 8, // len_of_binary - 1, // num_of_CPR - 1, // num_of_connections/thread - 1, // num_of_RPR - 1, // num_of_tables - 50000, // num_of_DPT - 0, // abort - 0, // order - 0, // rate - 0, // method_of_delete - NULL // arg_list - }; - - /* Parse our arguments; every option seen by parse_opt will be - reflected in arguments. 
*/ - // For demo use, change default values for some parameters; - arguments.num_of_tables = 10000; - arguments.num_of_CPR = 3; - arguments.num_of_threads = 10; - arguments.num_of_DPT = 100000; - arguments.num_of_RPR = 1000; - arguments.use_metric = true; - arguments.insert_only = false; - arguments.answer_yes = false; - // end change - - parse_args(argc, argv, &arguments); - - enum MODE query_mode = arguments.mode; - char *ip_addr = arguments.host; - uint16_t port = arguments.port; - char *user = arguments.user; - char *pass = arguments.password; - char *db_name = arguments.database; - char *tb_prefix = arguments.tb_prefix; - int len_of_binary = arguments.len_of_binary; - int ncols_per_record = arguments.num_of_CPR; - int order = arguments.order; - int rate = arguments.rate; - int method_of_delete = arguments.method_of_delete; - int ntables = arguments.num_of_tables; - int threads = arguments.num_of_threads; - int nrecords_per_table = arguments.num_of_DPT; - int nrecords_per_request = arguments.num_of_RPR; - bool use_metric = arguments.use_metric; - bool insert_only = arguments.insert_only; - bool answer_yes = arguments.answer_yes; - char **data_type = arguments.datatype; - int count_data_type = 0; - char dataString[STRING_LEN]; - bool do_aggreFunc = true; - int replica = arguments.replica; + int totalLen = 0; + char temp[16000]; - if (NULL != arguments.sqlFile) { - TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port); - querySqlFile(qtaos, arguments.sqlFile); - taos_close(qtaos); - return 0; + // fetch the records row by row + while ((row = taos_fetch_row(res))) { + if (totalLen >= 100*1024*1024 - 32000) { + if (fp) fprintf(fp, "%s", databuf); + totalLen = 0; + memset(databuf, 0, 100*1024*1024); + } + num_rows++; + int len = taos_print_row(temp, row, fields, num_fields); + len += sprintf(temp + len, "\n"); + //printf("query result:%s\n", temp); + memcpy(databuf + totalLen, temp, len); + totalLen += len; } - init_rand_data(); - memset(dataString, 0, STRING_LEN); - int len = 0; + if (fp) fprintf(fp, "%s", databuf); + tmfclose(fp); + free(databuf); +} - if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) { - do_aggreFunc = false; +static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { + TAOS_RES *res = taos_query(taos, command); + if (res == NULL || taos_errno(res) != 0) { + printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res)); + taos_free_result(res); + return; } - for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) { - if (data_type[count_data_type] == NULL) { - break; - } + + getResult(res, resultFileName); + taos_free_result(res); +} - len += snprintf(dataString + len, STRING_LEN - len, "%s ", data_type[count_data_type]); +double getCurrentTime() { + struct timeval tv; + if (gettimeofday(&tv, NULL) != 0) { + perror("Failed to get current time in ms"); + return 0.0; } - FILE *fp = fopen(arguments.output_file, "a"); - if (NULL == fp) { - fprintf(stderr, "Failed to open %s for writing\n", arguments.output_file); - return 1; - }; + return tv.tv_sec + tv.tv_usec / 1E6; +} + +static int32_t rand_bool(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 2; +} + +static int32_t rand_tinyint(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 128; +} + +static int32_t rand_smallint(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + 
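+ // (editor's note) every rand_* helper here follows the same pattern: a static
+ // cursor cycles over MAX_PREPARED_RAND values pre-filled once by init_rand_data(),
+ // so generating a field is an array read rather than a rand() call per row.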
return randint[cursor] % 32767; +} + +static int32_t rand_int(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor]; +} + +static int64_t rand_bigint(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randbigint[cursor]; + +} + +static float rand_float(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randfloat[cursor]; +} + +static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; +void rand_string(char *str, int size) { + str[0] = 0; + if (size > 0) { + //--size; + int n; + for (n = 0; n < size; n++) { + int key = rand_tinyint() % (int)(sizeof(charset) - 1); + str[n] = charset[key]; + } + str[n] = 0; + } +} + +static double rand_double() { + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randdouble[cursor]; + +} + +static void init_rand_data() { + for (int i = 0; i < MAX_PREPARED_RAND; i++){ + randint[i] = (int)(rand() % 65535); + randbigint[i] = (int64_t)(rand() % 2147483648); + randfloat[i] = (float)(rand() / 1000.0); + randdouble[i] = (double)(rand() / 1000000.0); + } +} + +static int printfInsertMeta() { + printf("\033[1m\033[40;32m================ insert.json parse result START ================\033[0m\n"); + printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); + printf("user: \033[33m%s\033[0m\n", g_Dbs.user); + printf("password: \033[33m%s\033[0m\n", g_Dbs.password); + printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); + printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); + printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); + + printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); + for (int i = 0; i < g_Dbs.dbCount; i++) { + printf("database[\033[33m%d\033[0m]:\n", i); + printf(" database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + printf(" drop: \033[33mno\033[0m\n"); + }else { + printf(" drop: \033[33myes\033[0m\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 
2))) { + printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); + } else { + printf(" precision error: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); + return -1; + } + } + + printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%d\033[0m]:\n", j); + + printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); + } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "no"); + } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" childTblExists: \033[33m%s\033[0m\n", "error"); + } + + printf(" childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); + printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); + printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource); + printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode); + printf(" insertRate: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].insertRate); + printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); + + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); + }else { + printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); + } + printf(" numberOfTblInOneSql: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql); + printf(" rowsPerTbl: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); + printf(" disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange); + printf(" disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio); + printf(" maxSqlLen: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + + printf(" timeStampStep: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep); + printf(" startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp); + printf(" sampleFormat: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFormat); + printf(" sampleFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFile); + printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile); + + printf(" columnCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) { + printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + printf("column[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + printf("\n"); + + printf(" tagCount: \033[33m%d\033[0m\n ", g_Dbs.db[i].superTbls[j].tagCount); + for 
(int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { + printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + printf("tag[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + printf("\n"); + } + printf("\n"); + } + printf("\033[1m\033[40;32m================ insert.json parse result END================\033[0m\n"); + + return 0; +} + +static void printfInsertMetaToFile(FILE* fp) { + fprintf(fp, "================ insert.json parse result START================\n"); + fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); + fprintf(fp, "user: %s\n", g_Dbs.user); + fprintf(fp, "password: %s\n", g_Dbs.password); + fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); + fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); + fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); + + fprintf(fp, "database count: %d\n", g_Dbs.dbCount); + for (int i = 0; i < g_Dbs.dbCount; i++) { + fprintf(fp, "database[%d]:\n", i); + fprintf(fp, " database name: %s\n", g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + fprintf(fp, " drop: no\n"); + }else { + fprintf(fp, " drop: yes\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision); + } else { + fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision); + } + } + + fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount); + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + fprintf(fp, " super table[%d]:\n", j); + + fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "no"); + } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "yes"); + } else { + fprintf(fp, 
" autoCreateTable: %s\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "no"); + } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "yes"); + } else { + fprintf(fp, " childTblExists: %s\n", "error"); + } + + fprintf(fp, " childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount); + fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); + fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource); + fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode); + fprintf(fp, " insertRate: %d\n", g_Dbs.db[i].superTbls[j].insertRate); + fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); + + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + fprintf(fp, " multiThreadWriteOneTbl: no\n"); + }else { + fprintf(fp, " multiThreadWriteOneTbl: yes\n"); + } + fprintf(fp, " numberOfTblInOneSql: %d\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql); + fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); + fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); + fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); + fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + + fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); + fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); + fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); + + fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) { + fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + fprintf(fp, "column[%d]:%s ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + fprintf(fp, "\n"); + + fprintf(fp, " tagCount: %d\n ", g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { + fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + fprintf(fp, "\n"); + } + fprintf(fp, "\n"); + } + fprintf(fp, "================ insert.json parse result END ================\n\n"); +} + +static void printfQueryMeta() { + printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n"); + printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, g_queryInfo.port); + printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); + printf("password: \033[33m%s\033[0m\n", 
g_queryInfo.password); + printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); + + printf("\n"); + printf("specified table query info: \n"); + printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); + printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent); + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); + + if (SUBSCRIBE_MODE == g_jsonType) { + printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); + printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); + } + + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); + } + printf("\n"); + printf("super table query info: \n"); + printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate); + printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt); + printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName); + + if (SUBSCRIBE_MODE == g_jsonType) { + printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode); + printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress); + } + + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount); + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]); + } + printf("\n"); + printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n"); +} + + +static char* xFormatTimestamp(char* buf, int64_t val, int precision) { + time_t tt; + if (precision == TSDB_TIME_PRECISION_MICRO) { + tt = (time_t)(val / 1000000); + } else { + tt = (time_t)(val / 1000); + } + +/* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + + struct tm* ptm = localtime(&tt); + size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); + + if (precision == TSDB_TIME_PRECISION_MICRO) { + sprintf(buf + pos, ".%06d", (int)(val % 1000000)); + } else { + sprintf(buf + pos, ".%03d", (int)(val % 1000)); + } + + return buf; +} + +static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { + if (val == NULL) { + fprintf(fp, "%s", TSDB_DATA_NULL_STR); + return; + } + + char buf[TSDB_MAX_BYTES_PER_ROW]; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: + fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + fprintf(fp, "%d", *((int8_t *)val)); + break; + case TSDB_DATA_TYPE_SMALLINT: + fprintf(fp, "%d", *((int16_t *)val)); + break; + case TSDB_DATA_TYPE_INT: + fprintf(fp, "%d", *((int32_t *)val)); + break; + case TSDB_DATA_TYPE_BIGINT: + fprintf(fp, "%" PRId64, *((int64_t *)val)); + break; + case TSDB_DATA_TYPE_FLOAT: + fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(buf, val, length); + buf[length] = 0; + fprintf(fp, "\'%s\'", buf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + xFormatTimestamp(buf, *(int64_t*)val, precision); + fprintf(fp, "'%s'", buf); + break; + default: + break; + } +} + +static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { + TAOS_ROW row = taos_fetch_row(tres); + if (row == NULL) { + return 0; + } + + FILE* fp = fopen(fname, "at"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to open file: %s\n", fname); + return -1; + } + + int num_fields = taos_num_fields(tres); + TAOS_FIELD *fields = taos_fetch_fields(tres); + int precision = taos_result_precision(tres); + + for (int col = 0; col < num_fields; col++) { + if (col > 0) { + fprintf(fp, ","); + } + fprintf(fp, "%s", fields[col].name); + } + fputc('\n', fp); + + int numOfRows = 0; + do { + int32_t* length = taos_fetch_lengths(tres); + for (int i = 0; i < num_fields; i++) { + if (i > 0) { + fputc(',', fp); + } + xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); + } + fputc('\n', fp); + + numOfRows++; + row = taos_fetch_row(tres); + } while( row != NULL); + + fclose(fp); + + return numOfRows; +} + +static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + res = taos_query(taos, "show databases;"); + int32_t code = taos_errno(res); + + if (code != 0) { + fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res)); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + + while ((row = taos_fetch_row(res)) != NULL) { + // sys database name : 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue; + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count); + return -1; + } + + strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI); + dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = *((int8_t 
*)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+    dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+    dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+    dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+    strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+    dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
+    strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
+
+    count++;
+    if (count > MAX_DATABASE_COUNT) {
+      fprintf(stderr, "Database count exceeds the limit of %d\n", MAX_DATABASE_COUNT);
+      break;
+    }
+  }
+
+  return count;
+}
+
+static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
+  if (filename[0] == 0) {
+    return;  // no result file configured; writing to a NULL FILE* would crash
+  }
+
+  FILE *fp = fopen(filename, "at");
+  if (fp == NULL) {
+    fprintf(stderr, "failed to open file: %s\n", filename);
+    return;
+  }
+
+  fprintf(fp, "================ database[%d] ================\n", index);
+  fprintf(fp, "name: %s\n", dbInfos->name);
+  fprintf(fp, "created_time: %s\n", dbInfos->create_time);
+  fprintf(fp, "ntables: %d\n", dbInfos->ntables);
+  fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
+  fprintf(fp, "replica: %d\n", dbInfos->replica);
+  fprintf(fp, "quorum: %d\n", dbInfos->quorum);
+  fprintf(fp, "days: %d\n", dbInfos->days);
+  fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
+  fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
+  fprintf(fp, "blocks: %d\n", dbInfos->blocks);
+  fprintf(fp, "minrows: %d\n", dbInfos->minrows);
+  fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
+  fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
+  fprintf(fp, "fsync: %d\n", dbInfos->fsync);
+  fprintf(fp, "comp: %d\n", dbInfos->comp);
+  fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
+  fprintf(fp, "precision: %s\n", dbInfos->precision);
+  fprintf(fp, "update: %d\n", dbInfos->update);
+  fprintf(fp, "status: %s\n", dbInfos->status);
+  fprintf(fp, "\n");
+
+  fclose(fp);
+}
+
+static void printfQuerySystemInfo(TAOS * taos) {
+  char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
+  char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
+  TAOS_RES* res;
+
+  time_t t;
+  struct tm* lt;
+  time(&t);
+  lt = localtime(&t);
+  // tm_mon is zero-based, so add 1 to get the calendar month
+  snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon+1, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
+
+  // show variables
+  res = taos_query(taos, "show variables;");
+  //getResult(res, filename);
+  xDumpResultToFile(filename, res);
+
+  // show dnodes
+  res = taos_query(taos, "show dnodes;");
+  xDumpResultToFile(filename, res);
+  //getResult(res, filename);
+
+  // show databases
+  res = taos_query(taos, "show databases;");
+  SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
+  if (dbInfos == NULL) {
+    fprintf(stderr, "failed to allocate memory\n");
+    return;
+  }
+  int dbCount = getDbFromServer(taos, dbInfos);
+  if (dbCount <= 0) {
+    free(dbInfos);  // release the pointer array before bailing out
+    return;
+  }
+
+  for (int i = 0; i < dbCount; i++) {
+    // print database info
+    printfDbInfoForQueryToFile(filename, dbInfos[i], i);
+
+    // show db.vgroups
+    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
+    res = taos_query(taos, buffer);
+    xDumpResultToFile(filename, res);
+
+    // show db.stables
+    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
+    res = taos_query(taos, buffer);
+    xDumpResultToFile(filename, res);
+
+    free(dbInfos[i]);
+  }
+
+  
free(dbInfos); + +} + + +#ifdef TD_LOWA_CURL +static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp) +{ + size_t realsize = size * nmemb; + curlMemInfo* mem = (curlMemInfo*)userp; + + char *ptr = realloc(mem->buf, mem->sizeleft + realsize + 1); + if(ptr == NULL) { + /* out of memory! */ + printf("not enough memory (realloc returned NULL)\n"); + return 0; + } + + mem->buf = ptr; + memcpy(&(mem->buf[mem->sizeleft]), contents, realsize); + mem->sizeleft += realsize; + mem->buf[mem->sizeleft] = 0; + + //printf("result:%s\n\n", mem->buf); + + return realsize; +} + +void curlProceLogin(void) +{ + CURL *curl_handle; + CURLcode res; + + curlMemInfo chunk; + + chunk.buf = malloc(1); /* will be grown as needed by the realloc above */ + chunk.sizeleft = 0; /* no data at this point */ + + //curl_global_init(CURL_GLOBAL_ALL); + + /* init the curl session */ + curl_handle = curl_easy_init(); + + curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,""); + curl_easy_setopt(curl_handle, CURLOPT_POST, 1); + + char dstUrl[128] = {0}; + snprintf(dstUrl, 128, "http://%s:6041/rest/login/root/taosdata", g_Dbs.host); + + /* specify URL to get */ + curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl); + + /* send all data to this function */ + curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback); + + /* we pass our 'chunk' struct to the callback function */ + curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); + + /* do it! */ + res = curl_easy_perform(curl_handle); + + /* check for errors */ + if(res != CURLE_OK) { + fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); + } + else { + //printf("response len:%lu, content: %s \n", (unsigned long)chunk.sizeleft, chunk.buf); + ; + } + + /* cleanup curl stuff */ + curl_easy_cleanup(curl_handle); + + free(chunk.buf); + + /* we're done with libcurl, so clean it up */ + //curl_global_cleanup(); + + return; +} + +int curlProceSql(char* host, uint16_t port, char* sqlstr, CURL *curl_handle) +{ + //curlProceLogin(); + + //CURL *curl_handle; + CURLcode res; + + curlMemInfo chunk; + + chunk.buf = malloc(1); /* will be grown as needed by the realloc above */ + chunk.sizeleft = 0; /* no data at this point */ + + + char dstUrl[128] = {0}; + snprintf(dstUrl, 128, "http://%s:%u/rest/sql", host, port+TSDB_PORT_HTTP); + + //curl_global_init(CURL_GLOBAL_ALL); + + /* init the curl session */ + //curl_handle = curl_easy_init(); + + //curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,""); + curl_easy_setopt(curl_handle, CURLOPT_POST, 1L); + + /* specify URL to get */ + curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl); + + /* enable TCP keep-alive for this transfer */ + curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPALIVE, 1L); + /* keep-alive idle time to 120 seconds */ + curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPIDLE, 120L); + /* interval time between keep-alive probes: 60 seconds */ + curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPINTVL, 60L); + + /* send all data to this function */ + curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback); + + /* we pass our 'chunk' struct to the callback function */ + curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); + + struct curl_slist *list = NULL; + list = curl_slist_append(list, "Authorization: Basic cm9vdDp0YW9zZGF0YQ=="); + curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list); + curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list); + + /* Set the expected upload size. 
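(CURLOPT_POSTFIELDSIZE_LARGE takes a curl_off_t byte count; passing
+     strlen(sqlstr) lets libcurl send an exact Content-Length header for the
+     POST body instead of falling back to chunked transfer encoding.)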
*/ + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)strlen(sqlstr)); + curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, sqlstr); + + /* get it! */ + res = curl_easy_perform(curl_handle); + + /* check for errors */ + if(res != CURLE_OK) { + fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); + return -1; + } + else { + /* curl_easy_perform() block end and return result */ + //printf("[%32.32s] sql response len:%lu, content: %s \n\n", sqlstr, (unsigned long)chunk.sizeleft, chunk.buf); + ; + } + + curl_slist_free_all(list); /* free the list again */ + + /* cleanup curl stuff */ + //curl_easy_cleanup(curl_handle); + + free(chunk.buf); + + /* we're done with libcurl, so clean it up */ + //curl_global_cleanup(); + + return 0; +} +#endif + +char* getTagValueFromTagSample( SSuperTable* stbInfo, int tagUsePos) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); + return NULL; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); + + return dataBuf; +} + +char* generateTagVaulesForStb(SSuperTable* stbInfo) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); + return NULL; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); + for (int i = 0; i < stbInfo->tagCount; i++) { + if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", 5))) { + if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { + printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); + tmfree(dataBuf); + return NULL; + } + + char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1); + if (NULL == buf) { + printf("calloc failed! 
size:%d\n", stbInfo->tags[i].dataLen); + tmfree(dataBuf); + return NULL; + } + rand_string(buf, stbInfo->tags[i].dataLen); + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", 3)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_int()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", 6)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", 5)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_float()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", 6)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_double()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", 8)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", 7)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", 4)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_bool()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", 4)) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); + } else { + printf("No support data type: %s\n", stbInfo->tags[i].dataType); + tmfree(dataBuf); + return NULL; + } + } + dataLen -= 2; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); + return dataBuf; +} + +static int calcRowLen(SSuperTable* superTbls) { + int colIndex; + int lenOfOneRow = 0; + + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { + char* dataType = superTbls->columns[colIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + lenOfOneRow += 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + lenOfOneRow += 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + lenOfOneRow += 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + lenOfOneRow += 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + lenOfOneRow += 42; + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + lenOfOneRow += 21; + } else { + printf("get error data type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp + + int tagIndex; + int lenOfTagOfOneRow = 0; + for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { + char* dataType = superTbls->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + lenOfTagOfOneRow += 
superTbls->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; + } else { + printf("get error tag type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; + + return 0; +} + + +static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int* childTblCountOfSuperTbl) { + char command[BUFFER_SIZE] = "\0"; + TAOS_RES * res; + TAOS_ROW row = NULL; + + char* childTblName = *childTblNameOfSuperTbl; + + //get all child table name use cmd: select tbname from superTblName; + snprintf(command, BUFFER_SIZE, "select tbname from %s.%s", dbName, sTblName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printf("failed to run command %s\n", command); + taos_free_result(res); + taos_close(taos); + exit(-1); + } + + int childTblCount = 10000; + int count = 0; + childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + char* pTblName = childTblName; + while ((row = taos_fetch_row(res)) != NULL) { + int32_t* len = taos_fetch_lengths(res); + strncpy(pTblName, (char *)row[0], len[0]); + //printf("==== sub table name: %s\n", pTblName); + count++; + if (count >= childTblCount - 1) { + char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); + if (tmp != NULL) { + childTblName = tmp; + childTblCount = (int)(childTblCount*1.5); + memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); + } else { + // exit, if allocate more memory failed + printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName); + tmfree(childTblName); + taos_free_result(res); + taos_close(taos); + exit(-1); + } + } + pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; + } + + *childTblCountOfSuperTbl = count; + *childTblNameOfSuperTbl = childTblName; + + taos_free_result(res); + return 0; +} + +static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { + char command[BUFFER_SIZE] = "\0"; + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + //get schema use cmd: describe superTblName; + snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printf("failed to run command %s\n", command); + taos_free_result(res); + return -1; + } + + int tagIndex = 0; + int columnIndex = 0; + TAOS_FIELD *fields = taos_fetch_fields(res); + while ((row = taos_fetch_row(res)) != NULL) { + if (0 == count) { + count++; + continue; + } + + if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { + strncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + strncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + superTbls->tags[tagIndex].dataLen = *((int 
*)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + tagIndex++; + } else { + strncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + strncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + columnIndex++; + } + count++; + } + + superTbls->columnCount = columnIndex; + superTbls->tagCount = tagIndex; + taos_free_result(res); + + calcRowLen(superTbls); + + if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { + //get all child table name use cmd: select tbname from superTblName; + getAllChildNameOfSuperTable(taos, dbName, superTbls->sTblName, &superTbls->childTblName, &superTbls->childTblCount); + } + return 0; +} + +static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) { + char command[BUFFER_SIZE] = "\0"; + + char cols[STRING_LEN] = "\0"; + int colIndex; + int len = 0; + + int lenOfOneRow = 0; + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { + char* dataType = superTbls->columns[colIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", superTbls->columns[colIndex].dataLen); + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "NCHAR", superTbls->columns[colIndex].dataLen); + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); + lenOfOneRow += 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT"); + lenOfOneRow += 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT"); + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT"); + lenOfOneRow += 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL"); + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT"); + lenOfOneRow += 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE"); + lenOfOneRow += 42; + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP"); + lenOfOneRow += 21; + } else { + taos_close(taos); + printf("config error data type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp + //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, 
lenOfOneRow); + + // save for creating child table + superTbls->colsOfCreatChildTable = (char*)calloc(len+20, 1); + if (NULL == superTbls->colsOfCreatChildTable) { + printf("Failed when calloc, size:%d", len+1); + taos_close(taos); + exit(-1); + } + snprintf(superTbls->colsOfCreatChildTable, len+20, "(ts timestamp%s)", cols); + + if (use_metric) { + char tags[STRING_LEN] = "\0"; + int tagIndex; + len = 0; + + int lenOfTagOfOneRow = 0; + len += snprintf(tags + len, STRING_LEN - len, "("); + for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { + char* dataType = superTbls->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "BINARY", superTbls->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "NCHAR", superTbls->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE"); + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; + } else { + taos_close(taos); + printf("config error tag type : %s\n", dataType); + exit(-1); + } + } + len -= 2; + len += snprintf(tags + len, STRING_LEN - len, ")"); + + superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; + + snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbls->sTblName, cols, tags); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + return -1; + } + printf("\ncreate supertable %s success!\n\n", superTbls->sTblName); + } + return 0; +} + + +static int createDatabases() { + TAOS * taos = NULL; + int ret = 0; + taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + exit(-1); + } + char command[BUFFER_SIZE] = "\0"; + + + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.db[i].drop) { + sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + taos_close(taos); + return -1; + } + } + + int dataLen = 0; + dataLen += 
snprintf(command + dataLen, BUFFER_SIZE - dataLen, "create database if not exists %s ", g_Dbs.db[i].dbName); + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "blocks %d ", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "cache %d ", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "days %d ", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "keep %d ", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "replica %d ", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "update %d ", g_Dbs.db[i].dbCfg.update); + } + //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { + // dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); + //} + if (g_Dbs.db[i].dbCfg.minRows > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "minrows %d ", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "maxrows %d ", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "comp %d ", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "wal %d ", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "fsync %d ", g_Dbs.db[i].dbCfg.fsync); + } + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "precision \'%s\';", g_Dbs.db[i].dbCfg.precision); + } + + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + taos_close(taos); + return -1; + } + printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); + + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // describe super table, if exists + sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS; + ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric); + } else { + g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS; + ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]); + } + + if (0 != ret) { + taos_close(taos); + return -1; + } + } + } + + taos_close(taos); + return 0; +} + + +void * createTable(void *sarg) +{ + threadInfo *winfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = winfo->superTblInfo; + + int64_t lastPrintTime = taosGetTimestampMs(); + + char* buffer = calloc(superTblInfo->maxSqlLen, 1); + + int len = 0; + int batchNum = 0; + //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); + for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { + if (0 == g_Dbs.use_metric) { + snprintf(buffer, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, superTblInfo->childTblPrefix, 
i, superTblInfo->colsOfCreatChildTable); + } else { + if (0 == len) { + batchNum = 0; + memset(buffer, 0, superTblInfo->maxSqlLen); + len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "create table "); + } + + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo); + } else { + tagsValBuf = getTagValueFromTagSample(superTblInfo, i % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + free(buffer); + return NULL; + } + + len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf); + free(tagsValBuf); + batchNum++; + + if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } + } + + len = 0; + if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ + free(buffer); + return NULL; + } + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] already create %d - %d tables\n", winfo->threadID, winfo->start_table_id, i); + lastPrintTime = currentPrintTime; + } + } + + if (0 != len) { + (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE); + } + + free(buffer); + return NULL; +} + +void startMultiThreadCreateChildTable(char* cols, int threads, int ntables, char* db_name, SSuperTable* superTblInfo) { + pthread_t *pids = malloc(threads * sizeof(pthread_t)); + threadInfo *infos = malloc(threads * sizeof(threadInfo)); + + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed\n"); + exit(-1); + } + + if (threads < 1) { + threads = 1; + } + + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + b = ntables % threads; + + int last = 0; + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infos + i; + t_info->threadID = i; + tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); + t_info->superTblInfo = superTblInfo; + t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); + t_info->start_table_id = last; + t_info->end_table_id = i < b ? 
last + a : last + a - 1; + last = t_info->end_table_id + 1; + t_info->use_metric = 1; + t_info->cols = cols; + t_info->minDelay = INT16_MAX; + pthread_create(pids + i, NULL, createTable, t_info); + } + + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } + + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infos + i; + taos_close(t_info->taos); + } + + free(pids); + free(infos); +} + + +static void createChildTables() { + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + startMultiThreadCreateChildTable(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable, g_Dbs.threadCountByCreateTbl, g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; + } + } +} + +/* +static int taosGetLineNum(const char *fileName) +{ + int lineNum = 0; + char cmd[1024] = { 0 }; + char buf[1024] = { 0 }; + sprintf(cmd, "wc -l %s", fileName); + + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); + return lineNum; + } + + if (fgets(buf, sizeof(buf), fp)) { + int index = strchr((const char*)buf, ' ') - buf; + buf[index] = '\0'; + lineNum = atoi(buf); + } + pclose(fp); + return lineNum; +} +*/ + +/* + Read 10000 lines at most. If more than 10000 lines, continue to read after using +*/ +int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + + FILE *fp = fopen(superTblInfo->tagsFile, "r"); + if (fp == NULL) { + printf("Failed to open tags file: %s, reason:%s\n", superTblInfo->tagsFile, strerror(errno)); + return -1; + } + + if (superTblInfo->tagDataBuf) { + free(superTblInfo->tagDataBuf); + superTblInfo->tagDataBuf = NULL; + } + + int tagCount = 10000; + int count = 0; + char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + if (tagDataBuf == NULL) { + printf("Failed to calloc, reason:%s\n", strerror(errno)); + fclose(fp); + return -1; + } + + while ((readLen = tgetline(&line, &n, fp)) != -1) { + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } + + if (readLen == 0) { + continue; + } + + memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + count++; + + if (count >= tagCount - 1) { + char *tmp = realloc(tagDataBuf, (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + if (tmp != NULL) { + tagDataBuf = tmp; + tagCount = (int)(tagCount*1.5); + memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + } else { + // exit, if allocate more memory failed + printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + tmfree(tagDataBuf); + free(line); + fclose(fp); + return -1; + } + } + } + + superTblInfo->tagDataBuf = tagDataBuf; + superTblInfo->tagSampleCount = count; + + free(line); + fclose(fp); + return 0; +} + +int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) { + // TODO + return 0; +} + + +/* + Read 10000 lines at most. 
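(the cap is MAX_SAMPLES_ONCE_FROM_FILE rows buffered into sampleBuf per call;
+  on EOF the file is rewound with fseek() and reading continues from the top.)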
If more than 10000 lines, continue to read after using +*/ +int readSampleFromCsvFileToMem(FILE *fp, SSuperTable* superTblInfo, char* sampleBuf) { + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + int getRows = 0; + + memset(sampleBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE* superTblInfo->lenOfOneRow); + while (1) { + readLen = tgetline(&line, &n, fp); + if (-1 == readLen) { + if(0 != fseek(fp, 0, SEEK_SET)) { + printf("Failed to fseek file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); + return -1; + } + continue; + } + + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } + + if (readLen == 0) { + continue; + } + + if (readLen > superTblInfo->lenOfOneRow) { + printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow); + continue; + } + + memcpy(sampleBuf + getRows * superTblInfo->lenOfOneRow, line, readLen); + getRows++; + + if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { + break; + } + } + + tmfree(line); + return 0; +} + +/* +void readSampleFromFileToMem(SSuperTable * supterTblInfo) { + int ret; + if (0 == strncasecmp(supterTblInfo->sampleFormat, "csv", 3)) { + ret = readSampleFromCsvFileToMem(supterTblInfo); + } else if (0 == strncasecmp(supterTblInfo->sampleFormat, "json", 4)) { + ret = readSampleFromJsonFileToMem(supterTblInfo); + } + + if (0 != ret) { + exit(-1); + } +} +*/ +static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) { + bool ret = false; + + // columns + cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); + if (columns && columns->type != cJSON_Array) { + printf("failed to read json, columns not found\n"); + goto PARSE_OVER; + } else if (NULL == columns) { + superTbls->columnCount = 0; + superTbls->tagCount = 0; + return true; + } + + int columnSize = cJSON_GetArraySize(columns); + if (columnSize > MAX_COLUMN_COUNT) { + printf("failed to read json, column size overflow, max column size is %d\n", MAX_COLUMN_COUNT); + goto PARSE_OVER; + } + + int count = 1; + int index = 0; + StrColumn columnCase; - time_t tTime = time(NULL); - struct tm tm = *localtime(&tTime); - printf("###################################################################\n"); - printf("# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port); - printf("# User: %s\n", user); - printf("# Password: %s\n", pass); - printf("# Use metric: %s\n", use_metric ? "true" : "false"); - printf("# Datatype of Columns: %s\n", dataString); - printf("# Binary Length(If applicable): %d\n", - (strncasecmp(dataString, "BINARY", 6) == 0 || strncasecmp(dataString, "NCHAR", 5) == 0) ? 
len_of_binary : -1); - printf("# Number of Columns per record: %d\n", ncols_per_record); - printf("# Number of Threads: %d\n", threads); - printf("# Number of Tables: %d\n", ntables); - printf("# Number of Data per Table: %d\n", nrecords_per_table); - printf("# Records/Request: %d\n", nrecords_per_request); - printf("# Database name: %s\n", db_name); - printf("# Table prefix: %s\n", tb_prefix); - if (order == 1) - { - printf("# Data order: %d\n", order); - printf("# Data out of order rate: %d\n", rate); - - } - printf("# Delete method: %d\n", method_of_delete); - printf("# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - - if (!answer_yes) { - printf("###################################################################\n\n"); - printf("Press enter key to continue"); - (void)getchar(); + //superTbls->columnCount = columnSize; + for (int k = 0; k < columnSize; ++k) { + cJSON* column = cJSON_GetArrayItem(columns, k); + if (column == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(column, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + printf("failed to read json, column count not found"); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(column, "type"); + if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { + printf("failed to read json, column type not found"); + goto PARSE_OVER; + } + //strncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + + cJSON* dataLen = cJSON_GetObjectItem(column, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + printf("failed to read json, column len not found"); + goto PARSE_OVER; + } else { + columnCase.dataLen = 8; + } + + for (int n = 0; n < count; ++n) { + strncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + superTbls->columns[index].dataLen = columnCase.dataLen; + index++; + } + } + superTbls->columnCount = index; + + count = 1; + index = 0; + // tags + cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); + if (!tags || tags->type != cJSON_Array) { + printf("failed to read json, tags not found"); + goto PARSE_OVER; + } + + int tagSize = cJSON_GetArraySize(tags); + if (tagSize > MAX_TAG_COUNT) { + printf("failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT); + goto PARSE_OVER; + } + + //superTbls->tagCount = tagSize; + for (int k = 0; k < tagSize; ++k) { + cJSON* tag = cJSON_GetArrayItem(tags, k); + if (tag == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(tag, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + printf("failed to read json, column count not found"); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(tag, "type"); + if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { + printf("failed to read json, tag type not found"); + goto PARSE_OVER; + } + strncpy(columnCase.dataType, dataType->valuestring, 
MAX_TB_NAME_SIZE); + + cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + printf("failed to read json, column len not found"); + goto PARSE_OVER; + } else { + columnCase.dataLen = 0; + } + + for (int n = 0; n < count; ++n) { + strncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + superTbls->tags[index].dataLen = columnCase.dataLen; + index++; + } + } + superTbls->tagCount = index; + + ret = true; + +PARSE_OVER: + //free(content); + //cJSON_Delete(root); + //fclose(fp); + return ret; +} + +static bool getMetaFromInsertJsonFile(cJSON* root) { + bool ret = false; + + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + strncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + strncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE); + } else if (!host) { + strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, host not found\n"); + goto PARSE_OVER; + } + + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_Dbs.port = port->valueint; + } else if (!port) { + g_Dbs.port = 6030; + } + + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + strncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE); + } else if (!user) { + strncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE); + } + + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + strncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE); + } else if (!password) { + strncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE); + } + + cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); + if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { + strncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); + } else if (!resultfile) { + strncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); + } + + cJSON* threads = cJSON_GetObjectItem(root, "thread_count"); + if (threads && threads->type == cJSON_Number) { + g_Dbs.threadCount = threads->valueint; + } else if (!threads) { + g_Dbs.threadCount = 1; + } else { + printf("failed to read json, threads not found"); + goto PARSE_OVER; + } + + cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); + if (threads2 && threads2->type == cJSON_Number) { + g_Dbs.threadCountByCreateTbl = threads2->valueint; + } else if (!threads2) { + g_Dbs.threadCountByCreateTbl = 1; + } else { + printf("failed to read json, threads2 not found"); + goto PARSE_OVER; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + printf("failed to read json, confirm_parameter_prompt not found"); + goto 
PARSE_OVER; + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (!dbs || dbs->type != cJSON_Array) { + printf("failed to read json, databases not found\n"); + goto PARSE_OVER; + } + + int dbSize = cJSON_GetArraySize(dbs); + if (dbSize > MAX_DB_COUNT) { + printf("failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT); + goto PARSE_OVER; + } + + g_Dbs.dbCount = dbSize; + for (int i = 0; i < dbSize; ++i) { + cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); + if (dbinfos == NULL) continue; + + // dbinfo + cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); + if (!dbinfo || dbinfo->type != cJSON_Object) { + printf("failed to read json, dbinfo not found"); + goto PARSE_OVER; + } + + cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); + if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { + printf("failed to read json, db name not found"); + goto PARSE_OVER; + } + strncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE); + + cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); + if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { + if (0 == strncasecmp(drop->valuestring, "yes", 3)) { + g_Dbs.db[i].drop = 1; + } else { + g_Dbs.db[i].drop = 0; + } + } else if (!drop) { + g_Dbs.db[i].drop = 0; + } else { + printf("failed to read json, drop not found"); + goto PARSE_OVER; + } + + cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); + if (precision && precision->type == cJSON_String && precision->valuestring != NULL) { + strncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE); + } else if (!precision) { + //strncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); + memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, precision not found"); + goto PARSE_OVER; + } + + cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); + if (update && update->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.update = update->valueint; + } else if (!update) { + g_Dbs.db[i].dbCfg.update = -1; + } else { + printf("failed to read json, update not found"); + goto PARSE_OVER; + } + + cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); + if (replica && replica->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.replica = replica->valueint; + } else if (!replica) { + g_Dbs.db[i].dbCfg.replica = -1; + } else { + printf("failed to read json, replica not found"); + goto PARSE_OVER; + } + + cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); + if (keep && keep->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.keep = keep->valueint; + } else if (!keep) { + g_Dbs.db[i].dbCfg.keep = -1; + } else { + printf("failed to read json, keep not found"); + goto PARSE_OVER; + } + + cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); + if (days && days->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.days = days->valueint; + } else if (!days) { + g_Dbs.db[i].dbCfg.days = -1; + } else { + printf("failed to read json, days not found"); + goto PARSE_OVER; + } + + cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); + if (cache && cache->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.cache = cache->valueint; + } else if (!cache) { + g_Dbs.db[i].dbCfg.cache = -1; + } else { + printf("failed to read json, cache not found"); + goto PARSE_OVER; + } + + cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); + if (blocks && blocks->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.blocks = blocks->valueint; + } else if (!blocks) { + g_Dbs.db[i].dbCfg.blocks = -1; + } else { + printf("failed to read json, 
blocks not found\n");
+        goto PARSE_OVER;
+      }
+
+      //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode");
+      //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) {
+      //  g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint;
+      //} else if (!maxtablesPerVnode) {
+      //  g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES;
+      //} else {
+      //  printf("failed to read json, maxtablesPerVnode not found");
+      //  goto PARSE_OVER;
+      //}
+
+      cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows");
+      if (minRows && minRows->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
+      } else if (!minRows) {
+        g_Dbs.db[i].dbCfg.minRows = -1;
+      } else {
+        printf("failed to read json, minRows not found\n");
+        goto PARSE_OVER;
+      }
+
+      cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows");
+      if (maxRows && maxRows->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
+      } else if (!maxRows) {
+        g_Dbs.db[i].dbCfg.maxRows = -1;
+      } else {
+        printf("failed to read json, maxRows not found\n");
+        goto PARSE_OVER;
+      }
+
+      cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp");
+      if (comp && comp->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.comp = comp->valueint;
+      } else if (!comp) {
+        g_Dbs.db[i].dbCfg.comp = -1;
+      } else {
+        printf("failed to read json, comp not found\n");
+        goto PARSE_OVER;
+      }
+
+      cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
+      if (walLevel && walLevel->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint;
+      } else if (!walLevel) {
+        g_Dbs.db[i].dbCfg.walLevel = -1;
+      } else {
+        printf("failed to read json, walLevel not found\n");
+        goto PARSE_OVER;
+      }
+
+      cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
+      if (quorum && quorum->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.quorum = quorum->valueint;
+      } else if (!quorum) {
+        g_Dbs.db[i].dbCfg.quorum = -1;
+      } else {
+        printf("failed to read json, quorum not found\n");
+        goto PARSE_OVER;
+      }
+
+      cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
+      if (fsync && fsync->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.fsync = fsync->valueint;
+      } else if (!fsync) {
+        g_Dbs.db[i].dbCfg.fsync = -1;
+      } else {
+        printf("failed to read json, fsync not found\n");
+        goto PARSE_OVER;
+      }
+
+      // super_tables
+      cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
+      if (!stables || stables->type != cJSON_Array) {
+        printf("failed to read json, super_tables not found\n");
+        goto PARSE_OVER;
+      }
+
+      int stbSize = cJSON_GetArraySize(stables);
+      if (stbSize > MAX_SUPER_TABLE_COUNT) {
+        printf("failed to read json, super_tables size overflow, max is %d\n", MAX_SUPER_TABLE_COUNT);
+        goto PARSE_OVER;
+      }
+
+      g_Dbs.db[i].superTblCount = stbSize;
+      for (int j = 0; j < stbSize; ++j) {
+        cJSON* stbInfo = cJSON_GetArrayItem(stables, j);
+        if (stbInfo == NULL) continue;
+
+        // stb name
+        cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
+        if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) {
+          printf("failed to read json, stb name not found\n");
+          goto PARSE_OVER;
+        }
+        strncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE);
+
+        cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
+        if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
+          printf("failed to read json, childtable_prefix not found\n");
+          goto PARSE_OVER;
+        }
+        strncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE);
+
"auto_create_table"); // yes, no, null + if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) { + if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; + } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + } else if (!autoCreateTbl) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + printf("failed to read json, auto_create_table not found"); + goto PARSE_OVER; + } + + cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); + if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; + } else if (!batchCreateTbl) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = 2000; + } else { + printf("failed to read json, batch_create_tbl_num not found"); + goto PARSE_OVER; + } + + cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no + if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) { + if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; + } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } + } else if (!childTblExists) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + printf("failed to read json, child_table_exists not found"); + goto PARSE_OVER; + } + + cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); + if (!count || count->type != cJSON_Number || 0 >= count->valueint) { + printf("failed to read json, childtable_count not found"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; + + cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); + if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) { + strncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE); + } else if (!dataSource) { + strncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, data_source not found"); + goto PARSE_OVER; + } + + cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful + if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) { + strncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE); + #ifndef TD_LOWA_CURL + if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 7)) { + printf("There no libcurl, so no support resetful test! 
please use taosc mode.\n"); + goto PARSE_OVER; + } + #endif + } else if (!insertMode) { + strncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, insert_mode not found"); + goto PARSE_OVER; + } + + cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); + if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { + strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE); + } else if (!ts) { + strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, start_timestamp not found"); + goto PARSE_OVER; + } + + cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); + if (timestampStep && timestampStep->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; + } else if (!timestampStep) { + g_Dbs.db[i].superTbls[j].timeStampStep = 1000; + } else { + printf("failed to read json, timestamp_step not found"); + goto PARSE_OVER; + } + + cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size"); + if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint; + if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) { + g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; + } + } else if (!sampleDataBufSize) { + g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; + } else { + printf("failed to read json, sample_buf_size not found"); + goto PARSE_OVER; + } + + cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); + if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { + strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE); + } else if (!sampleFormat) { + strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, sample_format not found"); + goto PARSE_OVER; + } + + cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); + if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { + strncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN); + } else if (!sampleFile) { + memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); + } else { + printf("failed to read json, sample_file not found"); + goto PARSE_OVER; + } + + cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); + if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) { + strncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN); + if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { + g_Dbs.db[i].superTbls[j].tagSource = 0; + } else { + g_Dbs.db[i].superTbls[j].tagSource = 1; + } + } else if (!tagsFile) { + memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); + g_Dbs.db[i].superTbls[j].tagSource = 0; + } else { + printf("failed to read json, tags_file not found"); + goto PARSE_OVER; + } + + cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); + if (maxSqlLen && maxSqlLen->type == cJSON_Number) { + int32_t len = maxSqlLen->valueint; + if (len > TSDB_MAX_ALLOWED_SQL_LEN) { + len = TSDB_MAX_ALLOWED_SQL_LEN; + } else if (len < TSDB_MAX_SQL_LEN) { + len = TSDB_MAX_SQL_LEN; + } + g_Dbs.db[i].superTbls[j].maxSqlLen = len; + } else if (!maxSqlLen) { + g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN; + } else { + 
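+          /*
+           * Editor's note (descriptive sketch): the max_sql_len handling above
+           * clamps the configured value into the range
+           * [TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN]; a smaller value is
+           * raised to the default and a larger one is capped, so only a key
+           * that is present with the wrong type reaches the error branch here.
+           */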
printf("failed to read json, maxSqlLen not found"); + goto PARSE_OVER; + } + + cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes + if (multiThreadWriteOneTbl && multiThreadWriteOneTbl->type == cJSON_String && multiThreadWriteOneTbl->valuestring != NULL) { + if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; + } else { + g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; + } + } else if (!multiThreadWriteOneTbl) { + g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; + } else { + printf("failed to read json, multiThreadWriteOneTbl not found"); + goto PARSE_OVER; + } + + cJSON* numberOfTblInOneSql = cJSON_GetObjectItem(stbInfo, "number_of_tbl_in_one_sql"); + if (numberOfTblInOneSql && numberOfTblInOneSql->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = numberOfTblInOneSql->valueint; + } else if (!numberOfTblInOneSql) { + g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = 0; + } else { + printf("failed to read json, numberOfTblInOneSql not found"); + goto PARSE_OVER; + } + + cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl"); + if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint; + } else if (!rowsPerTbl) { + g_Dbs.db[i].superTbls[j].rowsPerTbl = 1; + } else { + printf("failed to read json, rowsPerTbl not found"); + goto PARSE_OVER; + } + + cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); + if (disorderRatio && disorderRatio->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; + } else if (!disorderRatio) { + g_Dbs.db[i].superTbls[j].disorderRatio = 0; + } else { + printf("failed to read json, disorderRatio not found"); + goto PARSE_OVER; + } + + cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); + if (disorderRange && disorderRange->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint; + } else if (!disorderRange) { + g_Dbs.db[i].superTbls[j].disorderRange = 1000; + } else { + printf("failed to read json, disorderRange not found"); + goto PARSE_OVER; + } + + cJSON* insertRate = cJSON_GetObjectItem(stbInfo, "insert_rate"); + if (insertRate && insertRate->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].insertRate = insertRate->valueint; + } else if (!insertRate) { + g_Dbs.db[i].superTbls[j].insertRate = 0; + } else { + printf("failed to read json, insert_rate not found"); + goto PARSE_OVER; + } + + cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); + if (insertRows && insertRows->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; + if (0 == g_Dbs.db[i].superTbls[j].insertRows) { + g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; + } + } else if (!insertRows) { + g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; + } else { + printf("failed to read json, insert_rows not found"); + goto PARSE_OVER; + } + + if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + + int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]); + if (false == retVal) { + goto PARSE_OVER; + } + } + } + + ret = true; + +PARSE_OVER: + //free(content); + //cJSON_Delete(root); + //fclose(fp); + return ret; +} + +static bool getMetaFromQueryJsonFile(cJSON* root) { + bool ret = false; + + 
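+  /*
+   * Editor's note (illustrative sketch, not part of the patch): every scalar
+   * option in these JSON readers follows the same three-way triage: key
+   * present with the right type, key absent (fall back to a default), or key
+   * present with the wrong type (abort via PARSE_OVER). A hypothetical helper
+   * for numeric fields could capture the pattern; the name getIntOption and
+   * its signature are invented for illustration only:
+   *
+   *   static bool getIntOption(cJSON* root, const char* key,
+   *                            int64_t defaultVal, int64_t* out) {
+   *     cJSON* item = cJSON_GetObjectItem(root, key);
+   *     if (item == NULL) { *out = defaultVal; return true; }  // absent: default
+   *     if (item->type != cJSON_Number) return false;          // wrong type: abort
+   *     *out = item->valueint;                                 // present: use it
+   *     return true;
+   *   }
+   */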
cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + strncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + strncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE); + } else if (!host) { + strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); + } else { + printf("failed to read json, host not found\n"); + goto PARSE_OVER; + } + + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_queryInfo.port = port->valueint; + } else if (!port) { + g_queryInfo.port = 6030; + } + + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + strncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE); + } else if (!user) { + strncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ; + } + + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + strncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE); + } else if (!password) { + strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + printf("failed to read json, confirm_parameter_prompt not found"); + goto PARSE_OVER; + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { + strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); + } else if (!dbs) { + printf("failed to read json, databases not found\n"); + goto PARSE_OVER; + } + + cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); + if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) { + strncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE); + } else if (!queryMode) { + strncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE); + } else { + printf("failed to read json, query_mode not found\n"); + goto PARSE_OVER; + } + + // super_table_query + cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query"); + if (!superQuery) { + g_queryInfo.superQueryInfo.concurrent = 0; + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superQuery->type != cJSON_Object) { + printf("failed to read json, super_table_query not found"); + goto PARSE_OVER; + } else { + cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval"); + if (rate && rate->type == cJSON_Number) { + g_queryInfo.superQueryInfo.rate = rate->valueint; + } else if (!rate) { + g_queryInfo.superQueryInfo.rate = 0; + } + + cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent"); + if (concurrent && concurrent->type == cJSON_Number) { + g_queryInfo.superQueryInfo.concurrent = concurrent->valueint; + } else if (!concurrent) { + g_queryInfo.superQueryInfo.concurrent = 1; + } + + cJSON* mode = cJSON_GetObjectItem(superQuery, "mode"); + if (mode && 
mode->type == cJSON_String && mode->valuestring != NULL) { + if (0 == strcmp("sync", mode->valuestring)) { + g_queryInfo.superQueryInfo.subscribeMode = 0; + } else if (0 == strcmp("async", mode->valuestring)) { + g_queryInfo.superQueryInfo.subscribeMode = 1; + } else { + printf("failed to read json, subscribe mod error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeMode = 0; + } + + cJSON* interval = cJSON_GetObjectItem(superQuery, "interval"); + if (interval && interval->type == cJSON_Number) { + g_queryInfo.superQueryInfo.subscribeInterval = interval->valueint; + } else if (!interval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.superQueryInfo.subscribeInterval = 10000; + } + + cJSON* restart = cJSON_GetObjectItem(superQuery, "restart"); + if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { + if (0 == strcmp("yes", restart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = 1; + } else if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = 0; + } else { + printf("failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeRestart = 1; + } + + cJSON* keepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + if (keepProgress && keepProgress->type == cJSON_String && keepProgress->valuestring != NULL) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", keepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } else { + printf("failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); + if (!superSqls) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superSqls->type != cJSON_Array) { + printf("failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.superQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("failed to read json, sql not found\n"); + goto PARSE_OVER; + } + strncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) { + strncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + printf("failed to read json, super query result file not found\n"); + goto PARSE_OVER; + } + } + } + } + + // sub_table_query + cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query"); + if (!subQuery) { + g_queryInfo.subQueryInfo.threadCnt = 0; + g_queryInfo.subQueryInfo.sqlCount = 0; + } else if (subQuery->type != cJSON_Object) { + printf("failed to read json, sub_table_query not 
found"); + ret = true; + goto PARSE_OVER; + } else { + cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval"); + if (subrate && subrate->type == cJSON_Number) { + g_queryInfo.subQueryInfo.rate = subrate->valueint; + } else if (!subrate) { + g_queryInfo.subQueryInfo.rate = 0; + } + + cJSON* threads = cJSON_GetObjectItem(subQuery, "threads"); + if (threads && threads->type == cJSON_Number) { + g_queryInfo.subQueryInfo.threadCnt = threads->valueint; + } else if (!threads) { + g_queryInfo.subQueryInfo.threadCnt = 1; + } + + //cJSON* subTblCnt = cJSON_GetObjectItem(subQuery, "childtable_count"); + //if (subTblCnt && subTblCnt->type == cJSON_Number) { + // g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint; + //} else if (!subTblCnt) { + // g_queryInfo.subQueryInfo.childTblCount = 0; + //} + + cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname"); + if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { + strncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); + } else { + printf("failed to read json, super table name not found\n"); + goto PARSE_OVER; + } + + cJSON* submode = cJSON_GetObjectItem(subQuery, "mode"); + if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { + if (0 == strcmp("sync", submode->valuestring)) { + g_queryInfo.subQueryInfo.subscribeMode = 0; + } else if (0 == strcmp("async", submode->valuestring)) { + g_queryInfo.subQueryInfo.subscribeMode = 1; + } else { + printf("failed to read json, subscribe mod error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.subQueryInfo.subscribeMode = 0; + } + + cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval"); + if (subinterval && subinterval->type == cJSON_Number) { + g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint; + } else if (!subinterval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.subQueryInfo.subscribeInterval = 10000; + } + + cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart"); + if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { + if (0 == strcmp("yes", subrestart->valuestring)) { + g_queryInfo.subQueryInfo.subscribeRestart = 1; + } else if (0 == strcmp("no", subrestart->valuestring)) { + g_queryInfo.subQueryInfo.subscribeRestart = 0; + } else { + printf("failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.subQueryInfo.subscribeRestart = 1; + } + + cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress"); + if (subkeepProgress && subkeepProgress->type == cJSON_String && subkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", subkeepProgress->valuestring)) { + g_queryInfo.subQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", subkeepProgress->valuestring)) { + g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; + } else { + printf("failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls"); + if (!subsqls) { + g_queryInfo.subQueryInfo.sqlCount = 0; + } else if (subsqls->type != cJSON_Array) { + printf("failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(subsqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + printf("failed to read json, query sql size overflow, max is %d\n", 
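+            /*
+             * Editor's note (illustrative only, field values invented): the
+             * "super_table_query" section being parsed here would look roughly
+             * like this in the query JSON file:
+             *
+             *   "super_table_query": {
+             *     "stblname":       "meters",
+             *     "query_interval": 1000,
+             *     "threads":        4,
+             *     "mode":           "sync",
+             *     "interval":       10000,
+             *     "restart":        "yes",
+             *     "keepProgress":   "no",
+             *     "sqls": [ { "sql": "select count(*) from xxxx",
+             *                 "result": "./query_res.txt" } ]
+             *   }
+             */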
MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.subQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(subsqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("failed to read json, sql not found\n"); + goto PARSE_OVER; + } + strncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ + strncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + printf("failed to read json, sub query result file not found\n"); + goto PARSE_OVER; + } + } + } } - fprintf(fp, "###################################################################\n"); - fprintf(fp, "# Server IP: %s:%hu\n", ip_addr == NULL ? "localhost" : ip_addr, port); - fprintf(fp, "# User: %s\n", user); - fprintf(fp, "# Password: %s\n", pass); - fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false"); - fprintf(fp, "# Datatype of Columns: %s\n", dataString); - fprintf(fp, "# Binary Length(If applicable): %d\n", - (strncasecmp(dataString, "BINARY", 6) == 0 || strncasecmp(dataString, "NCHAR", 5) == 0) ? len_of_binary : -1); - fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record); - fprintf(fp, "# Number of Threads: %d\n", threads); - fprintf(fp, "# Number of Tables: %d\n", ntables); - fprintf(fp, "# Number of Data per Table: %d\n", nrecords_per_table); - fprintf(fp, "# Records/Request: %d\n", nrecords_per_request); - fprintf(fp, "# Database name: %s\n", db_name); - fprintf(fp, "# Table prefix: %s\n", tb_prefix); - if (order == 1) - { - printf("# Data order: %d\n", order); - printf("# Data out of order rate: %d\n", rate); - - } - fprintf(fp, "# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - fprintf(fp, "###################################################################\n\n"); - fprintf(fp, "| WRecords | Records/Second | Requests/Second | WLatency(ms) |\n"); - - taos_init(); - TAOS *taos = taos_connect(ip_addr, user, pass, NULL, port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - return 1; - } - char command[BUFFER_SIZE] = "\0"; + ret = true; - sprintf(command, "drop database %s;", db_name); - TAOS_RES* res = taos_query(taos, command); - taos_free_result(res); +PARSE_OVER: + //free(content); + //cJSON_Delete(root); + //fclose(fp); + return ret; +} - sprintf(command, "create database %s replica %d;", db_name, replica); - res = taos_query(taos, command); - taos_free_result(res); +static bool getInfoFromJsonFile(char* file) { + FILE *fp = fopen(file, "r"); + if (!fp) { + printf("failed to read %s, reason:%s\n", file, strerror(errno)); + return false; + } - char cols[STRING_LEN] = "\0"; - int colIndex = 0; - len = 0; + bool ret = false; + int maxLen = 64000; + char *content = calloc(1, maxLen + 1); + int len = fread(content, 1, maxLen, fp); + if (len <= 0) { + free(content); + fclose(fp); + printf("failed to read %s, content is null", file); + return false; + } + + content[len] = 0; + cJSON* root = cJSON_Parse(content); + if (root == NULL) { + printf("failed to cjson parse %s, invalid json 
format", file); + goto PARSE_OVER; + } - for (; colIndex < ncols_per_record - 1; colIndex++) { - if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0) { - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]); + cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); + if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { + if (0 == strcasecmp("insert", filetype->valuestring)) { + g_jsonType = INSERT_MODE; + } else if (0 == strcasecmp("query", filetype->valuestring)) { + g_jsonType = QUERY_MODE; + } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { + g_jsonType = SUBSCRIBE_MODE; } else { - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); + printf("failed to read json, filetype not support\n"); + goto PARSE_OVER; } + } else if (!filetype) { + g_jsonType = INSERT_MODE; + } else { + printf("failed to read json, filetype not found\n"); + goto PARSE_OVER; } - if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0 && strcasecmp(data_type[colIndex % count_data_type], "NCHAR") != 0){ - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]); + if (INSERT_MODE == g_jsonType) { + ret = getMetaFromInsertJsonFile(root); + } else if (QUERY_MODE == g_jsonType) { + ret = getMetaFromQueryJsonFile(root); + } else if (SUBSCRIBE_MODE == g_jsonType) { + ret = getMetaFromQueryJsonFile(root); } else { - len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); + printf("input json file type error! 
please input correct file type: insert or query or subscribe\n"); + goto PARSE_OVER; + } + +PARSE_OVER: + free(content); + cJSON_Delete(root); + fclose(fp); + return ret; +} + + +void prePareSampleData() { + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + //if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].dataSource, "sample", 6)) { + // readSampleFromFileToMem(&g_Dbs.db[i].superTbls[j]); + //} + + if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { + (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]); + } + + #ifdef TD_LOWA_CURL + if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) { + curl_global_init(CURL_GLOBAL_ALL); + } + #endif + } } +} - if (use_metric) { - /* Create metric table */ - printf("Creating meters super table...\n"); - snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s) tags (areaid int, loc binary(10))", db_name, cols); - queryDB(taos, command); - printf("meters created!\n"); +void postFreeResource() { + tmfclose(g_fpOfInsertResult); + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (0 != g_Dbs.db[i].superTbls[j].colsOfCreatChildTable) { + free(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreatChildTable = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { + free(g_Dbs.db[i].superTbls[j].sampleDataBuf); + g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { + free(g_Dbs.db[i].superTbls[j].tagDataBuf); + g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].childTblName) { + free(g_Dbs.db[i].superTbls[j].childTblName); + g_Dbs.db[i].superTbls[j].childTblName = NULL; + } + + #ifdef TD_LOWA_CURL + if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) { + curl_global_cleanup(); + } + #endif + } } - taos_close(taos); - - /* Wait for table to create */ - multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass); - - /* Insert data */ - double ts = getCurrentTime(); - printf("Inserting data......\n"); - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - info *infos = malloc(threads * sizeof(info)); - - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(info)); +} - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; +int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* superTblInfo, int* sampleUsePos, FILE *fp, char* sampleBuf) { + if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleBuf); + if (0 != ret) { + return -1; + } + *sampleUsePos = 0; } - int b = 0; - if (threads != 0) - b = ntables % threads; - int last = 0; - for (int i = 0; i < threads; i++) { - info *t_info = infos + i; - t_info->threadID = i; - tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); - tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE); - t_info->datatype = data_type; - t_info->ncols_per_record = ncols_per_record; - t_info->nrecords_per_table = nrecords_per_table; - t_info->start_time = 1500000000000; - t_info->taos = taos_connect(ip_addr, user, pass, db_name, port); - t_info->len_of_binary = len_of_binary; - t_info->nrecords_per_request = nrecords_per_request; - t_info->start_table_id = last; - t_info->data_of_order = order; - t_info->data_of_rate = rate; - t_info->end_table_id = i < b ? 
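+/*
+ * Editor's note (sketch, all values invented and the column/tag syntax
+ * abridged): getInfoFromJsonFile() above dispatches on the top-level
+ * "filetype" field, so a minimal insert configuration has the shape
+ *
+ *   {
+ *     "filetype":  "insert",
+ *     "databases": [ {
+ *       "dbinfo":       { "name": "db", "drop": "yes" },
+ *       "super_tables": [ {
+ *         "name":              "stb",
+ *         "childtable_prefix": "t",
+ *         "childtable_count":  10,
+ *         "tags": [ { "len": 16, "count": 1 } ]
+ *       } ]
+ *     } ]
+ *   }
+ *
+ * and is driven by a call such as: if (!getInfoFromJsonFile(file)) exit(1);
+ * (the caller shown is hypothetical; the real call site is outside this hunk).
+ */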
last + a : last + a - 1; - last = t_info->end_table_id + 1; - t_info->counter = 0; - t_info->minDelay = INT16_MAX; + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", sampleBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - tsem_init(&(t_info->mutex_sem), 0, 1); - t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1; - tsem_init(&(t_info->lock_sem), 0, 0); + (*sampleUsePos)++; + + return dataLen; +} - if (query_mode == SYNC) { - pthread_create(pids + i, NULL, syncWrite, t_info); - } else { - pthread_create(pids + i, NULL, asyncWrite, t_info); +int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); + for (int i = 0; i < stbInfo->columnCount; i++) { + if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { + if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { + printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); + return (-1); + } + + char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); + if (NULL == buf) { + printf("calloc failed! size:%d\n", stbInfo->columns[i].dataLen); + return (-1); + } + rand_string(buf, stbInfo->columns[i].dataLen); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_int()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_float()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_double()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + } else { + printf("No support data type: %s\n", stbInfo->columns[i].dataType); + return (-1); } } - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } + dataLen -= 2; + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + + return dataLen; +} - double t = getCurrentTime() - ts; - if (query_mode == SYNC) { - printf("SYNC Insert with %d connections:\n", threads); - } else { - printf("ASYNC Insert with %d connections:\n", threads); - } +void syncWriteForNumberOfTblInOneSql(threadInfo *winfo, FILE *fp, char* sampleDataBuf) { + SSuperTable* superTblInfo = winfo->superTblInfo; - fprintf(fp, "|%"PRIu64" | %10.2f 
| %10.2f | %10.4f |\n\n", - (int64_t)ntables * nrecords_per_table, ntables * nrecords_per_table / t, - ((int64_t)ntables * nrecords_per_table) / (t * nrecords_per_request), - t * 1000); + int samplePos = 0; - printf("Spent %.4f seconds to insert %"PRIu64" records with %d record(s) per request: %.2f records/second\n", - t, (int64_t)ntables * nrecords_per_table, nrecords_per_request, - (int64_t)ntables * nrecords_per_table / t); + //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); + int64_t totalRowsInserted = 0; + int64_t totalAffectedRows = 0; + int64_t lastPrintTime = taosGetTimestampMs(); - int64_t totalDelay = 0; - int64_t maxDelay = 0; - int64_t minDelay = INT16_MAX; - int64_t cntDelay = 0; - double avgDelay = 0; - for (int i = 0; i < threads; i++) { - info *t_info = infos + i; - taos_close(t_info->taos); - tsem_destroy(&(t_info->mutex_sem)); - tsem_destroy(&(t_info->lock_sem)); + char* buffer = calloc(superTblInfo->maxSqlLen+1, 1); + if (NULL == buffer) { + printf("========calloc size[ %d ] fail!\n", superTblInfo->maxSqlLen); + return; + } - totalDelay += t_info->totalDelay; - cntDelay += t_info->cntDelay; - if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay; - if (t_info->minDelay < minDelay) minDelay = t_info->minDelay; + int32_t numberOfTblInOneSql = superTblInfo->numberOfTblInOneSql; + int32_t tbls = winfo->end_table_id - winfo->start_table_id + 1; + if (numberOfTblInOneSql > tbls) { + numberOfTblInOneSql = tbls; } - avgDelay = (double)totalDelay / cntDelay; - fprintf(fp, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n", - avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); + int64_t time_counter = winfo->start_time; + int64_t tmp_time; + int sampleUsePos; + + int64_t st = 0; + int64_t et = 0; + for (int i = 0; i < superTblInfo->insertRows;) { + if (superTblInfo->insertRate && (et - st) < 1000) { + taosMsleep(1000 - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + } - printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n", - avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); + if (superTblInfo->insertRate) { + st = taosGetTimestampMs(); + } - free(pids); - free(infos); - fclose(fp); + int32_t tbl_id = 0; + for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; ) { + int inserted = i; - if (method_of_delete != 0) - { - TAOS *dtaos = taos_connect(ip_addr, user, pass, db_name, port); - double dts = getCurrentTime(); - printf("Deleteing %d table(s)......\n", ntables); + int k = 0; + int batchRowsSql = 0; + while (1) + { + int len = 0; + memset(buffer, 0, superTblInfo->maxSqlLen); + char *pstr = buffer; + + int32_t end_tbl_id = tID + numberOfTblInOneSql; + if (end_tbl_id > winfo->end_table_id) { + end_tbl_id = winfo->end_table_id+1; + } + for (tbl_id = tID; tbl_id < end_tbl_id; tbl_id++) { + sampleUsePos = samplePos; + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo); + } else { + tagsValBuf = getTagValueFromTagSample(superTblInfo, tbl_id % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + goto free_and_statistics; + } - switch (method_of_delete) - { - case 1: - // delete by table - /* Create all the tables; */ - for (int i = 0; i < ntables; i++) { - 
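+/*
+ * Editor's note (illustrative): for a super table declared with columns
+ * (ts TIMESTAMP, f1 INT, f2 BINARY(8)), the generateRowData() routine added
+ * above emits one parenthesized tuple per row, e.g.
+ *
+ *   (1500000000000, 278, 'hswzxo')
+ *
+ * the trailing ", " written after the last column is trimmed by the
+ * dataLen -= 2 step before the closing parenthesis is appended.
+ */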
sprintf(command, "drop table %s.%s%d;", db_name, tb_prefix, i); - queryDB(dtaos, command); - } - break; - case 2: - // delete by stable - if (!use_metric) { + if (0 == len) { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf); + } else { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf); + } + tmfree(tagsValBuf); + } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + if (0 == len) { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); + } else { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); + } + } else { // pre-create child table + if (0 == len) { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id); + } else { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id); + } + } + + tmp_time = time_counter; + for (k = 0; k < superTblInfo->rowsPerTbl;) { + int retLen = 0; + if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { + retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf); + if (retLen < 0) { + goto free_and_statistics; + } + } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) { + int rand_num = rand_tinyint() % 100; + if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { + int64_t d = tmp_time - rand() % superTblInfo->disorderRange; + retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo); + } else { + retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo); + } + if (retLen < 0) { + goto free_and_statistics; + } + } + len += retLen; + //inserted++; + k++; + totalRowsInserted++; + batchRowsSql++; + + if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128) || batchRowsSql >= INT16_MAX - 1) { + tID = tbl_id + 1; + printf("config rowsPerTbl and numberOfTblInOneSql not match with max_sql_lenth, please reconfig![lenOfOneRow:%d]\n", superTblInfo->lenOfOneRow); + goto send_to_server; + } + } + + } + + tID = tbl_id; + inserted += superTblInfo->rowsPerTbl; + + send_to_server: + batchRowsSql = 0; + if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { + //printf("multi table===== sql: %s \n\n", buffer); + //int64_t t1 = taosGetTimestampMs(); + int64_t startTs; + int64_t endTs; + startTs = taosGetTimestampUs(); + + int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); + if (0 > affectedRows) { + goto free_and_statistics; + } else { + endTs = taosGetTimestampUs(); + int64_t delay = endTs - startTs; + if (delay > winfo->maxDelay) winfo->maxDelay = delay; + if (delay < winfo->minDelay) winfo->minDelay = delay; + winfo->cntDelay++; + winfo->totalDelay += delay; + //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; + } + totalAffectedRows += 
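+/*
+ * Editor's note (illustrative, names invented): with auto table creation the
+ * multi-table buffer assembled above takes the shape
+ *
+ *   insert into db.t0 using db.stb tags (...) values (ts, ...) (ts, ...)
+ *               db.t1 using db.stb tags (...) values (ts, ...) (ts, ...)
+ *
+ * one "using ... tags ... values ..." clause per child table, all submitted
+ * through the single queryDbExec() call above.
+ */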
affectedRows; + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows); + lastPrintTime = currentPrintTime; + } + //int64_t t2 = taosGetTimestampMs(); + //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); + } else { + #ifdef TD_LOWA_CURL + //int64_t t1 = taosGetTimestampMs(); + int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle); + //int64_t t2 = taosGetTimestampMs(); + //printf("http insert sql return, Spent %ld ms \n", t2 - t1); + + if (0 != retCode) { + printf("========curl return fail, threadID[%d]\n", winfo->threadID); + goto free_and_statistics; + } + #else + printf("========no use http mode for no curl lib!\n"); + goto free_and_statistics; + #endif + } + + //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt); break; } - else - { - sprintf(command, "drop table %s.meters;", db_name); - queryDB(dtaos, command); + + if (tID > winfo->end_table_id) { + if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { + samplePos = sampleUsePos; + } + i = inserted; + time_counter = tmp_time; } - break; - case 3: - // delete by database - sprintf(command, "drop database %s;", db_name); - queryDB(dtaos, command); - break; - default: - break; + } + + if (superTblInfo->insertRate) { + et = taosGetTimestampMs(); } + //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); + } - printf("Table(s) droped!\n"); - taos_close(dtaos); + free_and_statistics: + tmfree(buffer); + winfo->totalRowsInserted = totalRowsInserted; + winfo->totalAffectedRows = totalAffectedRows; + printf("====thread[%d] completed total inserted rows: %"PRId64 ", affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows); + return; +} - double dt = getCurrentTime() - dts; - printf("Spent %.4f seconds to drop %d tables\n", dt, ntables); +// sync insertion +/* + 1 thread: 100 tables * 2000 rows/s + 1 thread: 10 tables * 20000 rows/s + 6 thread: 300 tables * 2000 rows/s - FILE *fp = fopen(arguments.output_file, "a"); - fprintf(fp, "Spent %.4f seconds to drop %d tables\n", dt, ntables); - fclose(fp); + 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s +*/ +void *syncWrite(void *sarg) { + int64_t totalRowsInserted = 0; + int64_t totalAffectedRows = 0; + int64_t lastPrintTime = taosGetTimestampMs(); + + threadInfo *winfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = winfo->superTblInfo; + + FILE *fp = NULL; + char* sampleDataBuf = NULL; + int samplePos = 0; + + // each thread read sample data from csv file + if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { + sampleDataBuf = calloc(superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + if (sampleDataBuf == NULL) { + printf("Failed to calloc %d Bytes, reason:%s\n", superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); + return NULL; + } + + fp = fopen(superTblInfo->sampleFile, "r"); + if (fp == NULL) { + printf("Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); + tmfree(sampleDataBuf); + return NULL; + } + int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleDataBuf); + if (0 != ret) { + tmfree(sampleDataBuf); + tmfclose(fp); + return NULL; + } + } + if (superTblInfo->numberOfTblInOneSql > 0) { + 
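+  /*
+   * Editor's note (worked example, numbers invented): the per-request batch
+   * size computed a few lines below for the single-table-per-SQL path is
+   * derived from the SQL buffer size; e.g. with maxSqlLen = 1048576,
+   * lenOfTagOfOneRow = 128 and lenOfOneRow = 84:
+   *
+   *   nrecords_per_request = (1048576 - 1280 - 128) / 84 = 12466 rows/request
+   *
+   * the result is then bounded by insertRate (when set) and by INT16_MAX - 1.
+   */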
syncWriteForNumberOfTblInOneSql(winfo, fp, sampleDataBuf); + tmfree(sampleDataBuf); + tmfclose(fp); + return NULL; } - - if (false == insert_only) { - // query data - pthread_t read_id; - info *rInfo = malloc(sizeof(info)); - rInfo->start_time = 1500000000000; - rInfo->start_table_id = 0; - rInfo->end_table_id = ntables - 1; - rInfo->do_aggreFunc = do_aggreFunc; - rInfo->nrecords_per_table = nrecords_per_table; - rInfo->taos = taos_connect(ip_addr, user, pass, db_name, port); - strcpy(rInfo->tb_prefix, tb_prefix); - strcpy(rInfo->fp, arguments.output_file); - - if (!use_metric) { - pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); + //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); + + char* buffer = calloc(superTblInfo->maxSqlLen, 1); + + int nrecords_per_request = 0; + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + nrecords_per_request = (superTblInfo->maxSqlLen - 1280 - superTblInfo->lenOfTagOfOneRow) / superTblInfo->lenOfOneRow; + } else { + nrecords_per_request = (superTblInfo->maxSqlLen - 1280) / superTblInfo->lenOfOneRow; + } + + int nrecords_no_last_req = nrecords_per_request; + int nrecords_last_req = 0; + int loop_cnt = 0; + if (0 != superTblInfo->insertRate) { + if (nrecords_no_last_req >= superTblInfo->insertRate) { + nrecords_no_last_req = superTblInfo->insertRate; + } else { + nrecords_last_req = superTblInfo->insertRate % nrecords_per_request; + loop_cnt = (superTblInfo->insertRate / nrecords_per_request) + (superTblInfo->insertRate % nrecords_per_request ? 1 : 0) ; } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - free(rInfo); + } + + if (nrecords_no_last_req <= 0) { + nrecords_no_last_req = 1; } - taos_cleanup(); - return 0; -} + if (nrecords_no_last_req >= INT16_MAX) { + nrecords_no_last_req = INT16_MAX - 1; + } -#define MAX_SQL_SIZE 65536 -void selectSql(TAOS* taos, char* sqlcmd) -{ - TAOS_RES *pSql = taos_query(taos, sqlcmd); - int32_t code = taos_errno(pSql); - - if (code != 0) { - printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql)); - taos_free_result(pSql); - exit(1); + if (nrecords_last_req >= INT16_MAX) { + nrecords_last_req = INT16_MAX - 1; } + + int nrecords_cur_req = nrecords_no_last_req; + int loop_cnt_orig = loop_cnt; + + //printf("========nrecords_per_request:%d, nrecords_no_last_req:%d, nrecords_last_req:%d, loop_cnt:%d\n", nrecords_per_request, nrecords_no_last_req, nrecords_last_req, loop_cnt); + + int64_t time_counter = winfo->start_time; + + int64_t st = 0; + int64_t et = 0; + for (int i = 0; i < superTblInfo->insertRows;) { + if (superTblInfo->insertRate && (et - st) < 1000) { + taosMsleep(1000 - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + } + + if (superTblInfo->insertRate) { + st = taosGetTimestampMs(); + } + + for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { + int inserted = i; + int64_t tmp_time = time_counter; + + int sampleUsePos = samplePos; + int k = 0; + while (1) + { + int len = 0; + memset(buffer, 0, superTblInfo->maxSqlLen); + char *pstr = buffer; + + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo); + } else { + tagsValBuf = getTagValueFromTagSample(superTblInfo, tID % 
superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + goto free_and_statistics_2; + } + + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values", winfo->db_name, superTblInfo->childTblPrefix, tID, winfo->db_name, superTblInfo->sTblName, tagsValBuf); + tmfree(tagsValBuf); + } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values", winfo->db_name, superTblInfo->childTblName + tID * TSDB_TABLE_NAME_LEN); + } else { + len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values", winfo->db_name, superTblInfo->childTblPrefix, tID); + } + + for (k = 0; k < nrecords_cur_req;) { + int retLen = 0; + if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { + retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf); + if (retLen < 0) { + goto free_and_statistics_2; + } + } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) { + int rand_num = rand_tinyint() % 100; + if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { + int64_t d = tmp_time - rand() % superTblInfo->disorderRange; + retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo); + //printf("disorder rows, rand_num:%d, last ts:%"PRId64" current ts:%"PRId64"\n", rand_num, tmp_time, d); + } else { + retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo); + } + if (retLen < 0) { + goto free_and_statistics_2; + } + } + len += retLen; + inserted++; + k++; + totalRowsInserted++; - int count = 0; - while (taos_fetch_row(pSql) != NULL) { - count++; - } + if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) break; + } - taos_free_result(pSql); - return; -} - + if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { + //printf("===== sql: %s \n\n", buffer); + //int64_t t1 = taosGetTimestampMs(); + int64_t startTs; + int64_t endTs; + startTs = taosGetTimestampUs(); + + int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); + if (0 > affectedRows){ + goto free_and_statistics_2; + } else { + endTs = taosGetTimestampUs(); + int64_t delay = endTs - startTs; + if (delay > winfo->maxDelay) winfo->maxDelay = delay; + if (delay < winfo->minDelay) winfo->minDelay = delay; + winfo->cntDelay++; + winfo->totalDelay += delay; + //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; + } + totalAffectedRows += affectedRows; -/* Function to do regular expression check */ -static int regexMatch(const char *s, const char *reg, int cflags) { - regex_t regex; - char msgbuf[100] = {0}; + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows); + lastPrintTime = currentPrintTime; + } + //int64_t t2 = taosGetTimestampMs(); + //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); + } else { + #ifdef TD_LOWA_CURL + //int64_t t1 = taosGetTimestampMs(); + int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle); + //int64_t t2 = taosGetTimestampMs(); + //printf("http insert sql return, Spent %ld ms \n", t2 - t1); + + if (0 != 
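+          /*
+           * Editor's note (sketch): each taosc request above is timed in
+           * microseconds and folded into the thread's running latency stats;
+           * the summary average is later computed as totalDelay / cntDelay:
+           *
+           *   int64_t delay = endTs - startTs;              // us
+           *   if (delay > winfo->maxDelay) winfo->maxDelay = delay;
+           *   if (delay < winfo->minDelay) winfo->minDelay = delay;
+           *   winfo->totalDelay += delay;  winfo->cntDelay++;
+           */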
retCode) {
+            printf("========curl return fail, threadID[%d]\n", winfo->threadID);
+            goto free_and_statistics_2;
+          }
+          #else
+          printf("========no use http mode for no curl lib!\n");
+          goto free_and_statistics_2;
+          #endif
+        }
+
+        //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt);
+
+        if (loop_cnt) {
+          loop_cnt--;
+          if ((1 == loop_cnt) && (0 != nrecords_last_req)) {
+            nrecords_cur_req = nrecords_last_req;
+          } else if (0 == loop_cnt){
+            nrecords_cur_req = nrecords_no_last_req;
+            loop_cnt = loop_cnt_orig;
+            break;
+          }
+        } else {
+          break;
+        }
+      }
 
-  /* Compile regular expression */
-  if (regcomp(&regex, reg, cflags) != 0) {
-    printf("Fail to compile regex\n");
-    exit(-1);
+      if (tID == winfo->end_table_id) {
+        if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) {
+          samplePos = sampleUsePos;
+        }
+        i = inserted;
+        time_counter = tmp_time;
+      }
+    }
+
+    if (superTblInfo->insertRate) {
+      et = taosGetTimestampMs();
+    }
+    //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i);
   }
 
-  /* Execute regular expression */
-  int reti = regexec(&regex, s, 0, NULL, 0);
-  if (!reti) {
-    regfree(&regex);
-    return 1;
-  } else if (reti == REG_NOMATCH) {
-    regfree(&regex);
-    return 0;
-  } else {
-    regerror(reti, &regex, msgbuf, sizeof(msgbuf));
-    printf("Regex match failed: %s\n", msgbuf);
-    regfree(&regex);
-    exit(-1);
-  }
+  free_and_statistics_2:
+  tmfree(buffer);
+  tmfree(sampleDataBuf);
+  tmfclose(fp);
 
-  return 0;
+  winfo->totalRowsInserted = totalRowsInserted;
+  winfo->totalAffectedRows = totalAffectedRows;
+
+  printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows);
+  return NULL;
 }
 
-static int isCommentLine(char *line) {
-  if (line == NULL) return 1;
-
-  return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
-}
+void callBack(void *param, TAOS_RES *res, int code) {
+  threadInfo* winfo = (threadInfo*)param;
 
-void querySqlFile(TAOS* taos, char* sqlFile)
-{
-  FILE *fp = fopen(sqlFile, "r");
-  if (fp == NULL) {
-    printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
-    exit(-1);
+  if (winfo->superTblInfo->insertRate) {
+    winfo->et = taosGetTimestampMs();
+    if (winfo->et - winfo->st < 1000) {
+      taosMsleep(1000 - (winfo->et - winfo->st)); // ms
+    }
+  }
+
+  char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen);
+  char *data = calloc(1, MAX_DATA_SIZE);
+  char *pstr = buffer;
+  pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id);
+  if (winfo->counter >= winfo->superTblInfo->insertRows) {
+    winfo->start_table_id++;
+    winfo->counter = 0;
+  }
+  if (winfo->start_table_id > winfo->end_table_id) {
+    tsem_post(&winfo->lock_sem);
+    free(buffer);
+    free(data);
+    taos_free_result(res);
+    return;
   }
 
-  int read_len = 0;
-  char * cmd = calloc(1, MAX_SQL_SIZE);
-  size_t cmd_len = 0;
-  char * line = NULL;
-  size_t line_len = 0;
+  for (int i = 0; i < winfo->nrecords_per_request; i++) {
+    int rand_num = rand() % 100;
+    if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio)
+    {
+      int64_t d = winfo->lastTs - rand() % 1000000 + rand_num;
+      //generateData(data, datatype, ncols_per_record, d, len_of_binary);
+      (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo);
+    } else {
+      //generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary);
+      (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, 
winfo->superTblInfo);
+ }
+ pstr += sprintf(pstr, "%s", data);
+ winfo->counter++;
- double t = getCurrentTime();
+ if (winfo->counter >= winfo->superTblInfo->insertRows) {
+ break;
+ }
+ }
- while ((read_len = tgetline(&line, &line_len, fp)) != -1) {
- if (read_len >= MAX_SQL_SIZE) continue;
- line[--read_len] = '\0';
+ if (winfo->superTblInfo->insertRate) {
+ winfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(winfo->taos, buffer, callBack, winfo);
+ free(buffer);
+ free(data);
- if (read_len == 0 || isCommentLine(line)) { // line starts with #
- continue;
- }
+ taos_free_result(res);
+}
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
+void *asyncWrite(void *sarg) {
+ threadInfo *winfo = (threadInfo *)sarg;
+
+ winfo->nrecords_per_request = 0;
+ //if (AUTO_CREATE_SUBTBL == winfo->superTblInfo->autoCreateTable) {
+ winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280 - winfo->superTblInfo->lenOfTagOfOneRow) / winfo->superTblInfo->lenOfOneRow;
+ //} else {
+ // winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280) / winfo->superTblInfo->lenOfOneRow;
+ //}
+
+ if (0 != winfo->superTblInfo->insertRate) {
+ if (winfo->nrecords_per_request >= winfo->superTblInfo->insertRate) {
+ winfo->nrecords_per_request = winfo->superTblInfo->insertRate;
 }
-
- memcpy(cmd + cmd_len, line, read_len);
- selectSql(taos, cmd);
- memset(cmd, 0, MAX_SQL_SIZE);
- cmd_len = 0;
+ }
+
+ if (winfo->nrecords_per_request <= 0) {
+ winfo->nrecords_per_request = 1;
 }
- t = getCurrentTime() - t;
- printf("run %s took %.6f second(s)\n\n", sqlFile, t);
+ if (winfo->nrecords_per_request >= INT16_MAX) {
+ winfo->nrecords_per_request = INT16_MAX - 1;
+ }
- free(cmd);
- if (line) free(line);
- fclose(fp);
- return;
-}
-void * createTable(void *sarg)
-{
- char command[BUFFER_SIZE] = "\0";
+ winfo->st = 0;
+ winfo->et = 0;
+ winfo->lastTs = winfo->start_time;
- info *winfo = (info *)sarg;
-
- if (!winfo->use_metric) {
- /* Create all the tables; */
- printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
- for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s);", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
- queryDB(winfo->taos, command);
- }
- } else {
- /* Create all the tables; */
- printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
- for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
- int j;
- if (i % 10 == 0) {
- j = 10;
- } else {
- j = i % 10;
- }
- if (j % 2 == 0) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai");
- } else {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing");
- }
- queryDB(winfo->taos, command);
- }
 }
+ if (winfo->superTblInfo->insertRate) {
+ winfo->st = taosGetTimestampMs();
 }
+ taos_query_a(winfo->taos, "show databases", callBack, winfo);
+
+ tsem_wait(&(winfo->lock_sem));
 return NULL;
 }
-void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) 
{
- double ts = getCurrentTime();
- printf("create table......\n");
+void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* superTblInfo) {
 pthread_t *pids = malloc(threads * sizeof(pthread_t));
- info *infos = malloc(threads * sizeof(info));
+ threadInfo *infos = malloc(threads * sizeof(threadInfo));
+ memset(pids, 0, threads * sizeof(pthread_t));
+ memset(infos, 0, threads * sizeof(threadInfo));
+ int ntables = superTblInfo->childTblCount;
 int a = ntables / threads;
 if (a < 1) { @@ -1081,44 +4156,139 @@ void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntable }
 int b = 0;
- if (threads != 0)
+ if (threads != 0) {
 b = ntables % threads;
+ }
+
+ //TAOS* taos;
+ //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+ // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
+ // if (NULL == taos) {
+ // printf("connect to server fail, reason: %s\n", taos_errstr(NULL));
+ // exit(-1);
+ // }
+ //}
+
+ int32_t timePrec = TSDB_TIME_PRECISION_MILLI;
+ if (0 != precision[0]) {
+ if (0 == strncasecmp(precision, "ms", 2)) {
+ timePrec = TSDB_TIME_PRECISION_MILLI;
+ } else if (0 == strncasecmp(precision, "us", 2)) {
+ timePrec = TSDB_TIME_PRECISION_MICRO;
+ } else {
+ printf("Unsupported time precision: %s\n", precision);
+ exit(-1);
+ }
+ }
+
+ int64_t start_time;
+ if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
+ start_time = taosGetTimestamp(timePrec);
+ } else {
+ (void)taosParseTime(superTblInfo->startTimestamp, &start_time, strlen(superTblInfo->startTimestamp), timePrec, 0);
+ }
+
+ double start = getCurrentTime();
+
 int last = 0;
 for (int i = 0; i < threads; i++) {
- info *t_info = infos + i;
+ threadInfo *t_info = infos + i;
 t_info->threadID = i;
 tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
- tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
- t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
- t_info->start_table_id = last;
- t_info->end_table_id = i < b ? last + a : last + a - 1;
- last = t_info->end_table_id + 1;
- t_info->use_metric = use_metric;
- t_info->cols = cols;
- pthread_create(pids + i, NULL, createTable, t_info);
+ t_info->superTblInfo = superTblInfo;
+
+ t_info->start_time = start_time;
+ t_info->minDelay = INT16_MAX;
+
+ if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+ //t_info->taos = taos;
+ t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
+ if (NULL == t_info->taos) {
+ printf("failed to connect to server from insert sub-thread, reason: %s\n", taos_errstr(NULL));
+ exit(-1);
+ }
+ } else {
+ t_info->taos = NULL;
+ #ifdef TD_LOWA_CURL
+ t_info->curl_handle = curl_easy_init();
+ #endif
+ }
+
+ if (0 == superTblInfo->multiThreadWriteOneTbl) {
+ t_info->start_table_id = last;
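+ // the first b (= ntables % threads) threads each cover one extra table, so the remainder is spread evenly across the pool
+ t_info->end_table_id = i < b ? 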
last + a : last + a - 1;
+ last = t_info->end_table_id + 1;
+ } else {
+ t_info->start_table_id = 0;
+ t_info->end_table_id = superTblInfo->childTblCount - 1;
+ t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
+ }
+
+ tsem_init(&(t_info->lock_sem), 0, 0);
+
+ if (SYNC == g_Dbs.queryMode) {
+ pthread_create(pids + i, NULL, syncWrite, t_info);
+ } else {
+ pthread_create(pids + i, NULL, asyncWrite, t_info);
+ }
 }
 for (int i = 0; i < threads; i++) {
 pthread_join(pids[i], NULL);
 }
- double t = getCurrentTime() - ts;
- printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads);
+ int64_t totalDelay = 0;
+ int64_t maxDelay = 0;
+ int64_t minDelay = INT16_MAX;
+ int64_t cntDelay = 0;
+ double avgDelay = 0;
 for (int i = 0; i < threads; i++) {
- info *t_info = infos + i;
- tsem_destroy(&(t_info->mutex_sem));
+ threadInfo *t_info = infos + i;
+ tsem_destroy(&(t_info->lock_sem));
+ taos_close(t_info->taos);
+
+ superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
+ superTblInfo->totalRowsInserted += t_info->totalRowsInserted;
+
+ totalDelay += t_info->totalDelay;
+ cntDelay += t_info->cntDelay;
+ if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
+ if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
+ #ifdef TD_LOWA_CURL
+ if (t_info->curl_handle) {
+ curl_easy_cleanup(t_info->curl_handle);
+ }
+ #endif
 }
+ if (0 == cntDelay) cntDelay = 1; // guard against dividing by zero when no insert request completed
+ avgDelay = (double)totalDelay / cntDelay;
+
+ double end = getCurrentTime();
+ printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n",
+ end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName);
+ fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n",
+ end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName);
+
+
+ printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n",
+ avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n",
+ avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
+
+
+ //taos_close(taos);
+
 free(pids);
 free(infos);
- return ;
 }
+
 void *readTable(void *sarg) {
- info *rinfo = (info *)sarg;
+#if 1
+ threadInfo *rinfo = (threadInfo *)sarg;
 TAOS *taos = rinfo->taos;
 char command[BUFFER_SIZE] = "\0";
 int64_t sTime = rinfo->start_time;
 @@ -1128,11 +4298,11 @@ void *readTable(void *sarg) {
 printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
 return NULL;
 }
-
- int num_of_DPT = rinfo->nrecords_per_table;
+
+ int num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
 int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
 int totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = rinfo->do_aggreFunc;
+ bool do_aggreFunc = g_Dbs.do_aggreFunc;
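+ // when aggregate functions are disabled, only aggreFunc[0] ("*") and aggreFunc[1] ("count(*)") are exercised
 int n = do_aggreFunc ? 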
(sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
 if (!do_aggreFunc) { @@ -1155,7 +4325,7 @@ void *readTable(void *sarg) {
 fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
 taos_free_result(pSql);
 taos_close(taos);
- exit(EXIT_FAILURE);
+ return NULL;
 }
 while (taos_fetch_row(pSql) != NULL) { @@ -1174,13 +4344,14 @@ void *readTable(void *sarg) {
 printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT);
 }
 fprintf(fp, "\n");
- fclose(fp);
+#endif
 return NULL;
 }
 void *readMetric(void *sarg) {
- info *rinfo = (info *)sarg;
+#if 1
+ threadInfo *rinfo = (threadInfo *)sarg;
 TAOS *taos = rinfo->taos;
 char command[BUFFER_SIZE] = "\0";
 FILE *fp = fopen(rinfo->fp, "a");
 @@ -1188,11 +4359,11 @@ void *readMetric(void *sarg) {
 printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
 return NULL;
 }
-
- int num_of_DPT = rinfo->nrecords_per_table;
+
+ int num_of_DPT = rinfo->superTblInfo->insertRows;
 int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1;
 int totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = rinfo->do_aggreFunc;
+ bool do_aggreFunc = g_Dbs.do_aggreFunc;
 int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2;
 if (!do_aggreFunc) { @@ -1209,9 +4380,9 @@ void *readMetric(void *sarg) {
 for (int i = 1; i <= m; i++) {
 if (i == 1) {
- sprintf(tempS, "areaid = %d", i);
+ sprintf(tempS, "t1 = %d", i);
 } else {
- sprintf(tempS, " or areaid = %d ", i);
+ sprintf(tempS, " or t1 = %d ", i);
 }
 strcat(condition, tempS);
 @@ -1229,7 +4400,7 @@ void *readMetric(void *sarg) {
 fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
 taos_free_result(pSql);
 taos_close(taos);
- exit(1);
+ return NULL;
 }
 int count = 0;
 while (taos_fetch_row(pSql) != NULL) { @@ -1244,333 +4415,829 @@ void *readMetric(void *sarg) {
 }
 fprintf(fp, "\n");
 }
- fclose(fp);
+#endif
 return NULL;
 }
-static int queryDbExec(TAOS *taos, char *command, int type) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
- for (i = 0; i < 5; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
+int insertTestProcess() {
+
+ g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
+ if (NULL == g_fpOfInsertResult) {
+ fprintf(stderr, "Failed to open %s to save results\n", g_Dbs.resultFile);
+ return 1;
+ }
+
+ setupForAnsiEscape();
+ int ret = printfInsertMeta();
+ resetAfterAnsiEscape();
+ if (ret == -1)
+ exit(EXIT_FAILURE);
+
+ printfInsertMetaToFile(g_fpOfInsertResult);
+
+ if (!g_args.answer_yes) {
+ printf("Press enter key to continue\n\n");
+ (void)getchar();
+ }
+
+ init_rand_data();
+
+ // create database and super tables
+ (void)createDatabases();
+
+ // pretreatment: prepare sample data
+ prePareSampleData();
+
+ double start;
+ double end;
+
+ // create child tables
+ start = getCurrentTime();
+ createChildTables();
+ end = getCurrentTime();
+ if (g_totalChildTables > 0) {
+ printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount);
+ fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount);
+ }
+
+ taosMsleep(1000);
+
+ // create sub threads for inserting data
+ //start = getCurrentTime();
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
+ startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, superTblInfo);
 }
-
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == 
code) { - break; - } + } + //end = getCurrentTime(); + + //int64_t totalRowsInserted = 0; + //int64_t totalAffectedRows = 0; + //for (int i = 0; i < g_Dbs.dbCount; i++) { + // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // totalRowsInserted += g_Dbs.db[i].superTbls[j].totalRowsInserted; + // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; + //} + //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalRowsInserted, totalAffectedRows, g_Dbs.threadCount); + if (NULL == g_args.metaFile && false == g_Dbs.insert_only) { + // query data + pthread_t read_id; + threadInfo *rInfo = malloc(sizeof(threadInfo)); + rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + rInfo->start_table_id = 0; + rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1; + //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows; + rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); + strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix); + strcpy(rInfo->fp, g_Dbs.resultFile); + + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); } - if (code != 0) { - fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res)); - taos_free_result(res); - //taos_close(taos); - return -1; + postFreeResource(); + + return 0; +} + +void *superQueryProcess(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + + //char sqlStr[MAX_TB_NAME_SIZE*2]; + //sprintf(sqlStr, "use %s", g_queryInfo.dbName); + //queryDB(winfo->taos, sqlStr); + + int64_t st = 0; + int64_t et = 0; + while (1) { + if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { + taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + } + + st = taosGetTimestampMs(); + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + int64_t t1 = taosGetTimestampUs(); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); + } + selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile); + int64_t t2 = taosGetTimestampUs(); + printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + } else { + #ifdef TD_LOWA_CURL + int64_t t1 = taosGetTimestampUs(); + int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle); + int64_t t2 = taosGetTimestampUs(); + printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + + if (0 != retCode) { + printf("====curl return fail, threadID[%d]\n", winfo->threadID); + return NULL; + } + #endif + } + } + et = taosGetTimestampMs(); + printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); } + return NULL; +} - if (1 == type) { - int 
affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; +void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { + char sourceString[32] = "xxxx"; + char subTblName[MAX_TB_NAME_SIZE*3]; + sprintf(subTblName, "%s.%s", g_queryInfo.dbName, g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); + + //printf("inSql: %s\n", inSql); + + char* pos = strstr(inSql, sourceString); + if (0 == pos) { + return; } - taos_free_result(res); - return 0; + strncpy(outSql, inSql, pos - inSql); + //printf("1: %s\n", outSql); + strcat(outSql, subTblName); + //printf("2: %s\n", outSql); + strcat(outSql, pos+strlen(sourceString)); + //printf("3: %s\n", outSql); } -void queryDB(TAOS *taos, char *command) { - int i; - TAOS_RES *pSql = NULL; - int32_t code = -1; +void *subQueryProcess(void *sarg) { + char sqlstr[1024]; + threadInfo *winfo = (threadInfo *)sarg; + int64_t st = 0; + int64_t et = g_queryInfo.subQueryInfo.rate*1000; + while (1) { + if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) { + taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + } - for (i = 0; i < 5; i++) { - if (NULL != pSql) { - taos_free_result(pSql); - pSql = NULL; + st = taosGetTimestampMs(); + for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + memset(sqlstr,0,sizeof(sqlstr)); + replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + selectAndGetResult(winfo->taos, sqlstr, tmpFile); + } } + et = taosGetTimestampMs(); + printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0); + } + return NULL; +} + +int queryTestProcess() { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + exit(-1); + } + + if (0 != g_queryInfo.subQueryInfo.sqlCount) { + (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount); + } + + printfQueryMeta(); + + if (!g_args.answer_yes) { + printf("Press enter key to continue\n\n"); + (void)getchar(); + } + + printfQuerySystemInfo(taos); + + pthread_t *pids = NULL; + threadInfo *infos = NULL; + //==== create sub threads for query from specify table + if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { - pSql = taos_query(taos, command); - code = taos_errno(pSql); - if (0 == code) { - break; - } + pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); + } + + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + threadInfo *t_info = infos + i; + t_info->threadID = i; + + if 
(0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + t_info->taos = taos; + + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + (void)queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE); + } else { + t_info->taos = NULL; + #ifdef TD_LOWA_CURL + t_info->curl_handle = curl_easy_init(); + #endif + } + + pthread_create(pids + i, NULL, superQueryProcess, t_info); + } + }else { + g_queryInfo.superQueryInfo.concurrent = 0; + } + + pthread_t *pidsOfSub = NULL; + threadInfo *infosOfSub = NULL; + //==== create sub threads for query from all sub table of the super table + if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { + pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); + infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); + } + + int ntables = g_queryInfo.subQueryInfo.childTblCount; + int threads = g_queryInfo.subQueryInfo.threadCnt; + + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + if (threads != 0) { + b = ntables % threads; + } + + int last = 0; + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infosOfSub + i; + t_info->threadID = i; + + t_info->start_table_id = last; + t_info->end_table_id = i < b ? last + a : last + a - 1; + last = t_info->end_table_id + 1; + t_info->taos = taos; + pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); + } + + g_queryInfo.subQueryInfo.threadCnt = threads; + }else { + g_queryInfo.subQueryInfo.threadCnt = 0; + } + + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + pthread_join(pids[i], NULL); } - if (code != 0) { - fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - exit(EXIT_FAILURE); + tmfree((char*)pids); + tmfree((char*)infos); + + for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); } - taos_free_result(pSql); + tmfree((char*)pidsOfSub); + tmfree((char*)infosOfSub); + + taos_close(taos); + return 0; +} + +static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + if (res == NULL || taos_errno(res) != 0) { + printf("failed to subscribe result, code:%d, reason:%s\n", code, taos_errstr(res)); + return; + } + + getResult(res, (char*)param); + taos_free_result(res); } -// sync insertion -void *syncWrite(void *sarg) { - info *winfo = (info *)sarg; - char buffer[BUFFER_SIZE] = "\0"; - char data[MAX_DATA_SIZE]; - char **data_type = winfo->datatype; - int len_of_binary = winfo->len_of_binary; - int ncols_per_record = winfo->ncols_per_record; - srand((uint32_t)time(NULL)); - int64_t time_counter = winfo->start_time; - for (int i = 0; i < winfo->nrecords_per_table;) { - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { - int inserted = i; - int64_t tmp_time = time_counter; +static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { + TAOS_SUB* tsub = NULL; - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, tID); - int k; - for (k = 0; k < winfo->nrecords_per_request;) { - int rand_num = rand() % 100; - int len = -1; - if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) { - int64_t d = tmp_time - rand() % 1000000 + rand_num; - len = generateData(data, 
data_type, ncols_per_record, d, len_of_binary); - } else { - len = generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary); - } + if (g_queryInfo.superQueryInfo.subscribeMode) { + tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, g_queryInfo.superQueryInfo.subscribeInterval); + } else { + tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, NULL, NULL, 0); + } - //assert(len + pstr - buffer < BUFFER_SIZE); - if (len + pstr - buffer >= BUFFER_SIZE) { // too long - break; - } + if (tsub == NULL) { + printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql); + return NULL; + } - pstr += sprintf(pstr, " %s", data); - inserted++; - k++; + return tsub; +} - if (inserted >= winfo->nrecords_per_table) break; - } +void *subSubscribeProcess(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + char subSqlstr[1024]; - /* puts(buffer); */ - int64_t startTs; - int64_t endTs; - startTs = taosGetTimestampUs(); - //queryDB(winfo->taos, buffer); - int affectedRows = queryDbExec(winfo->taos, buffer, 1); + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){ + return NULL; + } + + //int64_t st = 0; + //int64_t et = 0; + do { + //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { + // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); + //} + + //st = taosGetTimestampMs(); + char topic[32] = {0}; + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + sprintf(topic, "taosdemo-subscribe-%d", i); + memset(subSqlstr,0,sizeof(subSqlstr)); + replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); + if (NULL == g_queryInfo.subQueryInfo.tsub[i]) { + return NULL; + } + } + //et = taosGetTimestampMs(); + //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + } while (0); + + // start loop to consume result + TAOS_RES* res = NULL; + while (1) { + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + if (1 == g_queryInfo.subQueryInfo.subscribeMode) { + continue; + } - if (0 <= affectedRows){ - endTs = taosGetTimestampUs(); - int64_t delay = endTs - startTs; - if (delay > winfo->maxDelay) winfo->maxDelay = delay; - if (delay < winfo->minDelay) winfo->minDelay = delay; - winfo->cntDelay++; - winfo->totalDelay += delay; - //winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay; - } - - if (tID == winfo->end_table_id) { - i = inserted; - time_counter = tmp_time; + res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]); + if (res) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); + } + getResult(res, tmpFile); } } } + taos_free_result(res); + + for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { + 
taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], g_queryInfo.subQueryInfo.subscribeKeepProgress);
+ }
 return NULL;
 }
-void *asyncWrite(void *sarg) {
- info *winfo = (info *)sarg;
- taos_query_a(winfo->taos, "show databases", callBack, winfo);
-
- tsem_wait(&(winfo->lock_sem));
+void *superSubscribeProcess(void *sarg) {
+ threadInfo *winfo = (threadInfo *)sarg;
+ char sqlStr[MAX_TB_NAME_SIZE*2];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) {
+ return NULL;
+ }
+
+ //int64_t st = 0;
+ //int64_t et = 0;
+ do {
+ //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) {
+ // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms
+ // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id);
+ //}
+
+ //st = taosGetTimestampMs();
+ char topic[32] = {0};
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ sprintf(topic, "taosdemo-subscribe-%d", i);
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+ }
+ g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile);
+ if (NULL == g_queryInfo.superQueryInfo.tsub[i]) {
+ return NULL;
+ }
+ }
+ //et = taosGetTimestampMs();
+ //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ } while (0);
+
+ // start loop to consume result
+ TAOS_RES* res = NULL;
+ while (1) {
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ if (1 == g_queryInfo.superQueryInfo.subscribeMode) {
+ continue;
+ }
+
+ res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]);
+ if (res) {
+ char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
+ if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
+ sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
+ }
+ getResult(res, tmpFile);
+ }
+ }
+ }
+ taos_free_result(res);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ }
 return NULL;
 }
-void callBack(void *param, TAOS_RES *res, int code) {
- info* winfo = (info*)param;
- char **datatype = winfo->datatype;
- int ncols_per_record = winfo->ncols_per_record;
- int len_of_binary = winfo->len_of_binary;
+int subscribeTestProcess() {
+ printfQueryMeta();
- int64_t tmp_time = winfo->start_time;
- char *buffer = calloc(1, BUFFER_SIZE);
- char *data = calloc(1, MAX_DATA_SIZE);
- char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id);
- if (winfo->counter >= winfo->nrecords_per_table) {
- winfo->start_table_id++;
- winfo->counter = 0;
+ if (!g_args.answer_yes) {
+ printf("Press enter key to continue\n\n");
+ (void)getchar();
 }
- if (winfo->start_table_id > winfo->end_table_id) {
- tsem_post(&winfo->lock_sem);
- free(buffer);
- free(data);
- taos_free_result(res);
- return;
+
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
+ if (taos == NULL) {
+ fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ exit(-1);
+ }
+
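+ // collect the child table names of the configured super table so the subscriber threads can each take a contiguous slice
+ if (0 != 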
g_queryInfo.subQueryInfo.sqlCount) { + (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount); + } + + + pthread_t *pids = NULL; + threadInfo *infos = NULL; + //==== create sub threads for query from super table + if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { + pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); + } + + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + threadInfo *t_info = infos + i; + t_info->threadID = i; + t_info->taos = taos; + pthread_create(pids + i, NULL, superSubscribeProcess, t_info); + } } - for (int i = 0; i < winfo->nrecords_per_request; i++) { - int rand_num = rand() % 100; - if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) - { - int64_t d = tmp_time - rand() % 1000000 + rand_num; - generateData(data, datatype, ncols_per_record, d, len_of_binary); - } else - { - generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary); + //==== create sub threads for query from sub table + pthread_t *pidsOfSub = NULL; + threadInfo *infosOfSub = NULL; + if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { + pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); + infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { + printf("malloc failed for create threads\n"); + taos_close(taos); + exit(-1); } - pstr += sprintf(pstr, "%s", data); - winfo->counter++; - - if (winfo->counter >= winfo->nrecords_per_table) { - break; + + int ntables = g_queryInfo.subQueryInfo.childTblCount; + int threads = g_queryInfo.subQueryInfo.threadCnt; + + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + if (threads != 0) { + b = ntables % threads; + } + + int last = 0; + for (int i = 0; i < threads; i++) { + threadInfo *t_info = infosOfSub + i; + t_info->threadID = i; + + t_info->start_table_id = last; + t_info->end_table_id = i < b ? 
last + a : last + a - 1; + t_info->taos = taos; + pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); } + g_queryInfo.subQueryInfo.threadCnt = threads; } - taos_query_a(winfo->taos, buffer, callBack, winfo); - free(buffer); - free(data); + + for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + pthread_join(pids[i], NULL); + } - taos_free_result(res); -} + tmfree((char*)pids); + tmfree((char*)infos); -double getCurrentTime() { - struct timeval tv; - if (gettimeofday(&tv, NULL) != 0) { - perror("Failed to get current time in ms"); - exit(EXIT_FAILURE); + for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); } - return tv.tv_sec + tv.tv_usec / 1E6; + tmfree((char*)pidsOfSub); + tmfree((char*)infosOfSub); + taos_close(taos); + return 0; } -int32_t randint[MAX_PREPARED_RAND]; -int64_t randbigint[MAX_PREPARED_RAND]; -float randfloat[MAX_PREPARED_RAND]; -double randdouble[MAX_PREPARED_RAND]; - -int32_t rand_tinyint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 128; - +void initOfInsertMeta() { + memset(&g_Dbs, 0, sizeof(SDbs)); + + // set default values + strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + g_Dbs.port = 6030; + strncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); + strncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); + g_Dbs.threadCount = 2; + g_Dbs.use_metric = true; } -int32_t rand_smallint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 32767; +void initOfQueryMeta() { + memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); + + // set default values + strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); + g_queryInfo.port = 6030; + strncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); + strncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); } -int32_t rand_int(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor]; -} +void setParaFromArg(){ + if (g_args.host) { + strcpy(g_Dbs.host, g_args.host); + } else { + strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + } -int64_t rand_bigint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randbigint[cursor]; + if (g_args.user) { + strcpy(g_Dbs.user, g_args.user); + } + + if (g_args.password) { + strcpy(g_Dbs.password, g_args.password); + } -} + if (g_args.port) { + g_Dbs.port = g_args.port; + } -float rand_float(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randfloat[cursor]; + g_Dbs.dbCount = 1; + g_Dbs.db[0].drop = 1; -} + strncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); + g_Dbs.db[0].dbCfg.replica = g_args.replica; + strncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); -double rand_double() { - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randdouble[cursor]; + + strncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); + + g_Dbs.use_metric = g_args.use_metric; + g_Dbs.insert_only = g_args.insert_only; + + g_Dbs.db[0].superTblCount = 1; + strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; + g_Dbs.threadCount = g_args.num_of_threads; + g_Dbs.threadCountByCreateTbl = 1; + g_Dbs.queryMode = g_args.mode; + + g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; + g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS; + 
g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
+ g_Dbs.db[0].superTbls[0].insertRate = 0;
+ g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
+ g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
+ strncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+ strncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
+ strncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE);
+ strncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].timeStampStep = 10;
+
+ // g_args.num_of_RPR;
+ g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
+ g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE;
+
+ g_Dbs.do_aggreFunc = true;
-}
+ char dataString[STRING_LEN];
+ char **data_type = g_args.datatype;
+
+ memset(dataString, 0, STRING_LEN);
-void init_rand_data(){
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
- randint[i] = (int)(rand() % 10);
- randbigint[i] = (int64_t)(rand() % 2147483648);
- randfloat[i] = (float)(rand() / 1000.0);
- randdouble[i] = (double)(rand() / 1000000.0);
+ if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) {
+ g_Dbs.do_aggreFunc = false;
 }
-}
-
-int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) {
- memset(res, 0, MAX_DATA_SIZE);
- char *pstr = res;
- pstr += sprintf(pstr, "(%" PRId64, timestamp);
- int c = 0;
- for (; c < MAX_NUM_DATATYPE; c++) {
- if (data_type[c] == NULL) {
+ g_Dbs.db[0].superTbls[0].columnCount = 0;
+ for (int i = 0; i < MAX_NUM_DATATYPE; i++) {
+ if (data_type[i] == NULL) {
 break;
 }
+
+ strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
+ g_Dbs.db[0].superTbls[0].columnCount++;
+ }
+
+ if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
+ g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
+ } else {
+ for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) {
+ strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
+ g_Dbs.db[0].superTbls[0].columnCount++;
+ }
+ }
+
+ if (g_Dbs.use_metric) {
+ strncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
+
+ strncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
+ g_Dbs.db[0].superTbls[0].tagCount = 2;
+ } else {
+ g_Dbs.db[0].superTbls[0].tagCount = 0;
 }
+}
+
+/* Function to do regular expression check */
+static int regexMatch(const char *s, const char *reg, int cflags) {
+ regex_t regex;
+ char msgbuf[100] = {0};
- if (0 == c) {
- perror("data type error!");
+ /* Compile regular expression */
+ if (regcomp(&regex, reg, cflags) != 0) {
+ printf("Fail to compile regex\n");
 exit(-1);
 }
- for (int i = 0; i < num_of_cols; i++) {
- if (strcasecmp(data_type[i % c], "tinyint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % c], "smallint") == 0) {
- pstr += sprintf(pstr, ", %d", rand_smallint());
- } else if (strcasecmp(data_type[i % c], "int") == 0) {
- pstr += sprintf(pstr, ", %d", rand_int());
- } else if (strcasecmp(data_type[i % c], "bigint") == 0) {
- pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); 
- } else if (strcasecmp(data_type[i % c], "float") == 0) {
- pstr += sprintf(pstr, ", %10.4f", rand_float());
- } else if (strcasecmp(data_type[i % c], "double") == 0) {
- double t = rand_double();
- pstr += sprintf(pstr, ", %20.8f", t);
- } else if (strcasecmp(data_type[i % c], "bool") == 0) {
- bool b = rand() & 1;
- pstr += sprintf(pstr, ", %s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % c], "binary") == 0) {
- char *s = malloc(len_of_binary);
- rand_string(s, len_of_binary);
- pstr += sprintf(pstr, ", \"%s\"", s);
- free(s);
- }else if (strcasecmp(data_type[i % c], "nchar") == 0) {
- char *s = malloc(len_of_binary);
- rand_string(s, len_of_binary);
- pstr += sprintf(pstr, ", \"%s\"", s);
- free(s);
- }
-
- if (pstr - res > MAX_DATA_SIZE) {
- perror("column length too long, abort");
- exit(-1);
+ /* Execute regular expression */
+ int reti = regexec(&regex, s, 0, NULL, 0);
+ if (!reti) {
+ regfree(&regex);
+ return 1;
+ } else if (reti == REG_NOMATCH) {
+ regfree(&regex);
+ return 0;
+ } else {
+ regerror(reti, &regex, msgbuf, sizeof(msgbuf));
+ printf("Regex match failed: %s\n", msgbuf);
+ regfree(&regex);
+ exit(-1);
+ }
+
+ return 0;
+}
+
+static int isCommentLine(char *line) {
+ if (line == NULL) return 1;
+
+ return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
+}
+
+void querySqlFile(TAOS* taos, char* sqlFile)
+{
+ FILE *fp = fopen(sqlFile, "r");
+ if (fp == NULL) {
+ printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
+ return;
+ }
+
+ int read_len = 0;
+ char * cmd = calloc(1, MAX_SQL_SIZE);
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
+
+ double t = getCurrentTime();
+
+ while ((read_len = tgetline(&line, &line_len, fp)) != -1) {
+ if (read_len >= MAX_SQL_SIZE) continue;
+ line[--read_len] = '\0';
+
+ if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ continue;
+ }
+
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
 }
+
+ memcpy(cmd + cmd_len, line, read_len);
+ queryDbExec(taos, cmd, NO_INSERT_TYPE);
+ memset(cmd, 0, MAX_SQL_SIZE);
+ cmd_len = 0;
 }
- pstr += sprintf(pstr, ")");
+ t = getCurrentTime() - t;
+ printf("run %s took %.6f second(s)\n\n", sqlFile, t);
- return (int32_t)(pstr - res);
+ tmfree(cmd);
+ tmfree(line);
+ tmfclose(fp);
+ return;
 }
-static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";
-void rand_string(char *str, int size) {
- str[0] = 0;
- if (size > 0) {
- --size;
- int n;
- for (n = 0; n < size; n++) {
- int key = rand() % (int)(sizeof charset - 1);
- str[n] = charset[key];
+int main(int argc, char *argv[]) {
+ parse_args(argc, argv, &g_args);
+
+ if (g_args.metaFile) {
+ initOfInsertMeta();
+ initOfQueryMeta();
+ if (false == getInfoFromJsonFile(g_args.metaFile)) {
+ printf("Failed to read %s\n", g_args.metaFile);
+ return 1;
+ }
+ if (INSERT_MODE == g_jsonType) {
+ if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
+ (void)insertTestProcess();
+ } else if (QUERY_MODE == g_jsonType) {
+ if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+ (void)queryTestProcess();
+ } else if (SUBSCRIBE_MODE == g_jsonType) {
+ if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+ (void)subscribeTestProcess();
+ } else {
+ ;
+ }
+ } else {
+
+ memset(&g_Dbs, 0, sizeof(SDbs));
+ g_jsonType = INSERT_MODE;
+ setParaFromArg();
+
+ if (NULL != g_args.sqlFile) {
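+ // -s mode: execute the given SQL file against the default database, then exit without running the insert test
+ TAOS* qtaos = taos_connect(
+ g_Dbs.host, g_Dbs.user, 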
g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); + querySqlFile(qtaos, g_args.sqlFile); + taos_close(qtaos); + return 0; + } + + (void)insertTestProcess(); + if (g_Dbs.insert_only) return 0; + + // select + if (false == g_Dbs.insert_only) { + // query data + + pthread_t read_id; + threadInfo *rInfo = malloc(sizeof(threadInfo)); + rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + rInfo->start_table_id = 0; + rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1; + //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows; + rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); + strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix); + strcpy(rInfo->fp, g_Dbs.resultFile); + + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); + free(rInfo); } - str[n] = 0; } + + taos_cleanup(); + return 0; } + diff --git a/src/kit/taosdemox/CMakeLists.txt b/src/kit/taosdemox/CMakeLists.txt deleted file mode 100644 index 3993cb0feb749d4bb2d762f203baeb920f8db495..0000000000000000000000000000000000000000 --- a/src/kit/taosdemox/CMakeLists.txt +++ /dev/null @@ -1,47 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) -PROJECT(TDengine) - -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include) - -IF (TD_LINUX) - AUX_SOURCE_DIRECTORY(. SRC) - ADD_EXECUTABLE(taosdemox ${SRC}) - - #find_program(HAVE_CURL NAMES curl) - IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32)) - ADD_DEFINITIONS(-DTD_LOWA_CURL) - LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib) - ADD_LIBRARY(curl STATIC IMPORTED) - SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a) - TARGET_LINK_LIBRARIES(taosdemox curl) - ENDIF () - - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemox taos_static cJson) - ELSE () - TARGET_LINK_LIBRARIES(taosdemox taos cJson) - ENDIF () -ENDIF () - -IF (TD_DARWIN) - # missing a few dependencies, such as - # AUX_SOURCE_DIRECTORY(. SRC) - # ADD_EXECUTABLE(taosdemox ${SRC}) - # - # #find_program(HAVE_CURL NAMES curl) - # IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32)) - # ADD_DEFINITIONS(-DTD_LOWA_CURL) - # LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib) - # ADD_LIBRARY(curl STATIC IMPORTED) - # SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a) - # TARGET_LINK_LIBRARIES(taosdemox curl) - # ENDIF () - # - # IF (TD_SOMODE_STATIC) - # TARGET_LINK_LIBRARIES(taosdemox taos_static cJson) - # ELSE () - # TARGET_LINK_LIBRARIES(taosdemox taos cJson) - # ENDIF () -ENDIF () - diff --git a/src/kit/taosdemox/taosdemox.c b/src/kit/taosdemox/taosdemox.c deleted file mode 100644 index 3337546ee81fd941510b34dfa9fd7b603523230b..0000000000000000000000000000000000000000 --- a/src/kit/taosdemox/taosdemox.c +++ /dev/null @@ -1,5069 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - - -/* - when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. -*/ - -#define _GNU_SOURCE -#define CURL_STATICLIB - -#ifdef TD_LOWA_CURL -#include "curl/curl.h" -#endif - -#ifdef LINUX - #include "os.h" - #include "cJSON.h" - #include - #include - #include - #ifndef _ALPINE - #include - #endif - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include -#else - #include - #include - #include - #include "os.h" - - #pragma comment ( lib, "libcurl.lib" ) - #pragma comment ( lib, "ws2_32.lib" ) - #pragma comment ( lib, "winmm.lib" ) - #pragma comment ( lib, "wldap32.lib" ) -#endif - -#include "taos.h" -#include "tutil.h" - -extern char configDir[]; - -#define INSERT_JSON_NAME "insert.json" -#define QUERY_JSON_NAME "query.json" -#define SUBSCRIBE_JSON_NAME "subscribe.json" - -#define INSERT_MODE 0 -#define QUERY_MODE 1 -#define SUBSCRIBE_MODE 2 - -#define MAX_SQL_SIZE 65536 -#define BUFFER_SIZE (65536*2) -#define MAX_DB_NAME_SIZE 64 -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE 16000 -#define MAX_NUM_DATATYPE 10 -#define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 -#define MAX_PREPARED_RAND 1000000 -//#define MAX_SQL_SIZE 65536 -#define MAX_FILE_NAME_LEN 256 - -#define MAX_SAMPLES_ONCE_FROM_FILE 10000 -#define MAX_NUM_DATATYPE 10 - -#define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 8 -#define MAX_COLUMN_COUNT 1024 -#define MAX_TAG_COUNT 128 - -#define MAX_QUERY_SQL_COUNT 10 -#define MAX_QUERY_SQL_LENGTH 256 - -#define MAX_DATABASE_COUNT 256 - -typedef enum CREATE_SUB_TALBE_MOD_EN { - PRE_CREATE_SUBTBL, - AUTO_CREATE_SUBTBL, - NO_CREATE_SUBTBL -} CREATE_SUB_TALBE_MOD_EN; - -typedef enum TALBE_EXISTS_EN { - TBL_ALREADY_EXISTS, - TBL_NO_EXISTS, - TBL_EXISTS_BUTT -} TALBE_EXISTS_EN; - -enum MODE { - SYNC, - ASYNC, - MODE_BUT -}; - -enum QUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT -} ; - -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- -enum _show_stables_index { - TSDB_SHOW_STABLES_NAME_INDEX, - TSDB_SHOW_STABLES_CREATED_TIME_INDEX, - TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, - TSDB_SHOW_STABLES_TID_INDEX, - TSDB_SHOW_STABLES_VGID_INDEX, - TSDB_MAX_SHOW_STABLES -}; -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -typedef struct { - char field[TSDB_COL_NAME_LEN + 1]; - 
char type[16];
-  int length;
-  char note[128];
-} SColDes;
-
-/* Used by main to communicate with parse_opt. */
-typedef struct SArguments_S {
-  char *   metaFile;
-  char *   host;
-  uint16_t port;
-  char *   user;
-  char *   password;
-  char *   database;
-  int      replica;
-  char *   tb_prefix;
-  char *   sqlFile;
-  bool     use_metric;
-  bool     insert_only;
-  char *   output_file;
-  int      mode;
-  char *   datatype[MAX_NUM_DATATYPE + 1];
-  int      len_of_binary;
-  int      num_of_CPR;
-  int      num_of_threads;
-  int      num_of_RPR;
-  int      num_of_tables;
-  int      num_of_DPT;
-  int      abort;
-  int      disorderRatio;
-  int      disorderRange;
-  int      method_of_delete;
-  char **  arg_list;
-} SArguments;
-
-typedef struct SColumn_S {
-  char field[TSDB_COL_NAME_LEN + 1];
-  char dataType[MAX_TB_NAME_SIZE];
-  int  dataLen;
-  char note[128];
-} StrColumn;
-
-typedef struct SSuperTable_S {
-  char   sTblName[MAX_TB_NAME_SIZE];
-  int    childTblCount;
-  bool   superTblExists;      // 0: no, 1: yes
-  bool   childTblExists;      // 0: no, 1: yes
-  int    batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
-  int8_t autoCreateTable;     // 0: create sub table, 1: auto create sub table
-  char   childTblPrefix[MAX_TB_NAME_SIZE];
-  char   dataSource[MAX_TB_NAME_SIZE];  // rand_gen or sample
-  char   insertMode[MAX_TB_NAME_SIZE];  // taosc, restful
-  int    insertRate;          // 0: unlimit  > 0 rows/s
-
-  int    multiThreadWriteOneTbl;  // 0: no, 1: yes
-  int    numberOfTblInOneSql;     // 0/1: one table, > 1: number of tbl
-  int    rowsPerTbl;              //
-  int    disorderRatio;           // 0: no disorder, >0: x%
-  int    disorderRange;           // ms or us by database precision
-  int    maxSqlLen;               //
-
-  int64_t insertRows;             // 0: no limit
-  int     timeStampStep;
-  char    startTimestamp[MAX_TB_NAME_SIZE];  //
-  char    sampleFormat[MAX_TB_NAME_SIZE];    // csv, json
-  char    sampleFile[MAX_FILE_NAME_LEN];
-  char    tagsFile[MAX_FILE_NAME_LEN];
-
-  int       columnCount;
-  StrColumn columns[MAX_COLUMN_COUNT];
-  int       tagCount;
-  StrColumn tags[MAX_TAG_COUNT];
-
-  char* childTblName;
-  char* colsOfCreatChildTable;
-  int   lenOfOneRow;
-  int   lenOfTagOfOneRow;
-
-  char* sampleDataBuf;
-  int   sampleDataBufSize;
-  //int  sampleRowCount;
-  //int  sampleUsePos;
-
-  int   tagSource;    // 0: rand, 1: tag sample
-  char* tagDataBuf;
-  int   tagSampleCount;
-  int   tagUsePos;
-
-  // statistics
-  int64_t totalRowsInserted;
-  int64_t totalAffectedRows;
-} SSuperTable;
-
-typedef struct {
-  char    name[TSDB_DB_NAME_LEN + 1];
-  char    create_time[32];
-  int32_t ntables;
-  int32_t vgroups;
-  int16_t replica;
-  int16_t quorum;
-  int16_t days;
-  char    keeplist[32];
-  int32_t cache;    //MB
-  int32_t blocks;
-  int32_t minrows;
-  int32_t maxrows;
-  int8_t  wallevel;
-  int32_t fsync;
-  int8_t  comp;
-  int8_t  cachelast;
-  char    precision[8];   // time resolution
-  int8_t  update;
-  char    status[16];
-} SDbInfo;
-
-typedef struct SDbCfg_S {
-//  int       maxtablesPerVnode;
-  int       minRows;
-  int       maxRows;
-  int       comp;
-  int       walLevel;
-  int       fsync;
-  int       replica;
-  int       update;
-  int       keep;
-  int       days;
-  int       cache;
-  int       blocks;
-  int       quorum;
-  char      precision[MAX_TB_NAME_SIZE];
-} SDbCfg;
-
-typedef struct SDataBase_S {
-  char        dbName[MAX_DB_NAME_SIZE];
-  int         drop;  // 0: use exists, 1: if exists, drop then new create
-  SDbCfg      dbCfg;
-  int         superTblCount;
-  SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
-} SDataBase;
-
-typedef struct SDbs_S {
-  char     cfgDir[MAX_FILE_NAME_LEN];
-  char     host[MAX_DB_NAME_SIZE];
-  uint16_t port;
-  char     user[MAX_DB_NAME_SIZE];
-  char     password[MAX_DB_NAME_SIZE];
-  char     resultFile[MAX_FILE_NAME_LEN];
-  bool     use_metric;
-  bool     insert_only;
-  bool     do_aggreFunc;
-  bool     queryMode;
-
-  int      threadCount;
-  int      threadCountByCreateTbl;
-  int      dbCount;
-  SDataBase db[MAX_DB_COUNT];
-
-  // statistics
-  int64_t totalRowsInserted;
-  int64_t totalAffectedRows;
-} SDbs;
-
-typedef struct SuperQueryInfo_S {
-  int       rate;  // 0: unlimit  > 0 loop/s
-  int       concurrent;
-  int       sqlCount;
-  int       subscribeMode;      // 0: sync, 1: async
-  int       subscribeInterval;  // ms
-  int       subscribeRestart;
-  int       subscribeKeepProgress;
-  char      sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH];
-  char      result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
-  TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
-} SuperQueryInfo;
-
-typedef struct SubQueryInfo_S {
-  char      sTblName[MAX_TB_NAME_SIZE];
-  int       rate;  // 0: unlimit  > 0 loop/s
-  int       threadCnt;
-  int       subscribeMode;      // 0: sync, 1: async
-  int       subscribeInterval;  // ms
-  int       subscribeRestart;
-  int       subscribeKeepProgress;
-  int       childTblCount;
-  char      childTblPrefix[MAX_TB_NAME_SIZE];
-  int       sqlCount;
-  char      sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH];
-  char      result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
-  TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
-
-  char*     childTblName;
-} SubQueryInfo;
-
-typedef struct SQueryMetaInfo_S {
-  char     cfgDir[MAX_FILE_NAME_LEN];
-  char     host[MAX_DB_NAME_SIZE];
-  uint16_t port;
-  char     user[MAX_DB_NAME_SIZE];
-  char     password[MAX_DB_NAME_SIZE];
-  char     dbName[MAX_DB_NAME_SIZE];
-  char     queryMode[MAX_TB_NAME_SIZE];  // taosc, restful
-
-  SuperQueryInfo superQueryInfo;
-  SubQueryInfo   subQueryInfo;
-} SQueryMetaInfo;
-
-typedef struct SThreadInfo_S {
-  TAOS *taos;
-#ifdef TD_LOWA_CURL
-  CURL *curl_handle;
-#endif
-  int   threadID;
-  char  db_name[MAX_DB_NAME_SIZE];
-  char  fp[4096];
-  char  tb_prefix[MAX_TB_NAME_SIZE];
-  int   start_table_id;
-  int   end_table_id;
-  int   data_of_rate;
-  int64_t start_time;
-  char* cols;
-  bool  use_metric;
-  SSuperTable* superTblInfo;
-
-  // for async insert
-  tsem_t lock_sem;
-  int64_t counter;
-  int64_t st;
-  int64_t et;
-  int64_t lastTs;
-  int     nrecords_per_request;
-
-  // statistics
-  int64_t totalRowsInserted;
-  int64_t totalAffectedRows;
-} threadInfo;
-
-typedef struct curlMemInfo_S {
-  char  *buf;
-  size_t sizeleft;
-} curlMemInfo;
-
-
-
-#ifdef LINUX
-/* The options we understand. */
-static struct argp_option options[] = {
-  {0, 'f', "meta file", 0, "The meta data to the execution procedure, if use -f, all others options invalid. Default is NULL.", 0},
-#ifdef _TD_POWER_
-  {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 1},
-  {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 2},
-#else
-  {0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 1},
-  {0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 2},
-#endif
-  {0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 2},
-  {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 2},
-  {0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
-  {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
-  {0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 4},
-  {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 4},
-  {0, 's', "sql file", 0, "The select sql file.", 6},
-  {0, 'M', 0, 0, "Use metric flag.", 4},
-  {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 6},
-  {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 4},
-  {0, 'b', "type_of_cols", 0, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP.", 4},
-  {0, 'w', "length_of_chartype", 0, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16", 4},
-  {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 10.", 4},
-  {0, 'T', "num_of_threads", 0, "The number of threads. Default is 10.", 4},
-  // {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 100.", 4},
-  {0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 4},
-  {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 10000.", 4},
-  {0, 'x', 0, 0, "Not insert only flag.", 4},
-  {0, 'O', "disorderRatio", 0, "Insert mode--0: In order, > 0: disorder ratio. Default is in order.", 4},
-  {0, 'R', "disorderRang", 0, "Out of order data's range, ms, default is 1000.", 4},
-  //{0, 'D', "delete database", 0, "if elete database if exists. 0: no, 1: yes, default is 1", 5},
-  {0}};
-
-/* Parse a single option. */
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
-  // Get the input argument from argp_parse, which we know is a pointer to our arguments structure.
-  SArguments *arguments = state->input;
-  wordexp_t full_path;
-  char **sptr;
-  switch (key) {
-    case 'f':
-      arguments->metaFile = arg;
-      break;
-    case 'h':
-      arguments->host = arg;
-      break;
-    case 'p':
-      arguments->port = atoi(arg);
-      break;
-    case 'u':
-      arguments->user = arg;
-      break;
-    case 'P':
-      arguments->password = arg;
-      break;
-    case 'o':
-      arguments->output_file = arg;
-      break;
-    case 's':
-      arguments->sqlFile = arg;
-      break;
-    case 'q':
-      arguments->mode = atoi(arg);
-      break;
-    case 'T':
-      arguments->num_of_threads = atoi(arg);
-      break;
-    //case 'r':
-    //  arguments->num_of_RPR = atoi(arg);
-    //  break;
-    case 't':
-      arguments->num_of_tables = atoi(arg);
-      break;
-    case 'n':
-      arguments->num_of_DPT = atoi(arg);
-      break;
-    case 'd':
-      arguments->database = arg;
-      break;
-    case 'l':
-      arguments->num_of_CPR = atoi(arg);
-      break;
-    case 'b':
-      sptr = arguments->datatype;
-      if (strstr(arg, ",") == NULL) {
-        if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 &&
-            strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 &&
-            strcasecmp(arg, "SMALLINT") != 0 && strcasecmp(arg, "TIMESTAMP") != 0 &&
-            strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 &&
-            strcasecmp(arg, "BINARY") != 0 && strcasecmp(arg, "NCHAR") != 0) {
-          argp_error(state, "Invalid data_type!");
-        }
-        sptr[0] = arg;
-      } else {
-        int index = 0;
-        char *dupstr = strdup(arg);
-        char *running = dupstr;
-        char *token = strsep(&running, ",");
-        while (token != NULL) {
-          if (strcasecmp(token, "INT") != 0 && strcasecmp(token, "FLOAT") != 0 &&
-              strcasecmp(token, "TINYINT") != 0 && strcasecmp(token, "BOOL") != 0 &&
-              strcasecmp(token, "SMALLINT") != 0 && strcasecmp(token, "TIMESTAMP") != 0 &&
-              strcasecmp(token, "BIGINT") != 0 && strcasecmp(token, "DOUBLE") != 0 &&
-              strcasecmp(token, "BINARY") != 0 && strcasecmp(token, "NCHAR") != 0) {
-            argp_error(state, "Invalid data_type!");
-          }
-          sptr[index++] = token;
-          token = strsep(&running, ",");
-          if (index >= MAX_NUM_DATATYPE) break;
-        }
-      }
-      break;
-    case 'w':
-      arguments->len_of_binary = atoi(arg);
-      break;
-    case 'm':
-      arguments->tb_prefix = arg;
-      break;
-    case 'M':
-      arguments->use_metric = true;
-      break;
-    case 'x':
-      arguments->insert_only = false;
-      break;
-    case 'c':
-      if (wordexp(arg, &full_path, 0) != 0) {
-        fprintf(stderr, "Invalid path %s\n", arg);
-        return -1;
-      }
-      taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
-      wordfree(&full_path);
-      break;
-    case 'O':
-      arguments->disorderRatio = atoi(arg);
-      if (arguments->disorderRatio < 0 || arguments->disorderRatio > 100)
-      {
-        argp_error(state, "Invalid disorder ratio, should 1 ~ 100!");
-      }
-      break;
-    case 'R':
-      arguments->disorderRange = atoi(arg);
-      break;
-    case 'a':
-      arguments->replica = atoi(arg);
-      if (arguments->replica > 3 || arguments->replica < 1)
-      {
-        arguments->replica = 1;
-      }
-      break;
-    //case 'D':
-    //  arguments->method_of_delete = atoi(arg);
-    //  break;
-    case OPT_ABORT:
-      arguments->abort = 1;
-      break;
-    case ARGP_KEY_ARG:
-      /*arguments->arg_list = &state->argv[state->next-1];
-      state->next = state->argc;*/
-      argp_usage(state);
-      break;
-
-    default:
-      return ARGP_ERR_UNKNOWN;
-  }
-  return 0;
-}
-
-static struct argp argp = {options, parse_opt, 0, 0};
-
-void parse_args(int argc, char *argv[], SArguments *arguments) {
-  argp_parse(&argp, argc, argv, 0, 0, arguments);
-  if (arguments->abort) {
-    #ifndef _ALPINE
-    error(10, 0, "ABORTED");
-    #else
-    abort();
-    #endif
-  }
-}
-
-#else
-  void printHelp() {
-    char indent[10] = "  ";
-    printf("%s%s\n", indent, "-f");
-    printf("%s%s%s\n", indent, indent, "The meta file to the execution procedure. Default is './meta.json'.");
-    printf("%s%s\n", indent, "-c");
-    printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'.");
-  }
-
-  void parse_args(int argc, char *argv[], SArguments *arguments) {
-    for (int i = 1; i < argc; i++) {
-      if (strcmp(argv[i], "-f") == 0) {
-        arguments->metaFile = argv[++i];
-      } else if (strcmp(argv[i], "-c") == 0) {
-        strcpy(configDir, argv[++i]);
-      } else if (strcmp(argv[i], "--help") == 0) {
-        printHelp();
-        exit(EXIT_FAILURE);
-      } else {
-        fprintf(stderr, "wrong options\n");
-        printHelp();
-        exit(EXIT_FAILURE);
-      }
-    }
-  }
-#endif
-
-static bool getInfoFromJsonFile(char* file);
-//static int generateOneRowDataForStb(SSuperTable* stbInfo);
-//static int getDataIntoMemForStb(SSuperTable* stbInfo);
-static void init_rand_data();
-static int createDatabases();
-static void createChildTables();
-static int queryDbExec(TAOS *taos, char *command, int type);
-
-/* ************ Global variables ************  */
-
-int32_t randint[MAX_PREPARED_RAND];
-int64_t randbigint[MAX_PREPARED_RAND];
-float   randfloat[MAX_PREPARED_RAND];
-double  randdouble[MAX_PREPARED_RAND];
-char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"};
-
-SArguments g_args = {NULL,
-                     "127.0.0.1",     // host
-                     6030,            // port
-                     "root",          // user
-#ifdef _TD_POWER_
-                     "powerdb",       // password
-#else
-                     "taosdata",      // password
-#endif
-                     "test",          // database
-                     1,               // replica
-                     "t",             // tb_prefix
-                     NULL,            // sqlFile
-                     false,           // use_metric
-                     true,            // insert_only
-                     "./output.txt",  // output_file
-                     0,               // mode : sync or async
-                     {
-                       "TINYINT",     // datatype
-                       "SMALLINT",
-                       "INT",
-                       "BIGINT",
-                       "FLOAT",
-                       "DOUBLE",
-                       "BINARY",
-                       "NCHAR",
-                       "BOOL",
-                       "TIMESTAMP"
-                     },
-                     16,              // len_of_binary
-                     10,              // num_of_CPR
-                     10,              // num_of_connections/thread
-                     100,             // num_of_RPR
-                     10000,           // num_of_tables
-                     10000,           // num_of_DPT
-                     0,               // abort
-                     0,               // disorderRatio
-                     1000,            // disorderRange
-                     1,               // method_of_delete
-                     NULL             // arg_list
-};
-
-
-static int g_jsonType = 0;
-static SDbs g_Dbs;
-static int g_totalChildTables = 0;
-static SQueryMetaInfo g_queryInfo;
-static FILE * g_fpOfInsertResult = NULL;
-
-
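For orientation, a minimal sketch of how the forward-declared helpers and the g_args defaults above compose at startup. This hypothetical main() is illustrative only, not the entry point removed by this diff; it assumes the declared signatures and the convention that getInfoFromJsonFile() returns true on success:

#include <stdbool.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
  parse_args(argc, argv, &g_args);  // overlay CLI flags (-h, -p, -u, ...) onto the defaults
  init_rand_data();                 // pre-generate MAX_PREPARED_RAND random column values

  if (g_args.metaFile != NULL) {
    // -f mode: a JSON meta file fills g_Dbs/g_queryInfo instead of the CLI flags
    if (!getInfoFromJsonFile(g_args.metaFile)) return EXIT_FAILURE;  // assumed: true == success
  }

  if (createDatabases() != 0) return EXIT_FAILURE;  // databases and super tables first
  createChildTables();                              // then child tables, multi-threaded
  return EXIT_SUCCESS;
}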
-void tmfclose(FILE *fp) {
-  if (NULL != fp) {
-    fclose(fp);
-  }
-}
-
-void tmfree(char *buf) {
-  if (NULL != buf) {
-    free(buf);
-  }
-}
-
-static int queryDbExec(TAOS *taos, char *command, int type) {
-  int i;
-  TAOS_RES *res = NULL;
-  int32_t   code = -1;
-
-  for (i = 0; i < 5; i++) {
-    if (NULL != res) {
-      taos_free_result(res);
-      res = NULL;
-    }
-
-    res = taos_query(taos, command);
-    code = taos_errno(res);
-    if (0 == code) {
-      break;
-    }
-  }
-
-  if (code != 0) {
-    fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res));
-    taos_free_result(res);
-    //taos_close(taos);
-    return -1;
-  }
-
-  if (INSERT_TYPE == type) {
-    int affectedRows = taos_affected_rows(res);
-    taos_free_result(res);
-    return affectedRows;
-  }
-
-  taos_free_result(res);
-  return 0;
-}
-
-static void getResult(TAOS_RES *res, char* resultFileName) {
-  TAOS_ROW    row = NULL;
-  int         num_rows = 0;
-  int         num_fields = taos_field_count(res);
-  TAOS_FIELD *fields = taos_fetch_fields(res);
-
-  FILE *fp = NULL;
-  if (resultFileName[0] != 0) {
-    fp = fopen(resultFileName, "at");
-    if (fp == NULL) {
-      fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName);
-    }
-  }
-
-  char* databuf = (char*) calloc(1, 100*1024*1024);
-  if (databuf == NULL) {
-    fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n");
-    return ;
-  }
-
-  int totalLen = 0;
-  char temp[16000];
-
-  // fetch the records row by row
-  while ((row = taos_fetch_row(res))) {
-    if (totalLen >= 100*1024*1024 - 32000) {
-      if (fp) fprintf(fp, "%s", databuf);
-      totalLen = 0;
-      memset(databuf, 0, 100*1024*1024);
-    }
-    num_rows++;
-    int len = taos_print_row(temp, row, fields, num_fields);
-    len += sprintf(temp + len, "\n");
-    //printf("query result:%s\n", temp);
-    memcpy(databuf + totalLen, temp, len);
-    totalLen += len;
-  }
-
-  if (fp) fprintf(fp, "%s", databuf);
-  tmfclose(fp);
-  free(databuf);
-}
-
-static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) {
-  TAOS_RES *res = taos_query(taos, command);
-  if (res == NULL || taos_errno(res) != 0) {
-    printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res));
-    taos_free_result(res);
-    return;
-  }
-
-  getResult(res, resultFileName);
-  taos_free_result(res);
-}
-
-double getCurrentTime() {
-  struct timeval tv;
-  if (gettimeofday(&tv, NULL) != 0) {
-    perror("Failed to get current time in ms");
-    return 0.0;
-  }
-
-  return tv.tv_sec + tv.tv_usec / 1E6;
-}
-
-static int32_t rand_bool(){
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randint[cursor] % 2;
-}
-
-static int32_t rand_tinyint(){
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randint[cursor] % 128;
-}
-
-static int32_t rand_smallint(){
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randint[cursor] % 32767;
-}
-
-static int32_t rand_int(){
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randint[cursor];
-}
-
-static int64_t rand_bigint(){
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randbigint[cursor];
-
-}
-
-static float rand_float(){
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randfloat[cursor];
-}
-
-static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
-void rand_string(char *str, int size) {
-  str[0] = 0;
-  if (size > 0) {
-    //--size;
-    int n;
-    for (n = 0; n < size; n++) {
-      int key = rand_tinyint() % (int)(sizeof(charset) - 1);
-      str[n] = charset[key];
-    }
-    str[n] = 0;
-  }
-}
-
-static double rand_double() {
-  static int cursor;
-  cursor++;
-  cursor = cursor % MAX_PREPARED_RAND;
-  return randdouble[cursor];
-
-}
-
-static void init_rand_data() {
-  for (int i = 0; i < MAX_PREPARED_RAND; i++){
-    randint[i] = (int)(rand() % 65535);
-    randbigint[i] = (int64_t)(rand() % 2147483648);
-    randfloat[i] = (float)(rand() / 1000.0);
-    randdouble[i] = (double)(rand() / 1000000.0);
-  }
-}
-
-static void printfInsertMeta() {
-  printf("\033[1m\033[40;32m================ insert.json parse result START ================\033[0m\n");
-  printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
-  printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
-  printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
-  printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
-  printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
-  printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
-
-  printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
-  for (int i = 0; i < g_Dbs.dbCount; i++) {
-    printf("database[\033[33m%d\033[0m]:\n", i);
-    printf("  database name: \033[33m%s\033[0m\n", g_Dbs.db[i].dbName);
-    if (0 == g_Dbs.db[i].drop) {
-      printf("  drop: \033[33mno\033[0m\n");
-    }else {
-      printf("  drop: \033[33myes\033[0m\n");
-    }
-
-    if (g_Dbs.db[i].dbCfg.blocks > 0) {
-      printf("  blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks);
-    }
-    if (g_Dbs.db[i].dbCfg.cache > 0) {
-      printf("  cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache);
-    }
-    if (g_Dbs.db[i].dbCfg.days > 0) {
-      printf("  days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days);
-    }
-    if (g_Dbs.db[i].dbCfg.keep > 0) {
-      printf("  keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep);
-    }
-    if (g_Dbs.db[i].dbCfg.replica > 0) {
-      printf("  replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica);
-    }
-    if (g_Dbs.db[i].dbCfg.update > 0) {
-      printf("  update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update);
-    }
-    if (g_Dbs.db[i].dbCfg.minRows > 0) {
-      printf("  minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows);
-    }
-    if (g_Dbs.db[i].dbCfg.maxRows > 0) {
-      printf("  maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows);
-    }
-    if (g_Dbs.db[i].dbCfg.comp > 0) {
-      printf("  comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
-    }
-    if (g_Dbs.db[i].dbCfg.walLevel > 0) {
-      printf("  walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel);
-    }
-    if (g_Dbs.db[i].dbCfg.fsync > 0) {
-      printf("  fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync);
-    }
-    if (g_Dbs.db[i].dbCfg.quorum > 0) {
-      printf("  quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum);
-    }
-    if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
-      if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
-        printf("  precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
-      } else {
-        printf("  precision error: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision);
-        exit(EXIT_FAILURE);
-      }
-    }
-
-    printf("  super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount);
-    for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
-      printf("  super table[\033[33m%d\033[0m]:\n", j);
-
-      printf("      stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName);
-
-      if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
-        printf("      autoCreateTable: \033[33m%s\033[0m\n", "no");
-      } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
-        printf("      autoCreateTable: \033[33m%s\033[0m\n", "yes");
-      } else {
-        printf("      autoCreateTable: \033[33m%s\033[0m\n", "error");
-      }
-
-      if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
-        printf("      childTblExists: \033[33m%s\033[0m\n", "no");
-      } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
-        printf("      childTblExists: \033[33m%s\033[0m\n", "yes");
-      } else {
-        printf("      childTblExists: \033[33m%s\033[0m\n", "error");
-      }
-
-      printf("      childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount);
-      printf("      childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix);
-      printf("      dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource);
-      printf("      insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode);
-      printf("      insertRate: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].insertRate);
-      printf("      insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows);
-
-      if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
-        printf("      multiThreadWriteOneTbl: \033[33mno\033[0m\n");
-      }else {
-        printf("      multiThreadWriteOneTbl: \033[33myes\033[0m\n");
-      }
-      printf("      numberOfTblInOneSql: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].numberOfTblInOneSql);
-      printf("      rowsPerTbl: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].rowsPerTbl);
-      printf("      disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange);
-      printf("      disorderRatio: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRatio);
-      printf("      maxSqlLen: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
-
-      printf("      timeStampStep: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].timeStampStep);
-      printf("      startTimestamp: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].startTimestamp);
-      printf("      sampleFormat: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFormat);
-      printf("      sampleFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sampleFile);
-      printf("      tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile);
-
-      printf("      columnCount: \033[33m%d\033[0m\n        ", g_Dbs.db[i].superTbls[j].columnCount);
-      for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
-        //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
-        if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, "nchar", 5))) {
-          printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
-        } else {
-          printf("column[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
-        }
-      }
-      printf("\n");
-
-      printf("      tagCount: \033[33m%d\033[0m\n        ", g_Dbs.db[i].superTbls[j].tagCount);
-      for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
-        //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
-        if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) {
-          printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
-        } else {
-          printf("tag[%d]:\033[33m%s\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
-        }
-      }
-      printf("\n");
-    }
-    printf("\n");
-  }
-  printf("\033[1m\033[40;32m================ insert.json parse result END================\033[0m\n");
-}
-
-static void printfInsertMetaToFile(FILE* fp) {
-  fprintf(fp, "================ insert.json parse result START================\n");
-  fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
-  fprintf(fp, "user: %s\n", g_Dbs.user);
-  fprintf(fp, "password: %s\n", g_Dbs.password);
-  fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
-  fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
-  fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
-
-  fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
-  for (int i = 0; i < g_Dbs.dbCount; i++) {
-    fprintf(fp, "database[%d]:\n", i);
-    fprintf(fp, "  database name: %s\n", g_Dbs.db[i].dbName);
-    if (0 == g_Dbs.db[i].drop) {
-      fprintf(fp, "  drop: no\n");
-    }else {
-      fprintf(fp, "  drop: yes\n");
-    }
-
-    if (g_Dbs.db[i].dbCfg.blocks > 0) {
-      fprintf(fp, "  blocks: %d\n", g_Dbs.db[i].dbCfg.blocks);
-    }
-    if (g_Dbs.db[i].dbCfg.cache > 0) {
-      fprintf(fp, "  cache: %d\n", g_Dbs.db[i].dbCfg.cache);
-    }
-    if (g_Dbs.db[i].dbCfg.days > 0) {
-      fprintf(fp, "  days: %d\n", g_Dbs.db[i].dbCfg.days);
-    }
-    if (g_Dbs.db[i].dbCfg.keep > 0) {
-      fprintf(fp, "  keep: %d\n", g_Dbs.db[i].dbCfg.keep);
-    }
-    if (g_Dbs.db[i].dbCfg.replica > 0) {
-      fprintf(fp, "  replica: %d\n", g_Dbs.db[i].dbCfg.replica);
-    }
-    if (g_Dbs.db[i].dbCfg.update > 0) {
-      fprintf(fp, "  update: %d\n", g_Dbs.db[i].dbCfg.update);
-    }
-    if (g_Dbs.db[i].dbCfg.minRows > 0) {
-      fprintf(fp, "  minRows: %d\n", g_Dbs.db[i].dbCfg.minRows);
-    }
-    if (g_Dbs.db[i].dbCfg.maxRows > 0) {
-      fprintf(fp, "  maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows);
-    }
-    if (g_Dbs.db[i].dbCfg.comp > 0) {
-      fprintf(fp, "  comp: %d\n", g_Dbs.db[i].dbCfg.comp);
-    }
-    if (g_Dbs.db[i].dbCfg.walLevel > 0) {
-      fprintf(fp, "  walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel);
-    }
-    if (g_Dbs.db[i].dbCfg.fsync > 0) {
-      fprintf(fp, "  fsync: %d\n", g_Dbs.db[i].dbCfg.fsync);
-    }
-    if (g_Dbs.db[i].dbCfg.quorum > 0) {
-      fprintf(fp, "  quorum: %d\n", g_Dbs.db[i].dbCfg.quorum);
-    }
-    if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
-      if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
-        fprintf(fp, "  precision: %s\n", g_Dbs.db[i].dbCfg.precision);
-      } else {
-        fprintf(fp, "  precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
-      }
-    }
-
-    fprintf(fp, "  super table count: %d\n", g_Dbs.db[i].superTblCount);
-    for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
-      fprintf(fp, "  super table[%d]:\n", j);
-
-      fprintf(fp, "      stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
-
-      if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
-        fprintf(fp, "      autoCreateTable: %s\n", "no");
-      } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
-        fprintf(fp, "      autoCreateTable: %s\n", "yes");
-      } else {
-        fprintf(fp, "      autoCreateTable: %s\n", "error");
-      }
-
-      if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
-        fprintf(fp, "      childTblExists: %s\n", "no");
-      } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
-        fprintf(fp, "      childTblExists: %s\n", "yes");
-      } else {
-        fprintf(fp, "      childTblExists: %s\n", "error");
-      }
-
-      fprintf(fp, "      childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount);
-      fprintf(fp, "      childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix);
-      fprintf(fp, "      dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource);
-      fprintf(fp, "      insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode);
-      fprintf(fp, "      insertRate: %d\n", g_Dbs.db[i].superTbls[j].insertRate);
printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); - } - - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); - } - printf("\n"); - printf("super table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate); - printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName); - - if (SUBSCRIBE_MODE == g_jsonType) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress); - } - - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount); - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]); - } - printf("\n"); - printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n"); -} - - -static char* xFormatTimestamp(char* buf, int64_t val, int precision) { - time_t tt; - if (precision == TSDB_TIME_PRECISION_MICRO) { - tt = (time_t)(val / 1000000); - } else { - tt = (time_t)(val / 1000); - } - -/* comment out as it make testcases like select_with_tags.sim fail. - but in windows, this may cause the call to localtime crash if tt < 0, - need to find a better solution. - if (tt < 0) { - tt = 0; - } - */ - -#ifdef WINDOWS - if (tt < 0) tt = 0; -#endif - - struct tm* ptm = localtime(&tt); - size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); - - if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); - } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); - } - - return buf; -} - -static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { - if (val == NULL) { - fprintf(fp, "%s", TSDB_DATA_NULL_STR); - return; - } - - char buf[TSDB_MAX_BYTES_PER_ROW]; - switch (field->type) { - case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", *((int8_t *)val)); - break; - case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", *((int16_t *)val)); - break; - case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int32_t *)val)); - break; - case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64, *((int64_t *)val)); - break; - case TSDB_DATA_TYPE_FLOAT: - fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); - break; - case TSDB_DATA_TYPE_DOUBLE: - fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(buf, val, length); - buf[length] = 0; - fprintf(fp, "\'%s\'", buf); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - xFormatTimestamp(buf, *(int64_t*)val, precision); - fprintf(fp, "'%s'", buf); - break; - default: - break; - } -} - -static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { - TAOS_ROW row = taos_fetch_row(tres); - if (row == NULL) { - return 0; - } - - FILE* fp = fopen(fname, "at"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to open file: %s\n", fname); - return -1; - } - - int num_fields = taos_num_fields(tres); - TAOS_FIELD *fields = taos_fetch_fields(tres); - int precision = taos_result_precision(tres); - - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - fprintf(fp, ","); - } - fprintf(fp, "%s", fields[col].name); - } - fputc('\n', fp); - - int numOfRows = 0; - do { - int32_t* length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - if (i > 0) { - fputc(',', fp); - } - xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); - } - fputc('\n', fp); - - numOfRows++; - row = taos_fetch_row(tres); - } while( row != NULL); - - fclose(fp); - - return numOfRows; -} - -static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; - - res = taos_query(taos, "show databases;"); - int32_t code = taos_errno(res); - - if (code != 0) { - fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res)); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - while ((row = taos_fetch_row(res)) != NULL) { - // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue; - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count); - return -1; - } - - strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI); - dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = *((int8_t 
*)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); - - count++; - if (count > MAX_DATABASE_COUNT) { - fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT); - break; - } - } - - return count; -} - -static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) { - FILE *fp = NULL; - if (filename[0] != 0) { - fp = fopen(filename, "at"); - if (fp == NULL) { - fprintf(stderr, "failed to open file: %s\n", filename); - return; - } - } - - fprintf(fp, "================ database[%d] ================\n", index); - fprintf(fp, "name: %s\n", dbInfos->name); - fprintf(fp, "created_time: %s\n", dbInfos->create_time); - fprintf(fp, "ntables: %d\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); - fprintf(fp, "replica: %d\n", dbInfos->replica); - fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); - fprintf(fp, "cache(MB): %d\n", dbInfos->cache); - fprintf(fp, "blocks: %d\n", dbInfos->blocks); - fprintf(fp, "minrows: %d\n", dbInfos->minrows); - fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); - fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); - fprintf(fp, "fsync: %d\n", dbInfos->fsync); - fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); - fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); - fprintf(fp, "\n"); - - fclose(fp); -} - -static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; - char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; - TAOS_RES* res; - - time_t t; - struct tm* lt; - time(&t); - lt = localtime(&t); - snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); - - // show variables - res = taos_query(taos, "show variables;"); - //getResult(res, filename); - xDumpResultToFile(filename, res); - - // show dnodes - res = taos_query(taos, "show dnodes;"); - xDumpResultToFile(filename, res); - //getResult(res, filename); - - // show databases - res = taos_query(taos, "show databases;"); - SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } - int dbCount = getDbFromServer(taos, dbInfos); - if (dbCount <= 0) return; - - for (int i = 0; i < dbCount; i++) { - // printf database info - printfDbInfoForQueryToFile(filename, dbInfos[i], i); - - // show db.vgroups - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - // show db.stables - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - free(dbInfos[i]); - } - - 
free(dbInfos); - -} - - -#ifdef TD_LOWA_CURL -static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp) -{ - size_t realsize = size * nmemb; - curlMemInfo* mem = (curlMemInfo*)userp; - - char *ptr = realloc(mem->buf, mem->sizeleft + realsize + 1); - if(ptr == NULL) { - /* out of memory! */ - printf("not enough memory (realloc returned NULL)\n"); - return 0; - } - - mem->buf = ptr; - memcpy(&(mem->buf[mem->sizeleft]), contents, realsize); - mem->sizeleft += realsize; - mem->buf[mem->sizeleft] = 0; - - //printf("result:%s\n\n", mem->buf); - - return realsize; -} - -void curlProceLogin(void) -{ - CURL *curl_handle; - CURLcode res; - - curlMemInfo chunk; - - chunk.buf = malloc(1); /* will be grown as needed by the realloc above */ - chunk.sizeleft = 0; /* no data at this point */ - - //curl_global_init(CURL_GLOBAL_ALL); - - /* init the curl session */ - curl_handle = curl_easy_init(); - - curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,""); - curl_easy_setopt(curl_handle, CURLOPT_POST, 1); - - char dstUrl[128] = {0}; - snprintf(dstUrl, 128, "http://%s:6041/rest/login/root/taosdata", g_Dbs.host); - - /* specify URL to get */ - curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl); - - /* send all data to this function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback); - - /* we pass our 'chunk' struct to the callback function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); - - /* do it! */ - res = curl_easy_perform(curl_handle); - - /* check for errors */ - if(res != CURLE_OK) { - fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); - } - else { - //printf("response len:%lu, content: %s \n", (unsigned long)chunk.sizeleft, chunk.buf); - ; - } - - /* cleanup curl stuff */ - curl_easy_cleanup(curl_handle); - - free(chunk.buf); - - /* we're done with libcurl, so clean it up */ - //curl_global_cleanup(); - - return; -} - -int curlProceSql(char* host, uint16_t port, char* sqlstr, CURL *curl_handle) -{ - //curlProceLogin(); - - //CURL *curl_handle; - CURLcode res; - - curlMemInfo chunk; - - chunk.buf = malloc(1); /* will be grown as needed by the realloc above */ - chunk.sizeleft = 0; /* no data at this point */ - - - char dstUrl[128] = {0}; - snprintf(dstUrl, 128, "http://%s:%u/rest/sql", host, port+TSDB_PORT_HTTP); - - //curl_global_init(CURL_GLOBAL_ALL); - - /* init the curl session */ - //curl_handle = curl_easy_init(); - - //curl_easy_setopt(curl_handle,CURLOPT_POSTFIELDS,""); - curl_easy_setopt(curl_handle, CURLOPT_POST, 1L); - - /* specify URL to get */ - curl_easy_setopt(curl_handle, CURLOPT_URL, dstUrl); - - /* enable TCP keep-alive for this transfer */ - curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPALIVE, 1L); - /* keep-alive idle time to 120 seconds */ - curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPIDLE, 120L); - /* interval time between keep-alive probes: 60 seconds */ - curl_easy_setopt(curl_handle, CURLOPT_TCP_KEEPINTVL, 60L); - - /* send all data to this function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, responseCallback); - - /* we pass our 'chunk' struct to the callback function */ - curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *)&chunk); - - struct curl_slist *list = NULL; - list = curl_slist_append(list, "Authorization: Basic cm9vdDp0YW9zZGF0YQ=="); - curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list); - curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, list); - - /* Set the expected upload size. 
*/ - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)strlen(sqlstr)); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, sqlstr); - - /* get it! */ - res = curl_easy_perform(curl_handle); - - /* check for errors */ - if(res != CURLE_OK) { - fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); - return -1; - } - else { - /* curl_easy_perform() block end and return result */ - //printf("[%32.32s] sql response len:%lu, content: %s \n\n", sqlstr, (unsigned long)chunk.sizeleft, chunk.buf); - ; - } - - curl_slist_free_all(list); /* free the list again */ - - /* cleanup curl stuff */ - //curl_easy_cleanup(curl_handle); - - free(chunk.buf); - - /* we're done with libcurl, so clean it up */ - //curl_global_cleanup(); - - return 0; -} -#endif - -char* getTagValueFromTagSample( SSuperTable* stbInfo, int tagUsePos) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); - - return dataBuf; -} - -char* generateTagVaulesForStb(SSuperTable* stbInfo) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); - for (int i = 0; i < stbInfo->tagCount; i++) { - if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", 5))) { - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - tmfree(dataBuf); - return NULL; - } - - char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1); - if (NULL == buf) { - printf("calloc failed! 
size:%d\n", stbInfo->tags[i].dataLen); - tmfree(dataBuf); - return NULL; - } - rand_string(buf, stbInfo->tags[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_int()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_float()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%f, ", rand_double()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", 4)) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64", ", rand_bigint()); - } else { - printf("No support data type: %s\n", stbInfo->tags[i].dataType); - tmfree(dataBuf); - return NULL; - } - } - dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); - return dataBuf; -} - -static int calcRowLen(SSuperTable* superTbls) { - int colIndex; - int lenOfOneRow = 0; - - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char* dataType = superTbls->columns[colIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfOneRow += 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfOneRow += 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfOneRow += 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfOneRow += 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfOneRow += 42; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - lenOfOneRow += 21; - } else { - printf("get error data type : %s\n", dataType); - exit(-1); - } - } - - superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp - - int tagIndex; - int lenOfTagOfOneRow = 0; - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char* dataType = superTbls->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfTagOfOneRow += 
superTbls->tags[tagIndex].dataLen + 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; - } else { - printf("get error tag type : %s\n", dataType); - exit(-1); - } - } - - superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - - return 0; -} - - -static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int* childTblCountOfSuperTbl) { - char command[BUFFER_SIZE] = "\0"; - TAOS_RES * res; - TAOS_ROW row = NULL; - - char* childTblName = *childTblNameOfSuperTbl; - - //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s", dbName, sTblName); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printf("failed to run command %s\n", command); - taos_free_result(res); - taos_close(taos); - exit(-1); - } - - int childTblCount = 10000; - int count = 0; - childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); - char* pTblName = childTblName; - while ((row = taos_fetch_row(res)) != NULL) { - int32_t* len = taos_fetch_lengths(res); - strncpy(pTblName, (char *)row[0], len[0]); - //printf("==== sub table name: %s\n", pTblName); - count++; - if (count >= childTblCount - 1) { - char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); - if (tmp != NULL) { - childTblName = tmp; - childTblCount = (int)(childTblCount*1.5); - memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); - } else { - // exit, if allocate more memory failed - printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName); - tmfree(childTblName); - taos_free_result(res); - taos_close(taos); - exit(-1); - } - } - pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; - } - - *childTblCountOfSuperTbl = count; - *childTblNameOfSuperTbl = childTblName; - - taos_free_result(res); - return 0; -} - -static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { - char command[BUFFER_SIZE] = "\0"; - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; - - //get schema use cmd: describe superTblName; - snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printf("failed to run command %s\n", command); - taos_free_result(res); - return -1; - } - - int tagIndex = 0; - int columnIndex = 0; - TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { - if (0 == count) { - count++; - continue; - } - - if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { - strncpy(superTbls->tags[tagIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - strncpy(superTbls->tags[tagIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - superTbls->tags[tagIndex].dataLen = *((int 
*)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - strncpy(superTbls->tags[tagIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - tagIndex++; - } else { - strncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - strncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - strncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - columnIndex++; - } - count++; - } - - superTbls->columnCount = columnIndex; - superTbls->tagCount = tagIndex; - taos_free_result(res); - - calcRowLen(superTbls); - - if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { - //get all child table name use cmd: select tbname from superTblName; - getAllChildNameOfSuperTable(taos, dbName, superTbls->sTblName, &superTbls->childTblName, &superTbls->childTblCount); - } - return 0; -} - -static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) { - char command[BUFFER_SIZE] = "\0"; - - char cols[STRING_LEN] = "\0"; - int colIndex; - int len = 0; - - int lenOfOneRow = 0; - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char* dataType = superTbls->columns[colIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", superTbls->columns[colIndex].dataLen); - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "NCHAR", superTbls->columns[colIndex].dataLen); - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); - lenOfOneRow += 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT"); - lenOfOneRow += 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT"); - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT"); - lenOfOneRow += 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL"); - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT"); - lenOfOneRow += 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE"); - lenOfOneRow += 42; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP"); - lenOfOneRow += 21; - } else { - taos_close(taos); - printf("config error data type : %s\n", dataType); - exit(-1); - } - } - - superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp - //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName, g_Dbs.db[i].superTbls[j].columnCount, 
lenOfOneRow); - - // save for creating child table - superTbls->colsOfCreatChildTable = (char*)calloc(len+20, 1); - if (NULL == superTbls->colsOfCreatChildTable) { - printf("Failed when calloc, size:%d", len+1); - taos_close(taos); - exit(-1); - } - snprintf(superTbls->colsOfCreatChildTable, len+20, "(ts timestamp%s)", cols); - - if (use_metric) { - char tags[STRING_LEN] = "\0"; - int tagIndex; - len = 0; - - int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, STRING_LEN - len, "("); - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char* dataType = superTbls->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "BINARY", superTbls->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, "NCHAR", superTbls->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "INT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BIGINT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "SMALLINT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "TINYINT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "BOOL"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE"); - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; - } else { - taos_close(taos); - printf("config error tag type : %s\n", dataType); - exit(-1); - } - } - len -= 2; - len += snprintf(tags + len, STRING_LEN - len, ")"); - - superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - - snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbls->sTblName, cols, tags); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { - return -1; - } - printf("\ncreate supertable %s success!\n\n", superTbls->sTblName); - } - return 0; -} - - -static int createDatabases() { - TAOS * taos = NULL; - int ret = 0; - taos_init(); - taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); - } - char command[BUFFER_SIZE] = "\0"; - - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].drop) { - sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { - taos_close(taos); - return -1; - } - } - - int dataLen = 0; - dataLen 
+= snprintf(command + dataLen, BUFFER_SIZE - dataLen, "create database if not exists %s ", g_Dbs.db[i].dbName); - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "blocks %d ", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "cache %d ", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "days %d ", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "keep %d ", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "replica %d ", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "update %d ", g_Dbs.db[i].dbCfg.update); - } - //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { - // dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); - //} - if (g_Dbs.db[i].dbCfg.minRows > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "minrows %d ", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "maxrows %d ", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "comp %d ", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "wal %d ", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "fsync %d ", g_Dbs.db[i].dbCfg.fsync); - } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "precision \'%s\';", g_Dbs.db[i].dbCfg.precision); - } - - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { - taos_close(taos); - return -1; - } - printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); - - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - // describe super table, if exists - sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { - g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS; - ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric); - } else { - g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS; - ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]); - } - - if (0 != ret) { - taos_close(taos); - return -1; - } - } - } - - taos_close(taos); - return 0; -} - - -void * createTable(void *sarg) -{ - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; - - int64_t lastPrintTime = taosGetTimestampMs(); - - char* buffer = calloc(superTblInfo->maxSqlLen, 1); - - int len = 0; - int batchNum = 0; - //printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id); - for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(buffer, BUFFER_SIZE, "create table if not exists %s.%s%d %s;", winfo->db_name, 
superTblInfo->childTblPrefix, i, superTblInfo->colsOfCreatChildTable); - } else { - if (0 == len) { - batchNum = 0; - memset(buffer, 0, superTblInfo->maxSqlLen); - len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "create table "); - } - - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); - } else { - tagsValBuf = getTagValueFromTagSample(superTblInfo, i % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - free(buffer); - return NULL; - } - - len += snprintf(buffer + len, superTblInfo->maxSqlLen - len, "if not exists %s.%s%d using %s.%s tags %s ", winfo->db_name, superTblInfo->childTblPrefix, i, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - free(tagsValBuf); - batchNum++; - - if ((batchNum < superTblInfo->batchCreateTableNum) && ((superTblInfo->maxSqlLen - len) >= (superTblInfo->lenOfTagOfOneRow + 256))) { - continue; - } - } - - len = 0; - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ - free(buffer); - return NULL; - } - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %d - %d tables\n", winfo->threadID, winfo->start_table_id, i); - lastPrintTime = currentPrintTime; - } - } - - if (0 != len) { - (void)queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE); - } - - free(buffer); - return NULL; -} - -void startMultiThreadCreateChildTable(char* cols, int threads, int ntables, char* db_name, SSuperTable* superTblInfo) { - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - threadInfo *infos = malloc(threads * sizeof(threadInfo)); - - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); - } - - if (threads < 1) { - threads = 1; - } - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - b = ntables % threads; - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); - t_info->superTblInfo = superTblInfo; - t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - t_info->start_table_id = last; - t_info->end_table_id = i < b ? 
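/*
 * The range assignment here splits ntables across threads: with
 * a = ntables / threads and b = ntables % threads, the first b threads
 * take a + 1 tables each and the remaining threads take a, so every
 * table ID is covered exactly once. A standalone check of that
 * arithmetic (names illustrative):
 */
#include <stdio.h>

int main(void) {
  int ntables = 10, threads = 4;
  int a = ntables / threads;   /* 2 */
  int b = ntables % threads;   /* 2 */
  int last = 0;
  for (int i = 0; i < threads; i++) {
    int end = (i < b) ? last + a : last + a - 1;
    printf("thread %d: tables %d..%d\n", i, last, end);
    last = end + 1;
  }
  /* prints 0..2, 3..5, 6..7, 8..9 -- 10 tables, no gaps, no overlap */
  return 0;
}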
last + a : last + a - 1; - last = t_info->end_table_id + 1; - t_info->use_metric = 1; - t_info->cols = cols; - pthread_create(pids + i, NULL, createTable, t_info); - } - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infos + i; - taos_close(t_info->taos); - } - - free(pids); - free(infos); -} - - -static void createChildTables() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } - startMultiThreadCreateChildTable(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable, g_Dbs.threadCountByCreateTbl, g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - } - } -} - -/* -static int taosGetLineNum(const char *fileName) -{ - int lineNum = 0; - char cmd[1024] = { 0 }; - char buf[1024] = { 0 }; - sprintf(cmd, "wc -l %s", fileName); - - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - return lineNum; - } - - if (fgets(buf, sizeof(buf), fp)) { - int index = strchr((const char*)buf, ' ') - buf; - buf[index] = '\0'; - lineNum = atoi(buf); - } - pclose(fp); - return lineNum; -} -*/ - -/* - Read 10000 lines at most. If more than 10000 lines, continue to read after using -*/ -int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - - FILE *fp = fopen(superTblInfo->tagsFile, "r"); - if (fp == NULL) { - printf("Failed to open tags file: %s, reason:%s\n", superTblInfo->tagsFile, strerror(errno)); - return -1; - } - - if (superTblInfo->tagDataBuf) { - free(superTblInfo->tagDataBuf); - superTblInfo->tagDataBuf = NULL; - } - - int tagCount = 10000; - int count = 0; - char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); - if (tagDataBuf == NULL) { - printf("Failed to calloc, reason:%s\n", strerror(errno)); - fclose(fp); - return -1; - } - - while ((readLen = getline(&line, &n, fp)) != -1) { - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); - count++; - - if (count >= tagCount - 1) { - char *tmp = realloc(tagDataBuf, (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); - if (tmp != NULL) { - tagDataBuf = tmp; - tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); - } else { - // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); - tmfree(tagDataBuf); - free(line); - fclose(fp); - return -1; - } - } - } - - superTblInfo->tagDataBuf = tagDataBuf; - superTblInfo->tagSampleCount = count; - - free(line); - fclose(fp); - return 0; -} - -int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) { - // TODO - return 0; -} - - -/* - Read 10000 lines at most. 
If more than 10000 lines, continue to read after using -*/ -int readSampleFromCsvFileToMem(FILE *fp, SSuperTable* superTblInfo, char* sampleBuf) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - int getRows = 0; - - memset(sampleBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE* superTblInfo->lenOfOneRow); - while (1) { - readLen = getline(&line, &n, fp); - if (-1 == readLen) { - if(0 != fseek(fp, 0, SEEK_SET)) { - printf("Failed to fseek file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); - return -1; - } - continue; - } - - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - if (readLen > superTblInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", (int32_t)readLen, superTblInfo->lenOfOneRow); - continue; - } - - memcpy(sampleBuf + getRows * superTblInfo->lenOfOneRow, line, readLen); - getRows++; - - if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { - break; - } - } - - tmfree(line); - return 0; -} - -/* -void readSampleFromFileToMem(SSuperTable * supterTblInfo) { - int ret; - if (0 == strncasecmp(supterTblInfo->sampleFormat, "csv", 3)) { - ret = readSampleFromCsvFileToMem(supterTblInfo); - } else if (0 == strncasecmp(supterTblInfo->sampleFormat, "json", 4)) { - ret = readSampleFromJsonFileToMem(supterTblInfo); - } - - if (0 != ret) { - exit(-1); - } -} -*/ -static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) { - bool ret = false; - - // columns - cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); - if (columns && columns->type != cJSON_Array) { - printf("failed to read json, columns not found\n"); - goto PARSE_OVER; - } else if (NULL == columns) { - superTbls->columnCount = 0; - superTbls->tagCount = 0; - return true; - } - - int columnSize = cJSON_GetArraySize(columns); - if (columnSize > MAX_COLUMN_COUNT) { - printf("failed to read json, column size overflow, max column size is %d\n", MAX_COLUMN_COUNT); - goto PARSE_OVER; - } - - int count = 1; - int index = 0; - StrColumn columnCase; - - //superTbls->columnCount = columnSize; - for (int k = 0; k < columnSize; ++k) { - cJSON* column = cJSON_GetArrayItem(columns, k); - if (column == NULL) continue; - - count = 1; - cJSON* countObj = cJSON_GetObjectItem(column, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - printf("failed to read json, column count not found"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - printf("failed to read json, column type not found"); - goto PARSE_OVER; - } - //strncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - - cJSON* dataLen = cJSON_GetObjectItem(column, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - printf("failed to read json, column len not found"); - goto PARSE_OVER; - } else { - columnCase.dataLen = 8; - } - - for (int n = 0; n < count; ++n) { - strncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); - superTbls->columns[index].dataLen = columnCase.dataLen; - 
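/*
 * Each scalar option in the insert JSON is read with the same three-way
 * pattern seen for "count", "type", and "len" above: present and
 * well-typed -> use it; absent -> default; present but mis-typed ->
 * reject the whole file. The repetition could be factored into a helper
 * like this sketch (hypothetical name, using the cJSON library this
 * file already links against):
 */
#include "cJSON.h"

/* returns 0 on success, -1 when the field exists but is not a number */
static int getIntField(cJSON *obj, const char *name, int defaultVal, int *out) {
  cJSON *item = cJSON_GetObjectItem(obj, name);
  if (item && item->type == cJSON_Number) {
    *out = (int)item->valueint;
    return 0;
  }
  if (!item) {        /* field omitted: fall back to the default */
    *out = defaultVal;
    return 0;
  }
  return -1;          /* wrong type: caller bails out to PARSE_OVER */
}
/* usage sketch: if (getIntField(column, "len", 8, &columnCase.dataLen) != 0) goto PARSE_OVER; */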
index++; - } - } - superTbls->columnCount = index; - - count = 1; - index = 0; - // tags - cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); - if (!tags || tags->type != cJSON_Array) { - printf("failed to read json, tags not found"); - goto PARSE_OVER; - } - - int tagSize = cJSON_GetArraySize(tags); - if (tagSize > MAX_TAG_COUNT) { - printf("failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT); - goto PARSE_OVER; - } - - //superTbls->tagCount = tagSize; - for (int k = 0; k < tagSize; ++k) { - cJSON* tag = cJSON_GetArrayItem(tags, k); - if (tag == NULL) continue; - - count = 1; - cJSON* countObj = cJSON_GetObjectItem(tag, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - printf("failed to read json, column count not found"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - printf("failed to read json, tag type not found"); - goto PARSE_OVER; - } - strncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - - cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - printf("failed to read json, column len not found"); - goto PARSE_OVER; - } else { - columnCase.dataLen = 0; - } - - for (int n = 0; n < count; ++n) { - strncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); - superTbls->tags[index].dataLen = columnCase.dataLen; - index++; - } - } - superTbls->tagCount = index; - - ret = true; - -PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); - return ret; -} - -static bool getMetaFromInsertJsonFile(cJSON* root) { - bool ret = false; - - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - strncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - strncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE); - } else if (!host) { - strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_Dbs.port = port->valueint; - } else if (!port) { - g_Dbs.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - strncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE); - } else if (!user) { - strncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE); - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) { - strncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE); - } else if (!password) { - strncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE); - } - - cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); - if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { - strncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); - } else if (!resultfile) { - 
strncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); - } - - cJSON* threads = cJSON_GetObjectItem(root, "thread_count"); - if (threads && threads->type == cJSON_Number) { - g_Dbs.threadCount = threads->valueint; - } else if (!threads) { - g_Dbs.threadCount = 1; - } else { - printf("failed to read json, threads not found"); - goto PARSE_OVER; - } - - cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); - if (threads2 && threads2->type == cJSON_Number) { - g_Dbs.threadCountByCreateTbl = threads2->valueint; - } else if (!threads2) { - g_Dbs.threadCountByCreateTbl = 1; - } else { - printf("failed to read json, threads2 not found"); - goto PARSE_OVER; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (!dbs || dbs->type != cJSON_Array) { - printf("failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - int dbSize = cJSON_GetArraySize(dbs); - if (dbSize > MAX_DB_COUNT) { - printf("failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT); - goto PARSE_OVER; - } - - g_Dbs.dbCount = dbSize; - for (int i = 0; i < dbSize; ++i) { - cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); - if (dbinfos == NULL) continue; - - // dbinfo - cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); - if (!dbinfo || dbinfo->type != cJSON_Object) { - printf("failed to read json, dbinfo not found"); - goto PARSE_OVER; - } - - cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); - if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { - printf("failed to read json, db name not found"); - goto PARSE_OVER; - } - strncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE); - - cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); - if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { - if (0 == strncasecmp(drop->valuestring, "yes", 3)) { - g_Dbs.db[i].drop = 1; - } else { - g_Dbs.db[i].drop = 0; - } - } else if (!drop) { - g_Dbs.db[i].drop = 0; - } else { - printf("failed to read json, drop not found"); - goto PARSE_OVER; - } - - cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); - if (precision && precision->type == cJSON_String && precision->valuestring != NULL) { - strncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE); - } else if (!precision) { - //strncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, precision not found"); - goto PARSE_OVER; - } - - cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); - if (update && update->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.update = update->valueint; - } else if (!update) { - g_Dbs.db[i].dbCfg.update = -1; - } else { - printf("failed to read json, update not found"); - goto PARSE_OVER; - } - - cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); - if (replica && replica->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.replica = replica->valueint; - } else if (!replica) { - g_Dbs.db[i].dbCfg.replica = -1; - } else { - printf("failed to read json, replica not found"); - goto PARSE_OVER; - } - - cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); - if (keep && keep->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.keep = keep->valueint; - } else if (!keep) { - g_Dbs.db[i].dbCfg.keep = -1; - } else { - printf("failed to read json, keep not found"); - goto PARSE_OVER; - } - - cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); - if (days && days->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.days = days->valueint; - 
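/*
 * For reference, the "dbinfo" object these branches consume looks like
 * the following (values illustrative). Any omitted numeric option is
 * stored as -1 and later skipped when the CREATE DATABASE statement is
 * built, so the server default applies:
 *
 *   "dbinfo": {
 *     "name":      "db",
 *     "drop":      "yes",
 *     "replica":   1,
 *     "days":      10,
 *     "keep":      3650,
 *     "cache":     16,
 *     "blocks":    8,
 *     "precision": "ms",
 *     "update":    0
 *   }
 */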
} else if (!days) { - g_Dbs.db[i].dbCfg.days = -1; - } else { - printf("failed to read json, days not found"); - goto PARSE_OVER; - } - - cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); - if (cache && cache->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cache = cache->valueint; - } else if (!cache) { - g_Dbs.db[i].dbCfg.cache = -1; - } else { - printf("failed to read json, cache not found"); - goto PARSE_OVER; - } - - cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); - if (blocks && blocks->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.blocks = blocks->valueint; - } else if (!blocks) { - g_Dbs.db[i].dbCfg.blocks = -1; - } else { - printf("failed to read json, block not found"); - goto PARSE_OVER; - } - - //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); - //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; - //} else if (!maxtablesPerVnode) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; - //} else { - // printf("failed to read json, maxtablesPerVnode not found"); - // goto PARSE_OVER; - //} - - cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows"); - if (minRows && minRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.minRows = minRows->valueint; - } else if (!minRows) { - g_Dbs.db[i].dbCfg.minRows = -1; - } else { - printf("failed to read json, minRows not found"); - goto PARSE_OVER; - } - - cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); - if (maxRows && maxRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; - } else if (!maxRows) { - g_Dbs.db[i].dbCfg.maxRows = -1; - } else { - printf("failed to read json, maxRows not found"); - goto PARSE_OVER; - } - - cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); - if (comp && comp->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.comp = comp->valueint; - } else if (!comp) { - g_Dbs.db[i].dbCfg.comp = -1; - } else { - printf("failed to read json, comp not found"); - goto PARSE_OVER; - } - - cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel"); - if (walLevel && walLevel->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint; - } else if (!walLevel) { - g_Dbs.db[i].dbCfg.walLevel = -1; - } else { - printf("failed to read json, walLevel not found"); - goto PARSE_OVER; - } - - cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum"); - if (quorum && quorum->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.quorum = quorum->valueint; - } else if (!quorum) { - g_Dbs.db[i].dbCfg.quorum = -1; - } else { - printf("failed to read json, walLevel not found"); - goto PARSE_OVER; - } - - cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync"); - if (fsync && fsync->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.fsync = fsync->valueint; - } else if (!fsync) { - g_Dbs.db[i].dbCfg.fsync = -1; - } else { - printf("failed to read json, fsync not found"); - goto PARSE_OVER; - } - - // super_talbes - cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); - if (!stables || stables->type != cJSON_Array) { - printf("failed to read json, super_tables not found"); - goto PARSE_OVER; - } - - int stbSize = cJSON_GetArraySize(stables); - if (stbSize > MAX_SUPER_TABLE_COUNT) { - printf("failed to read json, databases size overflow, max database is %d\n", MAX_SUPER_TABLE_COUNT); - goto PARSE_OVER; - } - - g_Dbs.db[i].superTblCount = stbSize; - for (int j = 0; j < stbSize; ++j) { - cJSON* stbInfo = cJSON_GetArrayItem(stables, j); - if (stbInfo == NULL) continue; - - // dbinfo - cJSON *stbName = 
cJSON_GetObjectItem(stbInfo, "name"); - if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { - printf("failed to read json, stb name not found"); - goto PARSE_OVER; - } - strncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE); - - cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); - if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { - printf("failed to read json, childtable_prefix not found"); - goto PARSE_OVER; - } - strncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE); - - cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null - if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) { - if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; - } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } - } else if (!autoCreateTbl) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - printf("failed to read json, auto_create_table not found"); - goto PARSE_OVER; - } - - cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); - if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; - } else if (!batchCreateTbl) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = 2000; - } else { - printf("failed to read json, batch_create_tbl_num not found"); - goto PARSE_OVER; - } - - cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no - if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) { - if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; - } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } - } else if (!childTblExists) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - printf("failed to read json, child_table_exists not found"); - goto PARSE_OVER; - } - - cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); - if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - printf("failed to read json, childtable_count not found"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; - - cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); - if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) { - strncpy(g_Dbs.db[i].superTbls[j].dataSource, dataSource->valuestring, MAX_DB_NAME_SIZE); - } else if (!dataSource) { - strncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, data_source not found"); - goto PARSE_OVER; - } - - cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful - if (insertMode && insertMode->type == cJSON_String && insertMode->valuestring != NULL) { - strncpy(g_Dbs.db[i].superTbls[j].insertMode, insertMode->valuestring, MAX_DB_NAME_SIZE); - #ifndef TD_LOWA_CURL - if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 7)) 
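/*
 * "insert_mode" is validated against the build right here: when the
 * binary was compiled without libcurl (TD_LOWA_CURL not defined),
 * "restful" is rejected at parse time instead of failing later during
 * the insert. The same guard in isolation (minimal sketch; the macro
 * name is the one this file uses):
 */
#include <stdio.h>
#include <strings.h>

static int checkInsertMode(const char *mode) {
#ifndef TD_LOWA_CURL
  if (0 == strncasecmp(mode, "restful", 7)) {
    printf("no libcurl support: restful test unavailable, please use taosc mode\n");
    return -1;
  }
#endif
  return 0;   /* taosc needs no optional dependency */
}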
{ - printf("There no libcurl, so no support resetful test! please use taosc mode.\n"); - goto PARSE_OVER; - } - #endif - } else if (!insertMode) { - strncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, insert_mode not found"); - goto PARSE_OVER; - } - - cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); - if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { - strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE); - } else if (!ts) { - strncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, start_timestamp not found"); - goto PARSE_OVER; - } - - cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); - if (timestampStep && timestampStep->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; - } else if (!timestampStep) { - g_Dbs.db[i].superTbls[j].timeStampStep = 1000; - } else { - printf("failed to read json, timestamp_step not found"); - goto PARSE_OVER; - } - - cJSON* sampleDataBufSize = cJSON_GetObjectItem(stbInfo, "sample_buf_size"); - if (sampleDataBufSize && sampleDataBufSize->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].sampleDataBufSize = sampleDataBufSize->valueint; - if (g_Dbs.db[i].superTbls[j].sampleDataBufSize < 1024*1024) { - g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; - } - } else if (!sampleDataBufSize) { - g_Dbs.db[i].superTbls[j].sampleDataBufSize = 1024*1024 + 1024; - } else { - printf("failed to read json, sample_buf_size not found"); - goto PARSE_OVER; - } - - cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); - if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { - strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE); - } else if (!sampleFormat) { - strncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, sample_format not found"); - goto PARSE_OVER; - } - - cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { - strncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN); - } else if (!sampleFile) { - memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); - } else { - printf("failed to read json, sample_file not found"); - goto PARSE_OVER; - } - - cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); - if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) { - strncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN); - if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - g_Dbs.db[i].superTbls[j].tagSource = 1; - } - } else if (!tagsFile) { - memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - printf("failed to read json, tags_file not found"); - goto PARSE_OVER; - } - - cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - int32_t len = maxSqlLen->valueint; - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < TSDB_MAX_SQL_LEN) { - len = TSDB_MAX_SQL_LEN; - } - g_Dbs.db[i].superTbls[j].maxSqlLen = len; - } else if (!maxSqlLen) { - 
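/*
 * max_sql_len is clamped into [TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN]:
 * requests above the hard limit are cut down and requests below the
 * default are raised to it. Equivalent standalone helper (the two bounds
 * are TDengine build constants; this sketch just takes them as
 * parameters):
 */
static int clampSqlLen(int len, int lo, int hi) {
  if (len > hi) return hi;   /* cap at the server's hard maximum */
  if (len < lo) return lo;   /* never go below the default */
  return len;
}
/* usage: maxSqlLen = clampSqlLen(requested, TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN); */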
g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN; - } else { - printf("failed to read json, maxSqlLen not found"); - goto PARSE_OVER; - } - - cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes - if (multiThreadWriteOneTbl && multiThreadWriteOneTbl->type == cJSON_String && multiThreadWriteOneTbl->valuestring != NULL) { - if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; - } else { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } - } else if (!multiThreadWriteOneTbl) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } else { - printf("failed to read json, multiThreadWriteOneTbl not found"); - goto PARSE_OVER; - } - - cJSON* numberOfTblInOneSql = cJSON_GetObjectItem(stbInfo, "number_of_tbl_in_one_sql"); - if (numberOfTblInOneSql && numberOfTblInOneSql->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = numberOfTblInOneSql->valueint; - } else if (!numberOfTblInOneSql) { - g_Dbs.db[i].superTbls[j].numberOfTblInOneSql = 0; - } else { - printf("failed to read json, numberOfTblInOneSql not found"); - goto PARSE_OVER; - } - - cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl"); - if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint; - } else if (!rowsPerTbl) { - g_Dbs.db[i].superTbls[j].rowsPerTbl = 1; - } else { - printf("failed to read json, rowsPerTbl not found"); - goto PARSE_OVER; - } - - cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); - if (disorderRatio && disorderRatio->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; - } else if (!disorderRatio) { - g_Dbs.db[i].superTbls[j].disorderRatio = 0; - } else { - printf("failed to read json, disorderRatio not found"); - goto PARSE_OVER; - } - - cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); - if (disorderRange && disorderRange->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint; - } else if (!disorderRange) { - g_Dbs.db[i].superTbls[j].disorderRange = 1000; - } else { - printf("failed to read json, disorderRange not found"); - goto PARSE_OVER; - } - - cJSON* insertRate = cJSON_GetObjectItem(stbInfo, "insert_rate"); - if (insertRate && insertRate->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].insertRate = insertRate->valueint; - } else if (!insertRate) { - g_Dbs.db[i].superTbls[j].insertRate = 0; - } else { - printf("failed to read json, insert_rate not found"); - goto PARSE_OVER; - } - - cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); - if (insertRows && insertRows->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; - if (0 == g_Dbs.db[i].superTbls[j].insertRows) { - g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; - } - } else if (!insertRows) { - g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; - } else { - printf("failed to read json, insert_rows not found"); - goto PARSE_OVER; - } - - if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } - - int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]); - if (false == retVal) { - goto PARSE_OVER; - } - } - } - - ret = true; - -PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); - return ret; -} - -static bool 
getMetaFromQueryJsonFile(cJSON* root) { - bool ret = false; - - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - strncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - strncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE); - } else if (!host) { - strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); - } else { - printf("failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_queryInfo.port = port->valueint; - } else if (!port) { - g_queryInfo.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - strncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE); - } else if (!user) { - strncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ; - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) { - strncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE); - } else if (!password) { - strncpy(g_queryInfo.password, "taosdata", MAX_DB_NAME_SIZE);; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - strncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); - } else if (!dbs) { - printf("failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); - if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) { - strncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE); - } else if (!queryMode) { - strncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE); - } else { - printf("failed to read json, query_mode not found\n"); - goto PARSE_OVER; - } - - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.concurrent = 0; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { - printf("failed to read json, super_table_query not found"); - goto PARSE_OVER; - } else { - cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval"); - if (rate && rate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.rate = rate->valueint; - } else if (!rate) { - g_queryInfo.superQueryInfo.rate = 0; - } - - cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - g_queryInfo.superQueryInfo.concurrent = concurrent->valueint; - } else if (!concurrent) { - g_queryInfo.superQueryInfo.concurrent = 1; - } - - cJSON* mode = cJSON_GetObjectItem(superQuery, "mode"); - if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { - if (0 == strcmp("sync", mode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", mode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 1; - } else { - printf("failed to read json, subscribe mod error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeMode = 0; - } - - cJSON* interval = cJSON_GetObjectItem(superQuery, "interval"); - if (interval && interval->type == cJSON_Number) 
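/*
 * Shape of the query-mode configuration consumed in this function
 * (values illustrative): "specified_table_query" runs its SQL list on
 * dedicated threads, while "super_table_query" below fans the SQLs out
 * across the child tables of "stblname".
 *
 *   "specified_table_query": {
 *     "query_interval": 1000,
 *     "concurrent":     4,
 *     "mode":           "sync",
 *     "interval":       10000,
 *     "restart":        "yes",
 *     "keepProgress":   "no",
 *     "sqls": [ { "sql": "select count(*) from stb", "result": "./query_res.txt" } ]
 *   }
 */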
{ - g_queryInfo.superQueryInfo.subscribeInterval = interval->valueint; - } else if (!interval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.superQueryInfo.subscribeInterval = 10000; - } - - cJSON* restart = cJSON_GetObjectItem(superQuery, "restart"); - if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = 1; - } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = 0; - } else { - printf("failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeRestart = 1; - } - - cJSON* keepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); - if (keepProgress && keepProgress->type == cJSON_String && keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } else { - printf("failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { - printf("failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("failed to read json, sql not found\n"); - goto PARSE_OVER; - } - strncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) { - strncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); - } else { - printf("failed to read json, super query result file not found\n"); - goto PARSE_OVER; - } - } - } - } - - // sub_table_query - cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!subQuery) { - g_queryInfo.subQueryInfo.threadCnt = 0; - g_queryInfo.subQueryInfo.sqlCount = 0; - } else if (subQuery->type != cJSON_Object) { - printf("failed to read json, sub_table_query not found"); - ret = true; - goto PARSE_OVER; - } else { - cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.subQueryInfo.rate = subrate->valueint; - } else if (!subrate) { - g_queryInfo.subQueryInfo.rate = 0; - } - - cJSON* threads = cJSON_GetObjectItem(subQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - g_queryInfo.subQueryInfo.threadCnt = threads->valueint; - } else if (!threads) { - g_queryInfo.subQueryInfo.threadCnt = 1; - } 
- - //cJSON* subTblCnt = cJSON_GetObjectItem(subQuery, "childtable_count"); - //if (subTblCnt && subTblCnt->type == cJSON_Number) { - // g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.subQueryInfo.childTblCount = 0; - //} - - cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - strncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); - } else { - printf("failed to read json, super table name not found\n"); - goto PARSE_OVER; - } - - cJSON* submode = cJSON_GetObjectItem(subQuery, "mode"); - if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { - if (0 == strcmp("sync", submode->valuestring)) { - g_queryInfo.subQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", submode->valuestring)) { - g_queryInfo.subQueryInfo.subscribeMode = 1; - } else { - printf("failed to read json, subscribe mod error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.subQueryInfo.subscribeMode = 0; - } - - cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval"); - if (subinterval && subinterval->type == cJSON_Number) { - g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint; - } else if (!subinterval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.subQueryInfo.subscribeInterval = 10000; - } - - cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.subQueryInfo.subscribeRestart = 1; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.subQueryInfo.subscribeRestart = 0; - } else { - printf("failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.subQueryInfo.subscribeRestart = 1; - } - - cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress"); - if (subkeepProgress && subkeepProgress->type == cJSON_String && subkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", subkeepProgress->valuestring)) { - g_queryInfo.subQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", subkeepProgress->valuestring)) { - g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; - } else { - printf("failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls"); - if (!subsqls) { - g_queryInfo.subQueryInfo.sqlCount = 0; - } else if (subsqls->type != cJSON_Array) { - printf("failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(subsqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.subQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(subsqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("failed to read json, sql not found\n"); - goto PARSE_OVER; - } - strncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); - - cJSON *result = 
cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ - strncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); - } else { - printf("failed to read json, sub query result file not found\n"); - goto PARSE_OVER; - } - } - } - } - - ret = true; - -PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); - return ret; -} - -static bool getInfoFromJsonFile(char* file) { - FILE *fp = fopen(file, "r"); - if (!fp) { - printf("failed to read %s, reason:%s\n", file, strerror(errno)); - return false; - } - - bool ret = false; - int maxLen = 64000; - char *content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - printf("failed to read %s, content is null", file); - return false; - } - - content[len] = 0; - cJSON* root = cJSON_Parse(content); - if (root == NULL) { - printf("failed to cjson parse %s, invalid json format", file); - goto PARSE_OVER; - } - - cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); - if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_jsonType = INSERT_MODE; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_jsonType = QUERY_MODE; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_jsonType = SUBSCRIBE_MODE; - } else { - printf("failed to read json, filetype not support\n"); - goto PARSE_OVER; - } - } else if (!filetype) { - g_jsonType = INSERT_MODE; - } else { - printf("failed to read json, filetype not found\n"); - goto PARSE_OVER; - } - - if (INSERT_MODE == g_jsonType) { - ret = getMetaFromInsertJsonFile(root); - } else if (QUERY_MODE == g_jsonType) { - ret = getMetaFromQueryJsonFile(root); - } else if (SUBSCRIBE_MODE == g_jsonType) { - ret = getMetaFromQueryJsonFile(root); - } else { - printf("input json file type error! 
please input correct file type: insert or query or subscribe\n"); - goto PARSE_OVER; - } - -PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; -} - - -void prePareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - //if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].dataSource, "sample", 6)) { - // readSampleFromFileToMem(&g_Dbs.db[i].superTbls[j]); - //} - - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]); - } - - #ifdef TD_LOWA_CURL - if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) { - curl_global_init(CURL_GLOBAL_ALL); - } - #endif - } - } -} - -void postFreeResource() { - tmfclose(g_fpOfInsertResult); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != g_Dbs.db[i].superTbls[j].colsOfCreatChildTable) { - free(g_Dbs.db[i].superTbls[j].colsOfCreatChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreatChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - free(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - free(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - free(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } - - #ifdef TD_LOWA_CURL - if (0 == strncasecmp(g_Dbs.db[i].superTbls[j].insertMode, "restful", 8)) { - curl_global_cleanup(); - } - #endif - } - } -} - -int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* superTblInfo, int* sampleUsePos, FILE *fp, char* sampleBuf) { - if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { - int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleBuf); - if (0 != ret) { - return -1; - } - *sampleUsePos = 0; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", sampleBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - (*sampleUsePos)++; - - return dataLen; -} - -int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); - for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { - if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - return (-1); - } - - char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); - if (NULL == buf) { - printf("calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); - return (-1); - } - rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_int()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_float()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%f, ", rand_double()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); - } else { - printf("No support data type: %s\n", stbInfo->columns[i].dataType); - return (-1); - } - } - dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - return dataLen; -} - -void syncWriteForNumberOfTblInOneSql(threadInfo *winfo, FILE *fp, char* sampleDataBuf) { - SSuperTable* superTblInfo = winfo->superTblInfo; - - int samplePos = 0; - - //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); - int64_t totalRowsInserted = 0; - int64_t totalAffectedRows = 0; - int64_t lastPrintTime = taosGetTimestampMs(); - - char* buffer = calloc(superTblInfo->maxSqlLen+1, 1); - if (NULL == buffer) { - printf("========calloc size[ %d ] fail!\n", superTblInfo->maxSqlLen); - return; - } - - int32_t numberOfTblInOneSql = superTblInfo->numberOfTblInOneSql; - int32_t tbls = winfo->end_table_id - winfo->start_table_id + 1; - if (numberOfTblInOneSql > tbls) { - numberOfTblInOneSql = tbls; - } - - int64_t time_counter = winfo->start_time; - int64_t tmp_time; - int sampleUsePos; - - int64_t st = 0; - int64_t et = 0; - for (int i = 0; i < superTblInfo->insertRows;) { - if (superTblInfo->insertRate && (et - st) < 1000) { - taosMsleep(1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - if (superTblInfo->insertRate) { - st = taosGetTimestampMs(); - } - - int32_t tbl_id = 0; - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; ) { - int inserted = i; - - int k = 0; - int batchRowsSql = 0; - while (1) - { - int len = 0; - memset(buffer, 0, superTblInfo->maxSqlLen); - char *pstr = buffer; - - int32_t end_tbl_id = tID + numberOfTblInOneSql; - if (end_tbl_id > winfo->end_table_id) { - end_tbl_id = winfo->end_table_id+1; - } - for (tbl_id = tID; tbl_id < end_tbl_id; tbl_id++) { - sampleUsePos = samplePos; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == 
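/*
 * The buffer assembled in this loop batches several child tables into a
 * single INSERT. Depending on the table mode the per-table clause
 * differs (auto-create adds "using ... tags", pre-existing tables are
 * addressed by name), but the overall statement shape is (illustrative):
 *
 *   insert into db.tb0 using db.stb tags ('t0', 1) values (ts, ...)(ts, ...)
 *               db.tb1 using db.stb tags ('t1', 2) values (ts, ...)(ts, ...)
 */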
superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); - } else { - tagsValBuf = getTagValueFromTagSample(superTblInfo, tbl_id % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - goto free_and_statistics; - } - - if (0 == len) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d using %s.%s tags %s values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - } - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - if (0 == len) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s values ", winfo->db_name, superTblInfo->childTblName + tbl_id * TSDB_TABLE_NAME_LEN); - } - } else { // pre-create child table - if (0 == len) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, " %s.%s%d values ", winfo->db_name, superTblInfo->childTblPrefix, tbl_id); - } - } - - tmp_time = time_counter; - for (k = 0; k < superTblInfo->rowsPerTbl;) { - int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf); - if (retLen < 0) { - goto free_and_statistics; - } - } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) { - int rand_num = rand_tinyint() % 100; - if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { - int64_t d = tmp_time - rand() % superTblInfo->disorderRange; - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo); - } else { - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo); - } - if (retLen < 0) { - goto free_and_statistics; - } - } - len += retLen; - //inserted++; - k++; - totalRowsInserted++; - batchRowsSql++; - - if (inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128) || batchRowsSql >= INT16_MAX - 1) { - tID = tbl_id + 1; - printf("config rowsPerTbl and numberOfTblInOneSql not match with max_sql_lenth, please reconfig![lenOfOneRow:%d]\n", superTblInfo->lenOfOneRow); - goto send_to_server; - } - } - - } - - tID = tbl_id; - inserted += superTblInfo->rowsPerTbl; - - send_to_server: - batchRowsSql = 0; - if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - //printf("multi table===== sql: %s \n\n", buffer); - //int64_t t1 = taosGetTimestampMs(); - int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); - if (0 > affectedRows) { - goto free_and_statistics; - } - totalAffectedRows += affectedRows; - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - lastPrintTime = 
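/*
 * Two wall-clock patterns meet here: progress is printed at most every
 * 30 s (lastPrintTime vs. currentPrintTime), and insert_rate is enforced
 * with a fixed one-second window -- take a timestamp before the batch
 * (st) and after it (et), then sleep out the remainder of the second.
 * A self-contained sketch of that throttle; taosGetTimestampMs() and
 * taosMsleep() are the tool's own wrappers, POSIX stand-ins below:
 */
#include <stdint.h>
#include <time.h>

static int64_t nowMs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void throttleWindow(int64_t st, int64_t et) {
  if (et - st < 1000) {                                   /* batch finished early */
    struct timespec d = {0, (long)(1000 - (et - st)) * 1000000L};
    nanosleep(&d, NULL);                                  /* sleep the rest of the second */
  }
}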
currentPrintTime; - } - //int64_t t2 = taosGetTimestampMs(); - //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); - } else { - #ifdef TD_LOWA_CURL - //int64_t t1 = taosGetTimestampMs(); - int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle); - //int64_t t2 = taosGetTimestampMs(); - //printf("http insert sql return, Spent %ld ms \n", t2 - t1); - - if (0 != retCode) { - printf("========curl return fail, threadID[%d]\n", winfo->threadID); - goto free_and_statistics; - } - #else - printf("========no use http mode for no curl lib!\n"); - goto free_and_statistics; - #endif - } - - //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt); - break; - } - - if (tID > winfo->end_table_id) { - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - samplePos = sampleUsePos; - } - i = inserted; - time_counter = tmp_time; - } - } - - if (superTblInfo->insertRate) { - et = taosGetTimestampMs(); - } - //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); - } - - free_and_statistics: - tmfree(buffer); - winfo->totalRowsInserted = totalRowsInserted; - winfo->totalAffectedRows = totalAffectedRows; - printf("====thread[%d] completed total inserted rows: %"PRId64 ", affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - return; -} - -// sync insertion -/* - 1 thread: 100 tables * 2000 rows/s - 1 thread: 10 tables * 20000 rows/s - 6 thread: 300 tables * 2000 rows/s - - 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s -*/ -void *syncWrite(void *sarg) { - int64_t totalRowsInserted = 0; - int64_t totalAffectedRows = 0; - int64_t lastPrintTime = taosGetTimestampMs(); - - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; - - FILE *fp = NULL; - char* sampleDataBuf = NULL; - int samplePos = 0; - - // each thread read sample data from csv file - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - sampleDataBuf = calloc(superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); - if (sampleDataBuf == NULL) { - printf("Failed to calloc %d Bytes, reason:%s\n", superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); - return NULL; - } - - fp = fopen(superTblInfo->sampleFile, "r"); - if (fp == NULL) { - printf("Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); - tmfree(sampleDataBuf); - return NULL; - } - int ret = readSampleFromCsvFileToMem(fp, superTblInfo, sampleDataBuf); - if (0 != ret) { - tmfree(sampleDataBuf); - tmfclose(fp); - return NULL; - } - } - - if (superTblInfo->numberOfTblInOneSql > 0) { - syncWriteForNumberOfTblInOneSql(winfo, fp, sampleDataBuf); - tmfree(sampleDataBuf); - tmfclose(fp); - return NULL; - } - - //printf("========threadID[%d], table rang: %d - %d \n", winfo->threadID, winfo->start_table_id, winfo->end_table_id); - - char* buffer = calloc(superTblInfo->maxSqlLen, 1); - - int nrecords_per_request = 0; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - nrecords_per_request = (superTblInfo->maxSqlLen - 1280 - superTblInfo->lenOfTagOfOneRow) / superTblInfo->lenOfOneRow; - } else { - nrecords_per_request = (superTblInfo->maxSqlLen - 1280) / superTblInfo->lenOfOneRow; - } - - int nrecords_no_last_req = nrecords_per_request; - int nrecords_last_req = 0; - int loop_cnt = 0; - if (0 != superTblInfo->insertRate) { - if (nrecords_no_last_req >= superTblInfo->insertRate) { - 
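/*
 * Batch sizing in this writer: the rows-per-request count is whatever
 * fits into the SQL buffer after reserving 1280 bytes of headroom for
 * the INSERT prefix (plus the tag clause when tables are auto-created),
 * then capped below INT16_MAX and, when insert_rate is set, at the
 * per-second budget. Worked example with made-up sizes:
 *
 *   maxSqlLen = 65480, lenOfTagOfOneRow = 200, lenOfOneRow = 84
 *   rows/request = (65480 - 1280 - 200) / 84 = 64000 / 84 = 761
 */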
nrecords_no_last_req = superTblInfo->insertRate; - } else { - nrecords_last_req = superTblInfo->insertRate % nrecords_per_request; - loop_cnt = (superTblInfo->insertRate / nrecords_per_request) + (superTblInfo->insertRate % nrecords_per_request ? 1 : 0) ; - } - } - - if (nrecords_no_last_req <= 0) { - nrecords_no_last_req = 1; - } - - if (nrecords_no_last_req >= INT16_MAX) { - nrecords_no_last_req = INT16_MAX - 1; - } - - if (nrecords_last_req >= INT16_MAX) { - nrecords_last_req = INT16_MAX - 1; - } - - int nrecords_cur_req = nrecords_no_last_req; - int loop_cnt_orig = loop_cnt; - - //printf("========nrecords_per_request:%d, nrecords_no_last_req:%d, nrecords_last_req:%d, loop_cnt:%d\n", nrecords_per_request, nrecords_no_last_req, nrecords_last_req, loop_cnt); - - int64_t time_counter = winfo->start_time; - - int64_t st = 0; - int64_t et = 0; - for (int i = 0; i < superTblInfo->insertRows;) { - if (superTblInfo->insertRate && (et - st) < 1000) { - taosMsleep(1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - if (superTblInfo->insertRate) { - st = taosGetTimestampMs(); - } - - for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { - int inserted = i; - int64_t tmp_time = time_counter; - - int sampleUsePos = samplePos; - int k = 0; - while (1) - { - int len = 0; - memset(buffer, 0, superTblInfo->maxSqlLen); - char *pstr = buffer; - - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); - } else { - tagsValBuf = getTagValueFromTagSample(superTblInfo, tID % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - goto free_and_statistics_2; - } - - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d using %s.%s tags %s values", winfo->db_name, superTblInfo->childTblPrefix, tID, winfo->db_name, superTblInfo->sTblName, tagsValBuf); - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s values", winfo->db_name, superTblInfo->childTblName + tID * TSDB_TABLE_NAME_LEN); - } else { - len += snprintf(pstr + len, superTblInfo->maxSqlLen - len, "insert into %s.%s%d values", winfo->db_name, superTblInfo->childTblPrefix, tID); - } - - for (k = 0; k < nrecords_cur_req;) { - int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - retLen = getRowDataFromSample(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo, &sampleUsePos, fp, sampleDataBuf); - if (retLen < 0) { - goto free_and_statistics_2; - } - } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", 8)) { - int rand_num = rand_tinyint() % 100; - if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { - int64_t d = tmp_time - rand() % superTblInfo->disorderRange; - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, d, superTblInfo); - //printf("disorder rows, rand_num:%d, last ts:%"PRId64" current ts:%"PRId64"\n", rand_num, tmp_time, d); - } else { - retLen = generateRowData(pstr + len, superTblInfo->maxSqlLen - len, tmp_time += superTblInfo->timeStampStep, superTblInfo); - } - if (retLen < 0) { - goto free_and_statistics_2; - } - } - len += retLen; - inserted++; - k++; - totalRowsInserted++; - - if 
(inserted >= superTblInfo->insertRows || (superTblInfo->maxSqlLen - len) < (superTblInfo->lenOfOneRow + 128)) break; - } - - if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - //printf("===== sql: %s \n\n", buffer); - //int64_t t1 = taosGetTimestampMs(); - int affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); - if (0 > affectedRows){ - goto free_and_statistics_2; - } - totalAffectedRows += affectedRows; - - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - lastPrintTime = currentPrintTime; - } - //int64_t t2 = taosGetTimestampMs(); - //printf("taosc insert sql return, Spent %.4f seconds \n", (double)(t2 - t1)/1000.0); - } else { - #ifdef TD_LOWA_CURL - //int64_t t1 = taosGetTimestampMs(); - int retCode = curlProceSql(g_Dbs.host, g_Dbs.port, buffer, winfo->curl_handle); - //int64_t t2 = taosGetTimestampMs(); - //printf("http insert sql return, Spent %ld ms \n", t2 - t1); - - if (0 != retCode) { - printf("========curl return fail, threadID[%d]\n", winfo->threadID); - goto free_and_statistics_2; - } - #else - printf("========no use http mode for no curl lib!\n"); - goto free_and_statistics_2; - #endif - } - - //printf("========tID:%d, k:%d, loop_cnt:%d\n", tID, k, loop_cnt); - - if (loop_cnt) { - loop_cnt--; - if ((1 == loop_cnt) && (0 != nrecords_last_req)) { - nrecords_cur_req = nrecords_last_req; - } else if (0 == loop_cnt){ - nrecords_cur_req = nrecords_no_last_req; - loop_cnt = loop_cnt_orig; - break; - } - } else { - break; - } - } - - if (tID == winfo->end_table_id) { - if (0 == strncasecmp(superTblInfo->dataSource, "sample", 6)) { - samplePos = sampleUsePos; - } - i = inserted; - time_counter = tmp_time; - } - } - - if (superTblInfo->insertRate) { - et = taosGetTimestampMs(); - } - //printf("========loop %d childTables duration:%"PRId64 "========inserted rows:%d\n", winfo->end_table_id - winfo->start_table_id, et - st, i); - } - - free_and_statistics_2: - tmfree(buffer); - tmfree(sampleDataBuf); - tmfclose(fp); - - winfo->totalRowsInserted = totalRowsInserted; - winfo->totalAffectedRows = totalAffectedRows; - - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", winfo->threadID, totalRowsInserted, totalAffectedRows); - return NULL; -} - -void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* winfo = (threadInfo*)param; - - if (winfo->superTblInfo->insertRate) { - winfo->et = taosGetTimestampMs(); - if (winfo->et - winfo->st < 1000) { - taosMsleep(1000 - (winfo->et - winfo->st)); // ms - } - } - - char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); - char *data = calloc(1, MAX_DATA_SIZE); - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_id); - if (winfo->counter >= winfo->superTblInfo->insertRows) { - winfo->start_table_id++; - winfo->counter = 0; - } - if (winfo->start_table_id > winfo->end_table_id) { - tsem_post(&winfo->lock_sem); - free(buffer); - free(data); - taos_free_result(res); - return; - } - - for (int i = 0; i < winfo->nrecords_per_request; i++) { - int rand_num = rand() % 100; - if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) - { - int64_t d = winfo->lastTs - rand() % 1000000 + rand_num; - //generateData(data, datatype, ncols_per_record, d, 
len_of_binary); - (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo); - } else { - //generateData(data, datatype, ncols_per_record, tmp_time += 1000, len_of_binary); - (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo); - } - pstr += sprintf(pstr, "%s", data); - winfo->counter++; - - if (winfo->counter >= winfo->superTblInfo->insertRows) { - break; - } - } - - if (winfo->superTblInfo->insertRate) { - winfo->st = taosGetTimestampMs(); - } - taos_query_a(winfo->taos, buffer, callBack, winfo); - free(buffer); - free(data); - - taos_free_result(res); -} - -void *asyncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - winfo->nrecords_per_request = 0; - //if (AUTO_CREATE_SUBTBL == winfo->superTblInfo->autoCreateTable) { - winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280 - winfo->superTblInfo->lenOfTagOfOneRow) / winfo->superTblInfo->lenOfOneRow; - //} else { - // winfo->nrecords_per_request = (winfo->superTblInfo->maxSqlLen - 1280) / winfo->superTblInfo->lenOfOneRow; - //} - - if (0 != winfo->superTblInfo->insertRate) { - if (winfo->nrecords_per_request >= winfo->superTblInfo->insertRate) { - winfo->nrecords_per_request = winfo->superTblInfo->insertRate; - } - } - - if (winfo->nrecords_per_request <= 0) { - winfo->nrecords_per_request = 1; - } - - if (winfo->nrecords_per_request >= INT16_MAX) { - winfo->nrecords_per_request = INT16_MAX - 1; - } - - if (winfo->nrecords_per_request >= INT16_MAX) { - winfo->nrecords_per_request = INT16_MAX - 1; - } - - winfo->st = 0; - winfo->et = 0; - winfo->lastTs = winfo->start_time; - - if (winfo->superTblInfo->insertRate) { - winfo->st = taosGetTimestampMs(); - } - taos_query_a(winfo->taos, "show databases", callBack, winfo); - - tsem_wait(&(winfo->lock_sem)); - - return NULL; -} - -void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* superTblInfo) { - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - threadInfo *infos = malloc(threads * sizeof(threadInfo)); - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - int ntables = superTblInfo->childTblCount; - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } - - //TAOS* taos; - //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - // if (NULL == taos) { - // printf("connect to server fail, reason: %s\n", taos_errstr(NULL)); - // exit(-1); - // } - //} - - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; - if (0 != precision[0]) { - if (0 == strncasecmp(precision, "ms", 2)) { - timePrec = TSDB_TIME_PRECISION_MILLI; - } else if (0 == strncasecmp(precision, "us", 2)) { - timePrec = TSDB_TIME_PRECISION_MICRO; - } else { - printf("No support precision: %s\n", precision); - exit(-1); - } - } - - int64_t start_time; - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - start_time = taosGetTimestamp(timePrec); - } else { - (void)taosParseTime(superTblInfo->startTimestamp, &start_time, strlen(superTblInfo->startTimestamp), timePrec, 0); - } - - double start = getCurrentTime(); - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); - t_info->superTblInfo = superTblInfo; - - t_info->start_time = start_time; - - if (0 == 
strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - //t_info->taos = taos; - t_info->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - if (NULL == t_info->taos) { - printf("connect to server fail from insert sub thread, reason: %s\n", taos_errstr(NULL)); - exit(-1); - } - } else { - t_info->taos = NULL; - #ifdef TD_LOWA_CURL - t_info->curl_handle = curl_easy_init(); - #endif - } - - if (0 == superTblInfo->multiThreadWriteOneTbl) { - t_info->start_table_id = last; - t_info->end_table_id = i < b ? last + a : last + a - 1; - last = t_info->end_table_id + 1; - } else { - t_info->start_table_id = 0; - t_info->end_table_id = superTblInfo->childTblCount - 1; - t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint(); - } - - tsem_init(&(t_info->lock_sem), 0, 0); - - if (SYNC == g_Dbs.queryMode) { - pthread_create(pids + i, NULL, syncWrite, t_info); - } else { - pthread_create(pids + i, NULL, asyncWrite, t_info); - } - } - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infos + i; - - tsem_destroy(&(t_info->lock_sem)); - taos_close(t_info->taos); - - superTblInfo->totalAffectedRows += t_info->totalAffectedRows; - superTblInfo->totalRowsInserted += t_info->totalRowsInserted; - #ifdef TD_LOWA_CURL - if (t_info->curl_handle) { - curl_easy_cleanup(t_info->curl_handle); - } - #endif - } - - double end = getCurrentTime(); - - //taos_close(taos); - - free(pids); - free(infos); - - printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n", - end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName); - fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s\n\n", - end - start, superTblInfo->totalRowsInserted, superTblInfo->totalAffectedRows, threads, db_name, superTblInfo->sTblName); -} - - -void *readTable(void *sarg) { -#if 1 - threadInfo *rinfo = (threadInfo *)sarg; - TAOS *taos = rinfo->taos; - char command[BUFFER_SIZE] = "\0"; - int64_t sTime = rinfo->start_time; - char *tb_prefix = rinfo->tb_prefix; - FILE *fp = fopen(rinfo->fp, "a"); - if (NULL == fp) { - printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); - return NULL; - } - - int num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table; - int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; - int totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; - - int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; - if (!do_aggreFunc) { - printf("\nThe first field is either Binary or Bool. 
Aggregation functions are not supported.\n"); - } - printf("%d records:\n", totalData); - fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - - for (int j = 0; j < n; j++) { - double totalT = 0; - int count = 0; - for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); - - double t = getCurrentTime(); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - return NULL; - } - - while (taos_fetch_row(pSql) != NULL) { - count++; - } - - t = getCurrentTime() - t; - totalT += t; - - taos_free_result(pSql); - } - - fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n", - aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, - (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT); - } - fprintf(fp, "\n"); - fclose(fp); -#endif - return NULL; -} - -void *readMetric(void *sarg) { -#if 1 - threadInfo *rinfo = (threadInfo *)sarg; - TAOS *taos = rinfo->taos; - char command[BUFFER_SIZE] = "\0"; - FILE *fp = fopen(rinfo->fp, "a"); - if (NULL == fp) { - printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno)); - return NULL; - } - - int num_of_DPT = rinfo->superTblInfo->insertRows; - int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; - int totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; - - int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; - if (!do_aggreFunc) { - printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); - } - printf("%d records:\n", totalData); - fprintf(fp, "Querying On %d records:\n", totalData); - - for (int j = 0; j < n; j++) { - char condition[BUFFER_SIZE - 30] = "\0"; - char tempS[64] = "\0"; - - int m = 10 < num_of_tables ? 
10 : num_of_tables; - - for (int i = 1; i <= m; i++) { - if (i == 1) { - sprintf(tempS, "t1 = %d", i); - } else { - sprintf(tempS, " or t1 = %d ", i); - } - strcat(condition, tempS); - - sprintf(command, "select %s from meters where %s", aggreFunc[j], condition); - - printf("Where condition: %s\n", condition); - fprintf(fp, "%s\n", command); - - double t = getCurrentTime(); - - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - return NULL; - } - int count = 0; - while (taos_fetch_row(pSql) != NULL) { - count++; - } - t = getCurrentTime() - t; - - fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t); - - taos_free_result(pSql); - } - fprintf(fp, "\n"); - } - fclose(fp); -#endif - return NULL; -} - - -int insertTestProcess() { - - g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); - if (NULL == g_fpOfInsertResult) { - fprintf(stderr, "Failed to open %s for save result\n", g_Dbs.resultFile); - return 1; - }; - - printfInsertMeta(); - printfInsertMetaToFile(g_fpOfInsertResult); - - printf("Press enter key to continue\n\n"); - (void)getchar(); - - init_rand_data(); - - // create database and super tables - (void)createDatabases(); - - // pretreatement - prePareSampleData(); - - double start; - double end; - - // create child tables - start = getCurrentTime(); - createChildTables(); - end = getCurrentTime(); - if (g_totalChildTables > 0) { - printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount); - fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", end - start, g_totalChildTables, g_Dbs.threadCount); - } - - usleep(1000*1000); - - // create sub threads for inserting data - //start = getCurrentTime(); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; - startMultiThreadInsertData(g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, superTblInfo); - } - } - //end = getCurrentTime(); - - //int64_t totalRowsInserted = 0; - //int64_t totalAffectedRows = 0; - //for (int i = 0; i < g_Dbs.dbCount; i++) { - // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - // totalRowsInserted += g_Dbs.db[i].superTbls[j].totalRowsInserted; - // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; - //} - //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalRowsInserted, totalAffectedRows, g_Dbs.threadCount); - if (NULL == g_args.metaFile && false == g_Dbs.insert_only) { - // query data - pthread_t read_id; - threadInfo *rInfo = malloc(sizeof(threadInfo)); - rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 - rInfo->start_table_id = 0; - rInfo->end_table_id = g_Dbs.db[0].superTbls[0].childTblCount - 1; - //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; - //rInfo->nrecords_per_table = g_Dbs.db[0].superTbls[0].insertRows; - rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - rInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); - strcpy(rInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix); - strcpy(rInfo->fp, g_Dbs.resultFile); - - if (!g_Dbs.use_metric) { - 
pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); - } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - } - - postFreeResource(); - - return 0; -} - -void *superQueryProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - //char sqlStr[MAX_TB_NAME_SIZE*2]; - //sprintf(sqlStr, "use %s", g_queryInfo.dbName); - //queryDB(winfo->taos, sqlStr); - - int64_t st = 0; - int64_t et = 0; - while (1) { - if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - st = taosGetTimestampMs(); - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - int64_t t1 = taosGetTimestampUs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0); - } else { - #ifdef TD_LOWA_CURL - int64_t t1 = taosGetTimestampUs(); - int retCode = curlProceSql(g_queryInfo.host, g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i], winfo->curl_handle); - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRIu64"] complete one sql, Spent %f s\n", (uint64_t)pthread_self(), (t2 - t1)/1000000.0); - - if (0 != retCode) { - printf("====curl return fail, threadID[%d]\n", winfo->threadID); - return NULL; - } - #endif - } - } - et = taosGetTimestampMs(); - printf("==thread[%"PRIu64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", (uint64_t)pthread_self(), (double)(et - st)/1000.0); - } - return NULL; -} - -void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { - char sourceString[32] = "xxxx"; - char subTblName[MAX_TB_NAME_SIZE*3]; - sprintf(subTblName, "%s.%s", g_queryInfo.dbName, g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); - - //printf("inSql: %s\n", inSql); - - char* pos = strstr(inSql, sourceString); - if (0 == pos) { - return; - } - - strncpy(outSql, inSql, pos - inSql); - //printf("1: %s\n", outSql); - strcat(outSql, subTblName); - //printf("2: %s\n", outSql); - strcat(outSql, pos+strlen(sourceString)); - //printf("3: %s\n", outSql); -} - -void *subQueryProcess(void *sarg) { - char sqlstr[1024]; - threadInfo *winfo = (threadInfo *)sarg; - int64_t st = 0; - int64_t et = g_queryInfo.subQueryInfo.rate*1000; - while (1) { - if (g_queryInfo.subQueryInfo.rate && (et - st) < g_queryInfo.subQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - } - - st = taosGetTimestampMs(); - for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) { - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - memset(sqlstr,0,sizeof(sqlstr)); - replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] 
!= 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, sqlstr, tmpFile); - } - } - et = taosGetTimestampMs(); - printf("####thread[%"PRIu64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", (uint64_t)pthread_self(), winfo->start_table_id, winfo->end_table_id, (double)(et - st)/1000.0); - } - return NULL; -} - -int queryTestProcess() { - TAOS * taos = NULL; - taos_init(); - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); - } - - if (0 != g_queryInfo.subQueryInfo.sqlCount) { - (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount); - } - - printfQueryMeta(); - printf("Press enter key to continue\n\n"); - (void)getchar(); - - printfQuerySystemInfo(taos); - - pthread_t *pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from specify table - if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { - - pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - t_info->taos = taos; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - (void)queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE); - } else { - t_info->taos = NULL; - #ifdef TD_LOWA_CURL - t_info->curl_handle = curl_easy_init(); - #endif - } - - pthread_create(pids + i, NULL, superQueryProcess, t_info); - } - }else { - g_queryInfo.superQueryInfo.concurrent = 0; - } - - pthread_t *pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - //==== create sub threads for query from all sub table of the super table - if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - int ntables = g_queryInfo.subQueryInfo.childTblCount; - int threads = g_queryInfo.subQueryInfo.threadCnt; - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infosOfSub + i; - t_info->threadID = i; - - t_info->start_table_id = last; - t_info->end_table_id = i < b ? 
last + a : last + a - 1; - last = t_info->end_table_id + 1; - t_info->taos = taos; - pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); - } - - g_queryInfo.subQueryInfo.threadCnt = threads; - }else { - g_queryInfo.subQueryInfo.threadCnt = 0; - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); - } - - tmfree((char*)pids); - tmfree((char*)infos); - - for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - } - - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); - - taos_close(taos); - return 0; -} - -static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - if (res == NULL || taos_errno(res) != 0) { - printf("failed to subscribe result, code:%d, reason:%s\n", code, taos_errstr(res)); - return; - } - - getResult(res, (char*)param); - taos_free_result(res); -} - -static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { - TAOS_SUB* tsub = NULL; - - if (g_queryInfo.superQueryInfo.subscribeMode) { - tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, g_queryInfo.superQueryInfo.subscribeInterval); - } else { - tsub = taos_subscribe(taos, g_queryInfo.superQueryInfo.subscribeRestart, topic, sql, NULL, NULL, 0); - } - - if (tsub == NULL) { - printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql); - return NULL; - } - - return tsub; -} - -void *subSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - char subSqlstr[1024]; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){ - return NULL; - } - - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); - memset(subSqlstr,0,sizeof(subSqlstr)); - replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); - if (NULL == g_queryInfo.subQueryInfo.tsub[i]) { - return NULL; - } - } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", pthread_self(), (double)(et - st)/1000.0); - } while (0); - - // start loop to consume result - TAOS_RES* res = NULL; - while (1) { - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.subQueryInfo.subscribeMode) { - continue; - } - - res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]); - if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - getResult(res, tmpFile); - } - } - } - taos_free_result(res); - - for (int i = 0; i < 
g_queryInfo.subQueryInfo.sqlCount; i++) { - taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], g_queryInfo.subQueryInfo.subscribeKeepProgress); - } - return NULL; -} - -void *superSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) { - return NULL; - } - - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_id, winfo->end_table_id); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); - } - g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile); - if (NULL == g_queryInfo.superQueryInfo.tsub[i]) { - return NULL; - } - } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", pthread_self(), (double)(et - st)/1000.0); - } while (0); - - // start loop to consume result - TAOS_RES* res = NULL; - while (1) { - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.superQueryInfo.subscribeMode) { - continue; - } - - res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]); - if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); - } - getResult(res, tmpFile); - } - } - } - taos_free_result(res); - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress); - } - return NULL; -} - -int subscribeTestProcess() { - printfQueryMeta(); - - printf("Press enter key to continue\n\n"); - (void)getchar(); - - TAOS * taos = NULL; - taos_init(); - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port); - if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); - } - - if (0 != g_queryInfo.subQueryInfo.sqlCount) { - (void)getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, g_queryInfo.subQueryInfo.sTblName, &g_queryInfo.subQueryInfo.childTblName, &g_queryInfo.subQueryInfo.childTblCount); - } - - - pthread_t *pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from super table - if (g_queryInfo.superQueryInfo.sqlCount > 0 && g_queryInfo.superQueryInfo.concurrent > 0) { - pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; - t_info->taos = taos; - 
pthread_create(pids + i, NULL, superSubscribeProcess, t_info); - } - } - - //==== create sub threads for query from sub table - pthread_t *pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - if ((g_queryInfo.subQueryInfo.sqlCount > 0) && (g_queryInfo.subQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - printf("malloc failed for create threads\n"); - taos_close(taos); - exit(-1); - } - - int ntables = g_queryInfo.subQueryInfo.childTblCount; - int threads = g_queryInfo.subQueryInfo.threadCnt; - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } - - int last = 0; - for (int i = 0; i < threads; i++) { - threadInfo *t_info = infosOfSub + i; - t_info->threadID = i; - - t_info->start_table_id = last; - t_info->end_table_id = i < b ? last + a : last + a - 1; - t_info->taos = taos; - pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); - } - g_queryInfo.subQueryInfo.threadCnt = threads; - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); - } - - tmfree((char*)pids); - tmfree((char*)infos); - - for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - } - - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); - taos_close(taos); - return 0; -} - -void initOfInsertMeta() { - memset(&g_Dbs, 0, sizeof(SDbs)); - - // set default values - strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); - g_Dbs.port = 6030; - strncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); - strncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); - g_Dbs.threadCount = 2; - g_Dbs.use_metric = true; -} - -void initOfQueryMeta() { - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - - // set default values - strncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); - g_queryInfo.port = 6030; - strncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); - strncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); -} - -void setParaFromArg(){ - if (g_args.host) { - strcpy(g_Dbs.host, g_args.host); - } else { - strncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); - } - - if (g_args.user) { - strcpy(g_Dbs.user, g_args.user); - } - - if (g_args.password) { - strcpy(g_Dbs.password, g_args.password); - } - - if (g_args.port) { - g_Dbs.port = g_args.port; - } - - g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = 1; - - strncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); - g_Dbs.db[0].dbCfg.replica = g_args.replica; - strncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - - - strncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - - g_Dbs.use_metric = g_args.use_metric; - g_Dbs.insert_only = g_args.insert_only; - - g_Dbs.db[0].superTblCount = 1; - strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountByCreateTbl = 1; - g_Dbs.queryMode = g_args.mode; - - g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].insertRate = 0; - g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; - 
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; - strncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); - strncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); - strncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE); - strncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].timeStampStep = 10; - - // g_args.num_of_RPR; - g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; - g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE; - - g_Dbs.do_aggreFunc = true; - - char dataString[STRING_LEN]; - char **data_type = g_args.datatype; - - memset(dataString, 0, STRING_LEN); - - if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) { - g_Dbs.do_aggreFunc = false; - } - - g_Dbs.db[0].superTbls[0].columnCount = 0; - for (int i = 0; i < MAX_NUM_DATATYPE; i++) { - if (data_type[i] == NULL) { - break; - } - - strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; - g_Dbs.db[0].superTbls[0].columnCount++; - } - - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; - } else { - for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { - strncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; - g_Dbs.db[0].superTbls[0].columnCount++; - } - } - - if (g_Dbs.use_metric) { - strncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; - - strncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; - g_Dbs.db[0].superTbls[0].tagCount = 2; - } else { - g_Dbs.db[0].superTbls[0].tagCount = 0; - } -} - -/* Function to do regular expression check */ -static int regexMatch(const char *s, const char *reg, int cflags) { - regex_t regex; - char msgbuf[100] = {0}; - - /* Compile regular expression */ - if (regcomp(&regex, reg, cflags) != 0) { - printf("Fail to compile regex\n"); - exit(-1); - } - - /* Execute regular expression */ - int reti = regexec(&regex, s, 0, NULL, 0); - if (!reti) { - regfree(&regex); - return 1; - } else if (reti == REG_NOMATCH) { - regfree(&regex); - return 0; - } else { - regerror(reti, &regex, msgbuf, sizeof(msgbuf)); - printf("Regex match failed: %s\n", msgbuf); - regfree(&regex); - exit(-1); - } - - return 0; -} - -static int isCommentLine(char *line) { - if (line == NULL) return 1; - - return regexMatch(line, "^\\s*#.*", REG_EXTENDED); -} - -void querySqlFile(TAOS* taos, char* sqlFile) -{ - FILE *fp = fopen(sqlFile, "r"); - if (fp == NULL) { - printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); - return; - } - - int read_len = 0; - char * cmd = calloc(1, MAX_SQL_SIZE); - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - double t = getCurrentTime(); - - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= MAX_SQL_SIZE) continue; - line[--read_len] = '\0'; - - if (read_len == 0 || isCommentLine(line)) { // line starts with # - continue; - } - - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } - - memcpy(cmd + 
cmd_len, line, read_len); - queryDbExec(taos, cmd, NO_INSERT_TYPE); - memset(cmd, 0, MAX_SQL_SIZE); - cmd_len = 0; - } - - t = getCurrentTime() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t); - - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; -} - -int main(int argc, char *argv[]) { - parse_args(argc, argv, &g_args); - - if (g_args.metaFile) { - initOfInsertMeta(); - initOfQueryMeta(); - if (false == getInfoFromJsonFile(g_args.metaFile)) { - printf("Failed to read %s\n", g_args.metaFile); - return 1; - } - } else { - - memset(&g_Dbs, 0, sizeof(SDbs)); - g_jsonType = INSERT_MODE; - setParaFromArg(); - - if (NULL != g_args.sqlFile) { - TAOS* qtaos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, g_Dbs.db[0].dbName, g_Dbs.port); - querySqlFile(qtaos, g_args.sqlFile); - taos_close(qtaos); - return 0; - } - - (void)insertTestProcess(); - if (g_Dbs.insert_only) return 0; - - // select - - //printf("At present, there is no integration of taosdemo, please wait patiently!\n"); - return 0; - } - - if (INSERT_MODE == g_jsonType) { - if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); - (void)insertTestProcess(); - } else if (QUERY_MODE == g_jsonType) { - if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - (void)queryTestProcess(); - } else if (SUBSCRIBE_MODE == g_jsonType) { - if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - (void)subscribeTestProcess(); - } else { - ; - } - - taos_cleanup(); - return 0; -} - diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index b50ad85c08cf5fa35862ce42bcc441dc502c3166..58897b89e95743c802755c0476f3b2843a244a59 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/mnode/CMakeLists.txt b/src/mnode/CMakeLists.txt index fffc82c6ef2fb5741d81fb59e4e5fa271e3100f4..2df4708c239515febafc7a4f3ab3f63bd9e434e8 100644 --- a/src/mnode/CMakeLists.txt +++ b/src/mnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 89dc32f03a8d63592bf34ba7e0e44566e9bcb92a..17a4282d05935684df4ab6585fef5f2398a62979 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -356,7 +356,7 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pShow->bytes[cols] = 8; pSchema[cols].type = TSDB_DATA_TYPE_BIGINT; - strcpy(pSchema[cols].name, "time(us)"); + strcpy(pSchema[cols].name, "time"); pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt index f41719f2404297070656910653b65a3fbffa7916..ab8b0f76785c24e3385f49245f6e191b2d57cc40 100644 --- a/src/os/CMakeLists.txt +++ b/src/os/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/os/src/alpine/CMakeLists.txt b/src/os/src/alpine/CMakeLists.txt index daa0b3cf43d0de60fafc960e48e3ad8aeec1a9ad..b5e739c24ce7ec3ef3ffc537ca8769706f7b56de 100644 --- a/src/os/src/alpine/CMakeLists.txt +++ b/src/os/src/alpine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) 
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/darwin/CMakeLists.txt b/src/os/src/darwin/CMakeLists.txt index 7f05ddd64b87f69e5fa03c874bb2bc401e5094cc..c4cb28aa05e4716ca98c2687ce41d436b1300bb2 100644 --- a/src/os/src/darwin/CMakeLists.txt +++ b/src/os/src/darwin/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index 1c5e55a522250a1532e35075616e9efddb008217..facfbd23af7a579ed11655ce66dddd971677fb18 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(.) diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index 0b7b5ca487e5728a5dbdd93e45ee2b0593f98000..538ed378798f53c09c8cb8ada2f2a1a066d8b6bf 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -25,7 +25,8 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { char tmpPath[PATH_MAX]; int32_t len = strlen(tsTempDir); memcpy(tmpPath, tsTempDir, len); - + static uint64_t seqId = 0; + if (tmpPath[len - 1] != '/') { tmpPath[len++] = '/'; } @@ -36,8 +37,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcat(tmpPath, "-%d-%s"); } - char rand[8] = {0}; - taosRandStr(rand, tListLen(rand) - 1); + char rand[32] = {0}; + + sprintf(rand, "%"PRIu64, atomic_add_fetch_64(&seqId, 1)); + snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); } diff --git a/src/os/src/linux/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt index 8ab8f554672843eec34ed1880ba672e61e54aa7b..b1a7ebf54e58bbbdeea6d5cc219904916cc2ba03 100644 --- a/src/os/src/linux/CMakeLists.txt +++ b/src/os/src/linux/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/windows/CMakeLists.txt b/src/os/src/windows/CMakeLists.txt index a430dd3b3f968cd845732ead4aa1b780aea10c22..9dcc9e7e6d93ff200b7571d98724f898712658eb 100644 --- a/src/os/src/windows/CMakeLists.txt +++ b/src/os/src/windows/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. 
SRC) diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index e66997dc8ec37409c3bba94979255db523796667..7dcaaf27e615ead75e83630788288a27e938b0a9 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) ADD_SUBDIRECTORY(monitor) diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt index 42016b8645690e24d1bced4c31261b9d785269c5..bfb47ad12e8b1ef7099109ecf5849ec3575caf5f 100644 --- a/src/plugins/http/CMakeLists.txt +++ b/src/plugins/http/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index f71a84a5afe29e94d5bd9ba78638f295645d8e45..13f706af653e9c5c1b9fb0a4c602355b001d0cac 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -154,7 +154,10 @@ void httpReleaseContext(HttpContext *pContext, bool clearRes) { } if (clearRes) { - httpClearParser(pContext->parser); + if (pContext->parser) { + httpClearParser(pContext->parser); + } + memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd)); } HttpContext **ppContext = pContext->ppContext; @@ -185,9 +188,9 @@ void httpCloseContextByApp(HttpContext *pContext) { pContext->parsed = false; bool keepAlive = true; - if (parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) { + if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) { keepAlive = false; - } else if (parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) { + } else if (parser && parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) { keepAlive = false; } else { } diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index e537253f0d886dfd3d93d632fda43c77d49acf15..4ce54a8ee630579499fe498e5d8c155bb5916eea 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -229,7 +229,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const return 0; } - else if (strncasecmp(key, "Connection: ", 12) == 0) { + else if (strncasecmp(key, "Connection", 10) == 0) { if (strncasecmp(val, "Keep-Alive", 10) == 0) { parser->keepAlive = HTTP_KEEPALIVE_ENABLE; } else { diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index a84ae9f617d7ecd43c98d942dea95d99fd9ba901..399a33954d5670a7e928f856a898dcde1e4ac4eb 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -239,6 +239,10 @@ JsonBuf *httpMallocJsonBuf(HttpContext *pContext) { pContext->jsonBuf = (JsonBuf *)malloc(sizeof(JsonBuf)); } + if (!pContext->jsonBuf->pContext) { + pContext->jsonBuf->pContext = pContext; + } + return pContext->jsonBuf; } diff --git a/src/plugins/monitor/CMakeLists.txt b/src/plugins/monitor/CMakeLists.txt index abab07e0cd026d00499c5835e5446fdab9c16df7..28c62a099c0f2bea8b33a57c577bc89c7fb15aaa 100644 --- a/src/plugins/monitor/CMakeLists.txt +++ b/src/plugins/monitor/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 
424ab0f216162b00d96ae39fcb4351f3ffea75cf..ac80ad62509a755b5b86b65593a90a2730120df8 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -103,7 +103,9 @@ int32_t monInitSystem() { } int32_t monStartSystem() { - taos_init(); + if (taos_init()) { + return -1; + } tsMonitor.start = 1; monExecuteSQLFp = monExecuteSQL; monInfo("monitor module start"); diff --git a/src/plugins/mqtt/CMakeLists.txt b/src/plugins/mqtt/CMakeLists.txt index b6de4215170be827c9e049044508d586ae6a6977..50b0bbe8af4faeab41a7b041d6aa51747f0aab3e 100644 --- a/src/plugins/mqtt/CMakeLists.txt +++ b/src/plugins/mqtt/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index 967e86de3c5d9288834e3eb3b3222e551009bc49..f23ac7dd86932ba42dde7c2891865f7dff546a00 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index dbdada8952c82ae5ef7d703966f37432a593f7b9..7122f63593c64a6bffd6f3d53cd8ef21eca800ff 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -84,7 +84,7 @@ extern "C" { #define TSDB_FUNCSTATE_SO 0x1u // single output #define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM #define TSDB_FUNCSTATE_STREAM 0x4u // function avail for stream -#define TSDB_FUNCSTATE_STABLE 0x8u // function avail for metric +#define TSDB_FUNCSTATE_STABLE 0x8u // function avail for super table #define TSDB_FUNCSTATE_OF 0x10u // outer forward #define TSDB_FUNCSTATE_NEED_TS 0x20u // timestamp is required during query processing #define TSDB_FUNCSTATE_SELECTIVITY 0x40u // selectivity functions, can exists along with tag columns @@ -166,9 +166,8 @@ typedef struct SExtTagsInfo { // sql function runtime context typedef struct SQLFunctionCtx { - int32_t startOffset; // todo remove it int32_t size; // number of rows - void * pInput; // + void * pInput; // input data buffer uint32_t order; // asc|desc int16_t inputType; int16_t inputBytes; @@ -184,7 +183,7 @@ typedef struct SQLFunctionCtx { uint8_t currentStage; // record current running step, default: 0 int64_t startTs; // timestamp range of current query when function is executed on a specific data block int32_t numOfParams; - tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param */ + tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param int64_t *ptsList; // corresponding timestamp array list void *ptsOutputBuf; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/ SQLPreAggVal preAggVals; @@ -228,7 +227,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI #define IS_SINGLEOUTPUT(x) (((x)&TSDB_FUNCSTATE_SO) != 0) #define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0) -/* determine the real data need to calculated the result */ +// determine the real data need to calculated the result enum { BLK_DATA_NO_NEEDED = 0x0, BLK_DATA_STATIS_NEEDED = 0x1, diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 80068588f719816d847f14bcaad714e107958601..ec1261da0a45ad24985ebf51b9b16c2acfad7709 100644 --- a/src/query/inc/qExecutor.h +++ 
b/src/query/inc/qExecutor.h @@ -33,6 +33,36 @@ struct SColumnFilterElem; typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, const char* val1, const char* val2, int16_t type); typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order); +#define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) +#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u) +#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP) + +#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) +#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables)) + +#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) + +enum { + // when query starts to execute, this status will set + QUERY_NOT_COMPLETED = 0x1u, + + /* result output buffer is full, current query is paused. + * this status is only exist in group-by clause and diff/add/division/multiply/ query. + */ + QUERY_RESBUF_FULL = 0x2u, + + /* query is over + * 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc. + * 2. when all data within queried time window, it is also denoted as query_completed + */ + QUERY_COMPLETED = 0x4u, + + /* when the result is not completed return to client, this status will be + * usually used in case of interval query with interpolation option + */ + QUERY_OVER = 0x8u, +}; + typedef struct SResultRowPool { int32_t elemSize; int32_t blockSize; @@ -66,7 +96,8 @@ typedef struct SResultRow { } SResultRow; typedef struct SGroupResInfo { - int32_t rowId; + int32_t totalGroup; + int32_t currentGroup; int32_t index; SArray* pRows; // SArray } SGroupResInfo; @@ -112,7 +143,7 @@ typedef struct STableQueryInfo { STimeWindow win; STSCursor cur; void* pTable; // for retrieve the page id list - SResultRowInfo windowResInfo; + SResultRowInfo resInfo; } STableQueryInfo; typedef struct SQueryCostInfo { @@ -193,7 +224,7 @@ typedef struct SQueryRuntimeEnv { uint16_t* offset; uint16_t scanFlag; // denotes reversed scan of data or not SFillInfo* pFillInfo; - SResultRowInfo windowResInfo; + SResultRowInfo resultRowInfo; SQueryCostInfo summary; void* pQueryHandle; @@ -204,7 +235,8 @@ typedef struct SQueryRuntimeEnv { bool hasTagResults; // if there are tag values in final result or not bool timeWindowInterpo;// if the time window start/end required interpolation bool queryWindowIdentical; // all query time windows are identical for all tables in one group - bool queryBlockDist; // if query data block distribution + bool queryBlockDist; // if query data block distribution + bool stabledev; // super table stddev query int32_t interBufSize; // intermediate buffer sizse int32_t prevGroupId; // previous executed group id SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file @@ -257,4 +289,51 @@ typedef struct SQInfo { char* sql; // query sql string } SQInfo; +typedef struct SQueryParam { + char *sql; + char *tagCond; + char *tbnameCond; + char *prevResult; + SArray *pTableIdList; + SSqlFuncMsg **pExprMsg; + SSqlFuncMsg **pSecExprMsg; + SExprInfo *pExprs; + SExprInfo *pSecExprs; + + SColIndex *pGroupColIndex; + SColumnInfo *pTagColumnInfo; + SSqlGroupbyExpr *pGroupbyExpr; +} SQueryParam; + +void freeParam(SQueryParam *param); +int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param); +int32_t createQueryFuncExprFromMsg(SQueryTableMsg 
*pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, + SColumnInfo* pTagCols); +SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code); +SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql); +int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable); +void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters); + +bool isQueryKilled(SQInfo *pQInfo); +int32_t checkForQueryBuf(size_t numOfTables); +bool doBuildResCheck(SQInfo* pQInfo); +void setQueryStatus(SQuery *pQuery, int8_t status); + +bool onlyQueryTags(SQuery* pQuery); +void buildTagQueryResult(SQInfo *pQInfo); +void stableQueryImpl(SQInfo *pQInfo); +void buildTableBlockDistResult(SQInfo *pQInfo); +void tableQueryImpl(SQInfo *pQInfo); +bool isValidQInfo(void *param); + +int32_t doDumpQueryResult(SQInfo *pQInfo, char *data); + +size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows); +void setQueryKilled(SQInfo *pQInfo); +void queryCostStatis(SQInfo *pQInfo); +void freeQInfo(SQInfo *pQInfo); + +int32_t getMaximumIdleDurationSec(); + #endif // TDENGINE_QUERYEXECUTOR_H diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 77647813d688183edc343c349969c15c26edfb4d..bcc876c953777f465ecb761f19256a86bc375b1d 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -76,6 +76,7 @@ typedef struct SQuerySQL { typedef struct SCreatedTableInfo { SStrToken name; // table name token SStrToken stableName; // super table name token , for using clause + SArray *pTagNames; // create by using super table, tag name SArray *pTagVals; // create by using super table, tag value char *fullname; // table full name STagData tagdata; // true tag data, super table full name is in STagData @@ -231,6 +232,8 @@ SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t s tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType); +tSQLExpr *tSqlExprClone(tSQLExpr *pSrc); + void tSqlExprDestroy(tSQLExpr *pExpr); tSQLExprList *tSqlExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pDistinct, SStrToken *pToken); @@ -246,7 +249,7 @@ SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSe void tSqlExprNodeDestroy(tSQLExpr *pExpr); SAlterTableInfo * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable); -SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists); +SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagNames, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists); void destroyAllSelectClause(SSubclauseInfo *pSql); void doDestroyQuerySql(SQuerySQL *pSql); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 55311f569444e8b1b8d9f5cd2fe90e37686ead68..d4a0c25886ad63ff5cc3e79cb0a9e84c156197cd 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -85,4 +85,12 @@ void interResToBinary(SBufferWriter* bw, SArray* pRes, int32_t tagLen); SArray* interResFromBinary(const char* data, int32_t len); void freeInterResult(void* param); +void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset); +void cleanupGroupResInfo(SGroupResInfo* 
pGroupResInfo); +bool hasRemainData(SGroupResInfo* pGroupResInfo); +bool incNextGroup(SGroupResInfo* pGroupResInfo); +int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); + +int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo); + #endif // TDENGINE_QUERYUTIL_H diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index cd02d4fb002a1810f1b792b64aa8ff522f696796..8a01a736b73a90ba73c5c952601cdb220765d9e6 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -356,9 +356,20 @@ create_stable_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP T create_from_stable(A) ::= ifnotexists(U) ids(V) cpxName(Z) USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { X.n += F.n; V.n += Z.n; - A = createNewChildTableInfo(&X, Y, &V, &U); + A = createNewChildTableInfo(&X, NULL, Y, &V, &U); } +create_from_stable(A) ::= ifnotexists(U) ids(V) cpxName(Z) USING ids(X) cpxName(F) LP tagNamelist(P) RP TAGS LP tagitemlist(Y) RP. { + X.n += F.n; + V.n += Z.n; + A = createNewChildTableInfo(&X, P, Y, &V, &U); +} + +%type tagNamelist{SArray*} +%destructor tagNamelist {taosArrayDestroy($$);} +tagNamelist(A) ::= tagNamelist(X) COMMA ids(Y). {taosArrayPush(X, &Y); A = X; } +tagNamelist(A) ::= ids(X). {A = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(A, &X);} + // create stream // create table table_name as select count(*) from super_table_name interval(time) create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) AS select(S). { @@ -663,6 +674,8 @@ expr(A) ::= expr(X) GE expr(Y). {A = tSqlExprCreate(X, Y, TK_GE);} expr(A) ::= expr(X) NE expr(Y). {A = tSqlExprCreate(X, Y, TK_NE);} expr(A) ::= expr(X) EQ expr(Y). {A = tSqlExprCreate(X, Y, TK_EQ);} +expr(A) ::= expr(X) BETWEEN expr(Y) AND expr(Z). { tSQLExpr* X2 = tSqlExprClone(X); A = tSqlExprCreate(tSqlExprCreate(X, Y, TK_GE), tSqlExprCreate(X2, Z, TK_LE), TK_AND);} + expr(A) ::= expr(X) AND expr(Y). {A = tSqlExprCreate(X, Y, TK_AND);} expr(A) ::= expr(X) OR expr(Y). 
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index d43b5f45e8a6a40144a5e71eb2a8a2efc27ec28e..2d5287fb937f54c485358c6611d375b3616fa65a 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -26,10 +26,12 @@
 #include "qTsbuf.h"
 #include "queryLog.h"
 
-#define GET_INPUT_DATA_LIST(x) (((char *)((x)->pInput)) + ((x)->startOffset) * ((x)->inputBytes))
+//#define GET_INPUT_DATA_LIST(x) (((char *)((x)->pInput)) + ((x)->startOffset) * ((x)->inputBytes))
+#define GET_INPUT_DATA_LIST(x) ((char *)((x)->pInput))
 #define GET_INPUT_DATA(x, y) (GET_INPUT_DATA_LIST(x) + (y) * (x)->inputBytes)
 
-#define GET_TS_LIST(x)    ((TSKEY*)&((x)->ptsList[(x)->startOffset]))
+//#define GET_TS_LIST(x)    ((TSKEY*)&((x)->ptsList[(x)->startOffset]))
+#define GET_TS_LIST(x)    ((TSKEY*)((x)->ptsList))
 #define GET_TS_DATA(x, y) (GET_TS_LIST(x)[(y)])
 
 #define GET_TRUE_DATA_TYPE()  \
@@ -379,11 +381,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) {
 static void function_finalizer(SQLFunctionCtx *pCtx) {
   SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
   if (pResInfo->hasResult != DATA_SET_FLAG) {
-    if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
-      setVardataNull(pCtx->pOutput, pCtx->outputType);
-    } else {
-      setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
-    }
+    setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
   }
 
   doFinalizer(pCtx);
@@ -414,10 +412,7 @@ static void count_function(SQLFunctionCtx *pCtx) {
         numOfElem += 1;
       }
     } else {
-      /*
-       * when counting on the primary time stamp column and no statistics data is provided,
-       * simple use the size value
-       */
+      // when counting on the primary timestamp column and no statistics data is present, use the size value directly.
       numOfElem = pCtx->size;
     }
   }
@@ -944,9 +939,9 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
    *
    * The following codes of 3 lines will be removed later.
    */
-  if (index < 0 || index >= pCtx->size + pCtx->startOffset) {
-    index = 0;
-  }
+//  if (index < 0 || index >= pCtx->size + pCtx->startOffset) {
+//    index = 0;
+//  }
 
   // the index is the original position, not the relative position
   key = pCtx->ptsList[index];
@@ -1569,8 +1564,10 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) {
     avg = p->avg;
   } else {  // todo opt performance by using iterator since the timestamp lsit is matched with the output result
     SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare);
-    assert(p != NULL);
-
+    if (p == NULL) {
+      return;
+    }
+
     avg = p->avg;
   }
 
@@ -1635,6 +1632,97 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) {
   memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo));
 }
 
+static void stddev_dst_function_f(SQLFunctionCtx *pCtx, int32_t index) {
+  void *pData = GET_INPUT_DATA(pCtx, index);
+  if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
+    return;
+  }
+
+  // the second stage to calculate standard deviation
+  SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+  double *retVal = &pStd->res;
+
+  // no first-stage result is available, no need to proceed
+  SArray* resList = (SArray*) pCtx->param[0].pz;
+  if (resList == NULL) {
+    return;
+  }
+
+  // find the correct group average results according to the tag value
+  int32_t len = (int32_t) taosArrayGetSize(resList);
+  assert(len > 0);
+
+  double avg = 0;
+  if (len == 1) {
+    SResPair* p = taosArrayGet(resList, 0);
+    avg = p->avg;
+  } else {  // todo opt performance by using iterator since the timestamp list is matched with the output result
+    SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare);
+    assert(p != NULL);
+
+    avg = p->avg;
+  }
+
+  int32_t num = 0;
+  switch (pCtx->inputType) {
+    case TSDB_DATA_TYPE_INT: {
+      for (int32_t i = 0; i < pCtx->size; ++i) {
+        if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) {
+          continue;
+        }
+        num += 1;
+        *retVal += POW2(((int32_t *)pData)[i] - avg);
+      }
+      break;
+    }
+    case TSDB_DATA_TYPE_FLOAT: {
+      LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_DOUBLE: {
+      LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_TINYINT: {
+      LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_UTINYINT: {
+      LOOP_STDDEV_IMPL(uint8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_SMALLINT: {
+      LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_USMALLINT: {
+      LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_UINT: {
+      LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_BIGINT: {
+      LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    case TSDB_DATA_TYPE_UBIGINT: {
+      LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num);
+      break;
+    }
+    default:
+      qError("stddev function does not support data type:%d", pCtx->inputType);
+  }
+
+  pStd->num += num;
+  SET_VAL(pCtx, num, 1);
+
+  // copy to the final output buffer for super table
+  memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo));
+}
+
+
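Note: stddev over a super table is evaluated in two stages. The first stage publishes per-group averages (the SResPair list that arrives through pCtx->param[0]); stddev_dst_function_f above is the row-wise second stage, accumulating the element count and the running sum of squared deviations into SStddevdstInfo. The finalizer then only needs the population formula; a minimal sketch of that last step (finishStddev is illustrative — the real work happens in stddev_dst_finalizer):

#include <math.h>

// res and num mirror the SStddevdstInfo fields accumulated above
static double finishStddev(double sumOfSquaredDiff, int64_t num) {
  return (num > 0) ? sqrt(sumOfSquaredDiff / (double)num) : 0.0;
}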
 static void stddev_dst_merge(SQLFunctionCtx *pCtx) {
   SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
   SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo);
@@ -2486,12 +2574,16 @@ static void bottom_function(SQLFunctionCtx *pCtx) {
 
   STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
 
+  if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
+    buildTopBotStruct(pRes, pCtx);
+  }
+
   for (int32_t i = 0; i < pCtx->size; ++i) {
     char *data = GET_INPUT_DATA(pCtx, i);
     TSKEY ts = GET_TS_DATA(pCtx, i);
 
     if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
-      continue; 
+      continue;
     }
 
     notNullElems++;
@@ -2520,6 +2612,11 @@ static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) {
   }
 
   STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
+
+  if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
+    buildTopBotStruct(pRes, pCtx);
+  }
+
   SET_VAL(pCtx, 1, 1);
   do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0);
@@ -3487,9 +3584,7 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
   SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
 
   arithmeticTreeTraverse(sas->pArithExpr->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
-  pCtx->pOutput += pCtx->outputBytes * pCtx->size;
-  pCtx->param[1].pz = NULL;
 }
 
 static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
@@ -3592,6 +3687,14 @@ static void spread_function(SQLFunctionCtx *pCtx) {
       LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, double, pCtx->inputType, numOfElems);
     } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
       LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, float, pCtx->inputType, numOfElems);
+    } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) {
+      LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint8_t, pCtx->inputType, numOfElems);
+    } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) {
+      LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint16_t, pCtx->inputType, numOfElems);
+    } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) {
+      LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint32_t, pCtx->inputType, numOfElems);
+    } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) {
+      LIST_MINMAX_N(pCtx, pInfo->min, pInfo->max, pCtx->size, pData, uint64_t, pCtx->inputType, numOfElems);
     }
 
     if (!pCtx->hasNull) {
@@ -3977,6 +4080,12 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
     } else {
       assignVal(pCtx->pOutput, pCtx->start.ptr, pCtx->outputBytes, pCtx->inputType);
     }
+  } else if (type == TSDB_FILL_NEXT) {
+    if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
+      SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->end.val);
+    } else {
+      assignVal(pCtx->pOutput, pCtx->end.ptr, pCtx->outputBytes, pCtx->inputType);
+    }
   } else if (type == TSDB_FILL_LINEAR) {
     SPoint point1 = {.key = pCtx->start.key, .val = &pCtx->start.val};
     SPoint point2 = {.key = pCtx->end.key,   .val = &pCtx->end.val};
@@ -4836,7 +4945,7 @@ SAggFunctionInfo aAggs[] = {{
                               TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE,
                               function_setup,
                               stddev_dst_function,
-                              noop2,
+                              stddev_dst_function_f,
                               no_next_step,
                               stddev_dst_finalizer,
                               stddev_dst_merge,
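Note: the GET_INPUT_DATA_LIST / GET_TS_LIST macro change at the top of qAggMain.c is one half of a calling-convention change that qExecutor.c below completes: pCtx->pInput and pCtx->ptsList now point directly at the first qualifying row, so startOffset no longer has to be re-applied inside every aggregate. A self-contained illustration of the convention (toy code, not from the repository):

#include <stdint.h>
#include <stdio.h>

// Under the new convention the callee starts reading at pInput[0];
// the caller has already applied the row offset.
static int64_t sumColumn(const int32_t *pInput, int32_t size) {
  int64_t sum = 0;
  for (int32_t i = 0; i < size; ++i) {
    sum += pInput[i];
  }
  return sum;
}

int main(void) {
  int32_t block[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int32_t pos = 3, size = 4;                                   // rows 3..6 qualify
  printf("%lld\n", (long long)sumColumn(block + pos, size));   // caller pre-offsets
  return 0;
}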
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 9b6d2b779199d0629e66242ab17b95416e6c9a73..3d3e7295b9faef9899fc608df179b01c83ffada0 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -15,7 +15,6 @@
 #include "os.h"
 #include "qFill.h"
 #include "taosmsg.h"
-#include "tcache.h"
 #include "tglobal.h"
 
 #include "exception.h"
@@ -24,11 +23,9 @@
 #include "qExecutor.h"
 #include "qResultbuf.h"
 #include "qUtil.h"
-#include "query.h"
 #include "queryLog.h"
 #include "tlosertree.h"
 #include "ttype.h"
-#include "tcompare.h"
 
 #define MAX_ROWS_PER_RESBUF_PAGE  ((1u<<12) - 1)
 
@@ -36,8 +33,6 @@
  * check if the primary column is load by default, otherwise, the program will
  * forced to load primary column explicitly.
  */
-#define Q_STATUS_EQUAL(p, s)  (((p) & (s)) != 0u)
-#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
 
 #define IS_MASTER_SCAN(runtime)  ((runtime)->scanFlag == MASTER_SCAN)
 #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
@@ -56,27 +51,6 @@
     (_dst).ekey = (_src).ekey;\
   } while (0)
 
-enum {
-  // when query starts to execute, this status will set
-  QUERY_NOT_COMPLETED = 0x1u,
-
-  /* result output buffer is full, current query is paused.
-   * this status is only exist in group-by clause and diff/add/division/multiply/ query.
-   */
-  QUERY_RESBUF_FULL = 0x2u,
-
-  /* query is over
-   * 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc.
-   * 2. when all data within queried time window, it is also denoted as query_completed
-   */
-  QUERY_COMPLETED = 0x4u,
-
-  /* when the result is not completed return to client, this status will be
-   * usually used in case of interval query with interpolation option
-   */
-  QUERY_OVER = 0x8u,
-};
-
 enum {
   TS_JOIN_TS_EQUAL       = 0,
   TS_JOIN_TS_NOT_EQUALS  = 1,
@@ -134,13 +108,11 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
 #define CLEAR_QUERY_STATUS(q, st)   ((q)->status &= (~(st)))
 #define GET_NUM_OF_TABLEGROUP(q)    taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
-#define GET_TABLEGROUP(q, _index)   ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
 #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
 
-static void setQueryStatus(SQuery *pQuery, int8_t status);
 static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
 
-static int32_t getMaximumIdleDurationSec() {
+int32_t getMaximumIdleDurationSec() {
   return tsShellActivityTimer * 2;
 }
@@ -181,27 +153,19 @@ static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
   tw->ekey -= 1;
 }
 
-#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
-#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
-
-// todo move to utility
-static int32_t mergeIntoGroupResultImpl(SGroupResInfo* pGroupResInfo, SArray *pTableList, SQInfo* pQInfo);
-
 static void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult);
 static void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult);
 static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId);
 static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo,
-                          SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId);
+                          SDataStatis *pStatis, SExprInfo* pExprInfo);
 
 static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
 static void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo);
 static void resetDefaultResInfoOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
 static bool hasMainOutput(SQuery *pQuery);
-static void buildTagQueryResult(SQInfo *pQInfo);
 
 static int32_t setTimestampListJoinInfo(SQInfo *pQInfo, STableQueryInfo *pTableQueryInfo);
-static int32_t checkForQueryBuf(size_t numOfTables);
 static void releaseQueryBuf(size_t numOfTables);
 static int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order);
 static void doRowwiseTimeWindowInterpolation(SQueryRuntimeEnv* pRuntimeEnv, SArray* pDataBlock, TSKEY prevTs, int32_t prevRowIndex, TSKEY curTs, int32_t curRowIndex, TSKEY windowKey, int32_t type);
@@ -296,11 +260,6 @@ void updateNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfRes) {
   }
 }
 
-static UNUSED_FUNC int32_t getMergeResultGroupId(int32_t groupIndex) {
-  int32_t base = 50000000;
-  return base + (groupIndex * 10000);
-}
-
 bool isGroupbyColumn(SSqlGroupbyExpr *pGroupbyExpr) {
   if (pGroupbyExpr == NULL || pGroupbyExpr->numOfGroupCols == 0) {
     return false;
@@ -321,6 +280,17 @@ bool isGroupbyColumn(SSqlGroupbyExpr *pGroupbyExpr) {
   return false;
 }
 
+bool isStabledev(SQuery* pQuery) {
+  for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+    int32_t functId = pQuery->pExpr1[i].base.functionId;
+    if (functId == TSDB_FUNC_STDDEV_DST) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
 int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) {
   assert(pGroupbyExpr != NULL);
 
@@ -345,7 +315,7 @@ int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) {
   return type;
 }
 
-bool isSelectivityWithTagsQuery(SQuery *pQuery) {
+static bool isSelectivityWithTagsQuery(SQuery *pQuery) {
   bool    hasTags = false;
   int32_t numOfSelectivity = 0;
 
@@ -368,7 +338,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) {
   return false;
 }
 
-bool isProjQuery(SQuery *pQuery) {
+static bool isProjQuery(SQuery *pQuery) {
   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     int32_t functId = pQuery->pExpr1[i].base.functionId;
     if (functId != TSDB_FUNC_PRJ && functId != TSDB_FUNC_TAGPRJ) {
@@ -379,17 +349,14 @@ bool isProjQuery(SQuery *pQuery) {
   return true;
 }
 
-bool isTsCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; }
-
-static bool limitOperator(SQueryRuntimeEnv* pRuntimeEnv) {
-  SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
-  SQuery* pQuery = pRuntimeEnv->pQuery;
+static bool isTsCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; }
 
+static bool limitOperator(SQuery* pQuery, void* qinfo) {
   if ((pQuery->limit.limit > 0) && (pQuery->rec.total + pQuery->rec.rows > pQuery->limit.limit)) {
     pQuery->rec.rows = pQuery->limit.limit - pQuery->rec.total;
 
     qDebug("QInfo:%p discard remain data due to result limitation, limit:%"PRId64", current return:%" PRId64 ", total:%"PRId64,
-        pQInfo, pQuery->limit.limit, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
+        qinfo, pQuery->limit.limit, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
 
     assert(pQuery->rec.rows >= 0);
     setQueryStatus(pQuery, QUERY_COMPLETED);
     return true;
@@ -643,7 +610,7 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf
 }
 
 static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win,
-                                       bool masterscan, SResultRow** pResult, int64_t groupId) {
+                                       bool masterscan, SResultRow **pResult, int64_t groupId) {
   assert(win->skey <= win->ekey);
   SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;
 
@@ -826,9 +793,11 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
   return num;
 }
 
-// TODO decouple the data block and the SQLFunctionCtx
-static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pWin, int32_t offset, int32_t forwardStep, TSKEY *tsCol, int32_t numOfTotal) {
-  SQuery *pQuery = pRuntimeEnv->pQuery;
+static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size, SArray *pDataBlock);
+
+static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow *pWin, int32_t offset,
+                                      int32_t forwardStep, TSKEY *tsCol, int32_t numOfTotal, SArray *pDataBlock) {
+  SQuery *        pQuery = pRuntimeEnv->pQuery;
   SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx;
 
   bool hasPrev = pCtx[0].preAggVals.isSet;
@@ -836,7 +805,17 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
   for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
     pCtx[k].size = forwardStep;
     pCtx[k].startTs = pWin->skey;
-    pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
+
+    char *dataBlock = getDataBlock(pRuntimeEnv, &pRuntimeEnv->sasArray[k], k, numOfTotal, pDataBlock);
+
+    int32_t pos = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1);
+    if (dataBlock != NULL) {
+      pCtx[k].pInput = (char *)dataBlock + pos * pCtx[k].inputBytes;
+    }
+
+    if (tsCol != NULL) {
+      pCtx[k].ptsList = &tsCol[pos];
+    }
 
     int32_t functionId = pQuery->pExpr1[k].base.functionId;
@@ -976,6 +955,7 @@ static void* getDataBlockImpl(SArray* pDataBlock, int32_t colId) {
   return NULL;
 }
 
+// todo refactor
 static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size, SArray *pDataBlock) {
   if (pDataBlock == NULL) {
     return NULL;
@@ -1190,10 +1170,9 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
     tsCols = (TSKEY *)(pColInfo->pData);
   }
 
-  SQInfo *pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
   for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
     char *dataBlock = getDataBlock(pRuntimeEnv, &pRuntimeEnv->sasArray[k], k, pDataBlockInfo->rows, pDataBlock);
-    setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pRuntimeEnv->sasArray[k], k, pQInfo->vgId);
+    setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pQuery->pExpr1[k]);
   }
 
   int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
@@ -1234,7 +1213,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
         setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
         setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP);
 
-        doBlockwiseApplyFunctions(pRuntimeEnv, &w, startPos, 0, tsCols, pDataBlockInfo->rows);
+        doBlockwiseApplyFunctions(pRuntimeEnv, &w, startPos, 0, tsCols, pDataBlockInfo->rows, pDataBlock);
       }
 
       // restore current time window
@@ -1244,7 +1223,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
 
     // window start key interpolation
     doWindowBorderInterpolation(pRuntimeEnv, pDataBlockInfo, pDataBlock, pResult, &win, pQuery->pos, forwardStep);
-    doBlockwiseApplyFunctions(pRuntimeEnv, &win, startPos, forwardStep, tsCols, pDataBlockInfo->rows);
+    doBlockwiseApplyFunctions(pRuntimeEnv, &win, startPos, forwardStep, tsCols, pDataBlockInfo->rows, pDataBlock);
 
     STimeWindow nextWin = win;
     while (1) {
@@ -1265,7 +1244,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
 
       // window start(end) key interpolation
       doWindowBorderInterpolation(pRuntimeEnv, pDataBlockInfo, pDataBlock, pResult, &nextWin, startPos, forwardStep);
-      doBlockwiseApplyFunctions(pRuntimeEnv, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows);
+      doBlockwiseApplyFunctions(pRuntimeEnv, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows, pDataBlock);
     }
   } else {
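Note: doBlockwiseApplyFunctions() now takes the raw pDataBlock so the input pointers can be rebound per time window; setting pInput once per block in setExecParams() is no longer enough when each window starts at a different row. The only subtle part is the window-local start position for descending scans, shown in isolation below (toy code mirroring the pos computation above):

#include <stdio.h>

// Ascending windows start at 'offset'; descending windows end there and
// extend backwards over forwardStep rows.
static int firstRowPos(int asc, int offset, int forwardStep) {
  return asc ? offset : offset - (forwardStep - 1);
}

int main(void) {
  printf("asc:  %d\n", firstRowPos(1, 10, 4));   // rows 10..13 -> 10
  printf("desc: %d\n", firstRowPos(0, 10, 4));   // rows 7..10  -> 7
  return 0;
}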
@@ -1305,7 +1284,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
     return -1;
   }
 
-  SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, d, len, true, groupIndex);
+  SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->resultRowInfo, d, len, true, groupIndex);
   assert (pResultRow != NULL);
 
   int64_t v = -1;
@@ -1556,7 +1535,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
   SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv);
   for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
     char *dataBlock = getDataBlock(pRuntimeEnv, &pRuntimeEnv->sasArray[k], k, pDataBlockInfo->rows, pDataBlock);
-    setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pRuntimeEnv->sasArray[k], k, pQInfo->vgId);
+    setExecParams(pQuery, &pCtx[k], dataBlock, tsCols, pDataBlockInfo, pStatis, &pQuery->pExpr1[k]);
     pCtx[k].size = 1;
   }
 
@@ -1669,8 +1648,9 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
       pWindowResInfo->curIndex = index;
     } else {  // other queries
       // decide which group this rows belongs to according to current state value
+      char* val = NULL;
       if (groupbyColumnValue) {
-        char *val = groupbyColumnData + bytes * offset;
+        val = groupbyColumnData + bytes * offset;
         if (isNull(val, type)) {  // ignore the null value
           continue;
         }
@@ -1681,6 +1661,34 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS
       }
     }
 
+    if (pRuntimeEnv->stabledev) {
+      for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
+        int32_t functionId = pQuery->pExpr1[k].base.functionId;
+        if (functionId != TSDB_FUNC_STDDEV_DST) {
+          continue;
+        }
+
+        pRuntimeEnv->pCtx[k].param[0].arr = NULL;
+        pRuntimeEnv->pCtx[k].param[0].nType = TSDB_DATA_TYPE_INT;  // avoid freeing the memory by setting the type to be int
+
+        // todo opt perf
+        int32_t numOfGroup = (int32_t)taosArrayGetSize(pRuntimeEnv->prevResult);
+        for (int32_t i = 0; i < numOfGroup; ++i) {
+          SInterResult *p = taosArrayGet(pRuntimeEnv->prevResult, i);
+          if (memcmp(p->tags, val, bytes) == 0) {
+            int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult);
+            for (int32_t f = 0; f < numOfCols; ++f) {
+              SStddevInterResult *pres = taosArrayGet(p->pResult, f);
+              if (pres->colId == pQuery->pExpr1[k].base.colInfo.colId) {
+                pRuntimeEnv->pCtx[k].param[0].arr = pres->pResult;
+                break;
+              }
+            }
+          }
+        }
+      }
+    }
+
     for (int32_t k = 0; k < pQuery->numOfOutput; ++k) {
       int32_t functionId = pQuery->pExpr1[k].base.functionId;
       if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
@@ -1723,7 +1731,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
   SQuery *pQuery = pRuntimeEnv->pQuery;
 
   STableQueryInfo* pTableQueryInfo = pQuery->current;
-  SResultRowInfo*  pResultRowInfo = &pRuntimeEnv->windowResInfo;
+  SResultRowInfo*  pResultRowInfo = &pRuntimeEnv->resultRowInfo;
 
   if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->groupbyColumn) {
     rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pResultRowInfo, pDataBlock);
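Note: the stabledev branch above rebinds pCtx->param[0].arr for every output row of a GROUP BY query, because each group has its own first-stage average list; setting param[0].nType to TSDB_DATA_TYPE_INT beforehand is a deliberate trick so the borrowed array is never freed through the tVariant. The lookup itself is a linear scan per row (hence the "todo opt perf"); factored out, it is equivalent to this sketch (hypothetical helper name, structures as used above, colId type assumed):

#include <string.h>

static SArray *findGroupStddevResult(SArray *prevResult, const char *tagVal, int32_t bytes, int16_t colId) {
  int32_t numOfGroup = (int32_t)taosArrayGetSize(prevResult);
  for (int32_t i = 0; i < numOfGroup; ++i) {
    SInterResult *p = taosArrayGet(prevResult, i);
    if (memcmp(p->tags, tagVal, bytes) != 0) {
      continue;                        // not the current group
    }

    int32_t numOfCols = (int32_t)taosArrayGetSize(p->pResult);
    for (int32_t f = 0; f < numOfCols; ++f) {
      SStddevInterResult *pres = taosArrayGet(p->pResult, f);
      if (pres->colId == colId) {
        return pres->pResult;          // the group's (ts, avg) pairs
      }
    }
  }
  return NULL;                         // no first-stage result for this group
}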
@@ -1767,14 +1775,13 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
 }
 
 void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo,
-                   SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId) {
+                   SDataStatis *pStatis, SExprInfo* pExprInfo) {
 
-  int32_t functionId = pQuery->pExpr1[colIndex].base.functionId;
-  int32_t colId = pQuery->pExpr1[colIndex].base.colInfo.colId;
+  int32_t functionId = pExprInfo->base.functionId;
+  int32_t colId = pExprInfo->base.colInfo.colId;
 
   SDataStatis *tpField = NULL;
-  pCtx->hasNull = hasNullValue(&pQuery->pExpr1[colIndex].base.colInfo, pStatis, &tpField);
-  pCtx->pInput = inputData;
+  pCtx->hasNull = hasNullValue(&pExprInfo->base.colInfo, pStatis, &tpField);
 
   if (tpField != NULL) {
     pCtx->preAggVals.isSet  = true;
@@ -1789,73 +1796,24 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY
   // limit/offset query will affect this value
   pCtx->size = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->rows - pQuery->pos : pQuery->pos + 1;
 
-  // minimum value no matter ascending/descending order query
-  pCtx->startOffset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos: (pQuery->pos - pCtx->size + 1);
-  assert(pCtx->startOffset >= 0);
+  // set the start position in current block
+  int32_t offset = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos: (pQuery->pos - pCtx->size + 1);
+  if (inputData != NULL) {
+    pCtx->pInput = (char*)inputData + offset * pCtx->inputBytes;
+  }
 
   uint32_t status = aAggs[functionId].status;
   if (((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) && (tsCol != NULL)) {
-    pCtx->ptsList = tsCol;
+    pCtx->ptsList = tsCol + offset;
   }
 
-  if (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) {
-    // last_dist or first_dist function
-    // store the first&last timestamp into the intermediate buffer [1], the true
-    // value may be null but timestamp will never be null
-  } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TWA ||
-             functionId == TSDB_FUNC_DIFF || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) {
-    /*
-     * least squares function needs two columns of input, currently, the x value of linear equation is set to
-     * timestamp column, and the y-value is the column specified in pQuery->pExpr1[i].colIdxInBuffer
-     *
-     * top/bottom function needs timestamp to indicate when the
-     * top/bottom values emerge, so does diff function
-     */
-    if (functionId == TSDB_FUNC_TWA) {
-      pCtx->param[1].i64 = pQuery->window.skey;
-      pCtx->param[1].nType = TSDB_DATA_TYPE_BIGINT;
-      pCtx->param[2].i64 = pQuery->window.ekey;
-      pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
-    }
-
-  } else if (functionId == TSDB_FUNC_ARITHM) {
-    pCtx->param[1].pz = param;
-  } else if (functionId == TSDB_FUNC_SPREAD) {  // set the statistics data for primary time stamp column
+  if (functionId == TSDB_FUNC_SPREAD) {  // set the statistics data for primary time stamp column
     if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
       pCtx->preAggVals.isSet  = true;
       pCtx->preAggVals.statis.min = pBlockInfo->window.skey;
       pCtx->preAggVals.statis.max = pBlockInfo->window.ekey;
     }
-  } else if (functionId == TSDB_FUNC_INTERP) {
-    pCtx->param[2].i64 = (int8_t) pQuery->fillType;
-    if (pQuery->fillVal != NULL) {
-      if (isNull((const char*) &pQuery->fillVal[colIndex], pCtx->inputType)) {
-        pCtx->param[1].nType = TSDB_DATA_TYPE_NULL;
-      } else {  // todo refactor, tVariantCreateFromBinary should handle the NULL value
-        if (pCtx->inputType != TSDB_DATA_TYPE_BINARY && pCtx->inputType != TSDB_DATA_TYPE_NCHAR) {
-          tVariantCreateFromBinary(&pCtx->param[1], (char*) &pQuery->fillVal[colIndex], pCtx->inputBytes, pCtx->inputType);
-        }
-      }
-    }
-  } else if (functionId == TSDB_FUNC_TS_COMP) {
-    pCtx->param[0].i64 = vgId;
-    pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT;
   }
-
-#if defined(_DEBUG_VIEW)
-  //  int64_t *tsList = (int64_t *)primaryColumnData;
-//  int64_t  s = tsList[0];
-//  int64_t  e = tsList[size - 1];
-
-//   if (IS_DATA_BLOCK_LOADED(blockStatus)) {
-//     qDebug("QInfo:%p query ts:%lld-%lld, offset:%d, rows:%d, bstatus:%d,
-//     functId:%d", GET_QINFO_ADDR(pQuery),
-//            s, e, startOffset, size, blockStatus, functionId);
-//   } else {
-//     qDebug("QInfo:%p block not loaded, bstatus:%d",
-//     GET_QINFO_ADDR(pQuery), blockStatus);
-//   }
-#endif
 }
 
 // set the output buffer for the selectivity + tag query
@@ -1900,7 +1858,7 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables, int16_t order) {
+static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables, int16_t order, int32_t vgId) {
   qDebug("QInfo:%p setup runtime env", GET_QINFO_ADDR(pRuntimeEnv));
   SQuery *pQuery = pRuntimeEnv->pQuery;
 
@@ -1908,34 +1866,33 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
   pRuntimeEnv->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo));
 
   pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
-  pRuntimeEnv->keyBuf = malloc(pQuery->maxSrcColumnSize + sizeof(int64_t));
-  pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
+  pRuntimeEnv->keyBuf  = malloc(pQuery->maxSrcColumnSize + sizeof(int64_t));
+  pRuntimeEnv->pool    = initResultRowPool(getResultRowSize(pRuntimeEnv));
   pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQuery->numOfCols + pQuery->srcRowSize);
-  pRuntimeEnv->tagVal = malloc(pQuery->tagLen);
+  pRuntimeEnv->tagVal  = malloc(pQuery->tagLen);
+  pRuntimeEnv->pCtx    = (SQLFunctionCtx *)calloc(pQuery->numOfOutput, sizeof(SQLFunctionCtx));
+  pRuntimeEnv->offset  = calloc(pQuery->numOfOutput, sizeof(int16_t));
+  pRuntimeEnv->rowCellInfoOffset = calloc(pQuery->numOfOutput, sizeof(int32_t));
+  pRuntimeEnv->sasArray = calloc(pQuery->numOfOutput, sizeof(SArithmeticSupport));
+
+  if (pRuntimeEnv->offset == NULL || pRuntimeEnv->pCtx == NULL || pRuntimeEnv->rowCellInfoOffset == NULL ||
+      pRuntimeEnv->sasArray == NULL || pRuntimeEnv->pResultRowHashTable == NULL || pRuntimeEnv->keyBuf == NULL ||
+      pRuntimeEnv->prevRow == NULL || pRuntimeEnv->tagVal == NULL) {
+    goto _clean;
+  }
 
   char* start = POINTER_BYTES * pQuery->numOfCols + (char*) pRuntimeEnv->prevRow;
   pRuntimeEnv->prevRow[0] = start;
-
   for(int32_t i = 1; i < pQuery->numOfCols; ++i) {
     pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQuery->colList[i-1].bytes;
   }
 
-  pRuntimeEnv->pCtx = (SQLFunctionCtx *)calloc(pQuery->numOfOutput, sizeof(SQLFunctionCtx));
-  pRuntimeEnv->offset = calloc(pQuery->numOfOutput, sizeof(int16_t));
-  pRuntimeEnv->rowCellInfoOffset = calloc(pQuery->numOfOutput, sizeof(int32_t));
-  pRuntimeEnv->sasArray = calloc(pQuery->numOfOutput, sizeof(SArithmeticSupport));
-
-  // TODO check malloc failure
-  if (pRuntimeEnv->offset == NULL || pRuntimeEnv->pCtx == NULL || pRuntimeEnv->rowCellInfoOffset == NULL || pRuntimeEnv->sasArray == NULL) {
-    goto _clean;
-  }
-
   pRuntimeEnv->offset[0] = 0;
   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     SSqlFuncMsg *pSqlFuncMsg = &pQuery->pExpr1[i].base;
 
     SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
-    SColIndex* pIndex = &pSqlFuncMsg->colInfo;
+    SColIndex *     pIndex = &pSqlFuncMsg->colInfo;
 
     if (TSDB_COL_REQ_NULL(pIndex->flag)) {
       pCtx->requireNull = true;
@@ -1947,7 +1904,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
     int32_t index = pSqlFuncMsg->colInfo.colIndex;
     if (TSDB_COL_IS_TAG(pIndex->flag)) {
       if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) {  // todo refactor
-        SSchema* s = tGetTbnameColumnSchema();
+        SSchema *s = tGetTbnameColumnSchema();
 
         pCtx->inputBytes = s->bytes;
         pCtx->inputType = s->type;
@@ -2008,18 +1965,38 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
       pCtx->param[3].nType = TSDB_DATA_TYPE_BIGINT;
 
       pCtx->param[1].i64 = pQuery->order.orderColId;
-    }
-
-    if (functionId == TSDB_FUNC_ARITHM) {
+    } else if (functionId == TSDB_FUNC_INTERP) {
+      pCtx->param[2].i64 = (int8_t)pQuery->fillType;
+      if (pQuery->fillVal != NULL) {
+        if (isNull((const char *)&pQuery->fillVal[i], pCtx->inputType)) {
+          pCtx->param[1].nType = TSDB_DATA_TYPE_NULL;
+        } else {  // todo refactor, tVariantCreateFromBinary should handle the NULL value
+          if (pCtx->inputType != TSDB_DATA_TYPE_BINARY && pCtx->inputType != TSDB_DATA_TYPE_NCHAR) {
+            tVariantCreateFromBinary(&pCtx->param[1], (char *)&pQuery->fillVal[i], pCtx->inputBytes, pCtx->inputType);
+          }
        }
      }
+    } else if (functionId == TSDB_FUNC_TS_COMP) {
+      pCtx->param[0].i64 = vgId;
+      pCtx->param[0].nType = TSDB_DATA_TYPE_BIGINT;
+    } else if (functionId == TSDB_FUNC_TWA) {
+      pCtx->param[1].i64 = pQuery->window.skey;
+      pCtx->param[1].nType = TSDB_DATA_TYPE_BIGINT;
+      pCtx->param[2].i64 = pQuery->window.ekey;
+      pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
+    } else if (functionId == TSDB_FUNC_ARITHM) {
       pRuntimeEnv->sasArray[i].data = calloc(pQuery->numOfCols, POINTER_BYTES);
       if (pRuntimeEnv->sasArray[i].data == NULL) {
         goto _clean;
       }
+
+      pCtx->param[1].pz = (char*) &pRuntimeEnv->sasArray[i];
     }
 
     if (i > 0) {
       pRuntimeEnv->offset[i] = pRuntimeEnv->offset[i - 1] + pRuntimeEnv->pCtx[i - 1].outputBytes;
-      pRuntimeEnv->rowCellInfoOffset[i] = pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pExpr1[i - 1].interBytes;
+      pRuntimeEnv->rowCellInfoOffset[i] =
+          pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pExpr1[i - 1].interBytes;
     }
   }
 
@@ -2043,6 +2020,10 @@ _clean:
   tfree(pRuntimeEnv->offset);
   tfree(pRuntimeEnv->rowCellInfoOffset);
   tfree(pRuntimeEnv->sasArray);
+  tfree(pRuntimeEnv->pResultRowHashTable);
+  tfree(pRuntimeEnv->keyBuf);
+  tfree(pRuntimeEnv->prevRow);
+  tfree(pRuntimeEnv->tagVal);
 
   return TSDB_CODE_QRY_OUT_OF_MEMORY;
 }
@@ -2070,7 +2051,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
   SQInfo* pQInfo = (SQInfo*) GET_QINFO_ADDR(pRuntimeEnv);
 
   qDebug("QInfo:%p teardown runtime env", pQInfo);
-  cleanupResultRowInfo(&pRuntimeEnv->windowResInfo);
+  cleanupResultRowInfo(&pRuntimeEnv->resultRowInfo);
 
   if (isTsCompQuery(pQuery)) {
     FILE *f = *(FILE **)pQuery->sdata[0]->data;
@@ -2118,7 +2099,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
 
   tfree(pRuntimeEnv->prevRow);
   tfree(pRuntimeEnv->tagVal);
-
   taosHashCleanup(pRuntimeEnv->pResultRowHashTable);
   pRuntimeEnv->pResultRowHashTable = NULL;
 
@@ -2131,9 +2111,7 @@ static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) {
   return pQInfo->rspContext != NULL;
 }
 
-#define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED)
-
-static bool isQueryKilled(SQInfo *pQInfo) {
+bool isQueryKilled(SQInfo *pQInfo) {
   if (IS_QUERY_KILLED(pQInfo)) {
     return true;
   }
@@ -2152,7 +2130,7 @@ static bool isQueryKilled(SQInfo *pQInfo) {
   return false;
 }
 
-static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;}
+void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;}
 
 static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) {
   SQuery* pQuery = pRuntimeEnv->pQuery;
@@ -2253,7 +2231,7 @@ static bool needReverseScan(SQuery *pQuery) {
 /*
  * The following 4 kinds of query are treated as the tags query
  * tagprj, tid_tag query, count(tbname), 'abc' (user defined constant value column) query
 */
-static bool onlyQueryTags(SQuery* pQuery) {
+bool onlyQueryTags(SQuery* pQuery) {
   for(int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     SExprInfo* pExprInfo = &pQuery->pExpr1[i];
 
@@ -2351,7 +2329,6 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD
 static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); }
 
-// todo refactor, add iterator
 static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
   size_t t = taosArrayGetSize(pQInfo->tableGroupInfo.pGroupList);
   for(int32_t i = 0; i < t; ++i) {
@@ -2769,56 +2746,42 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
   return midPos;
 }
 
-static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capacity) {
+static void expandBuffer(SQueryRuntimeEnv* pRuntimeEnv, int32_t newSize, void* qinfo) {
   SQuery* pQuery = pRuntimeEnv->pQuery;
+  SResultRec *pRec = &pQuery->rec;
 
-  if (capacity < pQuery->rec.capacity) {
-    return;
-  }
+  assert(newSize > 0);
 
   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     int32_t bytes = pQuery->pExpr1[i].bytes;
-    assert(bytes > 0 && capacity > 0);
 
-    char *tmp = realloc(pQuery->sdata[i], bytes * capacity + sizeof(tFilePage));
+    char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage));
     if (tmp == NULL) {
       longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
     } else {
+      memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (size_t)((newSize - pRec->rows) * bytes));
       pQuery->sdata[i] = (tFilePage *)tmp;
     }
-
-    // set the pCtx output buffer position
-    pRuntimeEnv->pCtx[i].pOutput = pQuery->sdata[i]->data;
   }
 
-  qDebug("QInfo:%p realloc output buffer to inc output buffer from: %" PRId64 " rows to:%d rows", GET_QINFO_ADDR(pRuntimeEnv),
-         pQuery->rec.capacity, capacity);
-
-  pQuery->rec.capacity = capacity;
+  qDebug("QInfo:%p realloc output buffer, new size: %d rows, old:%" PRId64 ", remain:%" PRId64, qinfo, newSize,
+         pRec->capacity, newSize - pRec->rows);
+  pRec->capacity = newSize;
 }
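Note: expandBuffer() zero-fills only the region beyond the rows already produced (pRec->rows), so earlier results survive the realloc and the new tail never exposes stale heap bytes; re-pointing each pCtx[i].pOutput into the reallocated pages remains the caller's job (ensureOutputBuffer below does exactly that). The grow-and-clear idiom in isolation (toy code):

#include <stdlib.h>
#include <string.h>

// Grows a row buffer from 'rows' used entries to 'newSize' entries of
// 'bytes' each, zeroing only the newly added tail; returns NULL on OOM.
static char *growRowBuffer(char *buf, size_t rows, size_t newSize, size_t bytes) {
  char *tmp = realloc(buf, newSize * bytes);
  if (tmp == NULL) {
    return NULL;                       // the real code longjmps instead
  }
  memset(tmp + rows * bytes, 0, (newSize - rows) * bytes);
  return tmp;
}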
-// TODO merge with enuserOutputBufferSimple
-static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
+static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, int32_t numOfRows) {
   // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
   SQuery* pQuery = pRuntimeEnv->pQuery;
   if (!QUERY_IS_INTERVAL_QUERY(pQuery) && !pRuntimeEnv->groupbyColumn && !isFixedOutputQuery(pRuntimeEnv) && !isTsCompQuery(pQuery)) {
     SResultRec *pRec = &pQuery->rec;
 
-    if (pQuery->rec.capacity - pQuery->rec.rows < pBlockInfo->rows) {
-      int32_t remain = (int32_t)(pRec->capacity - pRec->rows);
-      int32_t newSize = (int32_t)(pRec->capacity + (pBlockInfo->rows - remain));
+    int32_t remain = (int32_t)(pRec->capacity - pRec->rows);
+    if (remain < numOfRows) {
+      int32_t newSize = (int32_t)(pRec->capacity + (numOfRows - remain));
+      expandBuffer(pRuntimeEnv, newSize, GET_QINFO_ADDR(pRuntimeEnv));
 
       for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
         int32_t bytes = pQuery->pExpr1[i].bytes;
-        assert(bytes > 0 && newSize > 0);
-
-        char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage));
-        if (tmp == NULL) {
-          longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
-        } else {
-          memset(tmp + sizeof(tFilePage) + bytes * pRec->rows, 0, (size_t)((newSize - pRec->rows) * bytes));
-          pQuery->sdata[i] = (tFilePage *)tmp;
-        }
 
         // set the pCtx output buffer position
         pRuntimeEnv->pCtx[i].pOutput = pQuery->sdata[i]->data + pRec->rows * bytes;
@@ -2828,11 +2791,6 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
           pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput;
         }
       }
-
-      qDebug("QInfo:%p realloc output buffer, new size: %d rows, old:%" PRId64 ", remain:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv),
-             newSize, pRec->capacity, newSize - pRec->rows);
-
-      pRec->capacity = newSize;
     }
   }
 }
@@ -2840,9 +2798,9 @@ static void doSetInitialTimewindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo
   SQuery* pQuery = pRuntimeEnv->pQuery;
 
-  if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
+  if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->resultRowInfo.prevSKey == TSKEY_INITIAL_VAL) {
     STimeWindow w = TSWINDOW_INITIALIZER;
-    SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+    SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo;
 
     if (QUERY_IS_ASC_QUERY(pQuery)) {
       getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, pBlockInfo->window.skey, pQuery->window.ekey, &w);
@@ -2882,13 +2840,13 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
     doSetInitialTimewindow(pRuntimeEnv, &blockInfo);
 
     // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
-    ensureOutputBuffer(pRuntimeEnv, &blockInfo);
+    ensureOutputBuffer(pRuntimeEnv, blockInfo.rows);
 
     SDataStatis *pStatis = NULL;
     SArray *     pDataBlock = NULL;
     uint32_t     status = 0;
 
-    int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status);
+    int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pRuntimeEnv->resultRowInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status);
     if (ret != TSDB_CODE_SUCCESS) {
       break;
     }
@@ -2923,8 +2881,8 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
   }
 
   if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
-    closeAllResultRows(&pRuntimeEnv->windowResInfo);
-    pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1;  // point to the last time window
+    closeAllResultRows(&pRuntimeEnv->resultRowInfo);
+    pRuntimeEnv->resultRowInfo.curIndex = pRuntimeEnv->resultRowInfo.size - 1;  // point to the last time window
   }
 
   return 0;
@@ -3142,253 +3100,35 @@ void UNUSED_FUNC displayInterResult(tFilePage **pdata, SQueryRuntimeEnv* pRuntim
   }
 }
 
-typedef struct SCompSupporter {
-  STableQueryInfo **pTableQueryInfo;
-  int32_t          *rowIndex;
-  int32_t           order;
-} SCompSupporter;
-
-int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) {
-  int32_t left = *(int32_t *)pLeft;
-  int32_t right = *(int32_t *)pRight;
-
-  SCompSupporter *supporter = (SCompSupporter *)param;
-
-  int32_t leftPos = supporter->rowIndex[left];
-  int32_t rightPos = supporter->rowIndex[right];
-
-  /* left source is exhausted */
-  if (leftPos == -1) {
-    return 1;
-  }
-
-  /* right source is exhausted*/
-  if (rightPos == -1) {
-    return -1;
-  }
-
-  STableQueryInfo** pList = supporter->pTableQueryInfo;
-
-  SResultRowInfo *pWindowResInfo1 = &(pList[left]->windowResInfo);
-  SResultRow     *pWindowRes1 = getResultRow(pWindowResInfo1, leftPos);
-  TSKEY leftTimestamp = pWindowRes1->win.skey;
-
-  SResultRowInfo *pWindowResInfo2 = &(pList[right]->windowResInfo);
-  SResultRow     *pWindowRes2 = getResultRow(pWindowResInfo2, rightPos);
-  TSKEY rightTimestamp = pWindowRes2->win.skey;
-
-  if (leftTimestamp == rightTimestamp) {
-    return 0;
-  }
-
-  if (supporter->order == TSDB_ORDER_ASC) {
-    return (leftTimestamp > rightTimestamp)? 1:-1;
-  } else {
-    return (leftTimestamp < rightTimestamp)? 1:-1;
-  }
-}
-
-int32_t mergeGroupResult(SQInfo *pQInfo) {
-  int64_t st = taosGetTimestampUs();
-
-  SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo;
-
-  int32_t numOfGroups = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo));
-  while (pQInfo->groupIndex < numOfGroups) {
-    SArray *group = GET_TABLEGROUP(pQInfo, pQInfo->groupIndex);
-
-    int32_t ret = mergeIntoGroupResultImpl(pGroupResInfo, group, pQInfo);
-    if (ret != TSDB_CODE_SUCCESS) {
-      return ret;
-    }
-
-    // this group generates at least one result, return results
-    pQInfo->groupIndex += 1;
-    if (taosArrayGetSize(pGroupResInfo->pRows) > 0) {
-      break;
-    }
-
-    qDebug("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1);
-    taosArrayClear(pGroupResInfo->pRows);
-
-    pGroupResInfo->index = 0;
-    pGroupResInfo->rowId = 0;
-  }
-
-  if (pQInfo->groupIndex == numOfGroups && taosArrayGetSize(pGroupResInfo->pRows) == 0) {
-    SET_STABLE_QUERY_OVER(pQInfo);
-  }
-
-  int64_t elapsedTime = taosGetTimestampUs() - st;
-  qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pQInfo,
-         pQInfo->groupIndex - 1, numOfGroups, elapsedTime);
-
-  pQInfo->runtimeEnv.summary.firstStageMergeTime += elapsedTime;
-  return TSDB_CODE_SUCCESS;
-}
-
-static int32_t doCopyToSData(SQInfo *pQInfo, SResultRow **pRows, int32_t numOfRows, int32_t* index, int32_t orderType);
+static int32_t doCopyToSData(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType);
 
 void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
   SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo;
 
-  // all results in current group have been returned to client, try next group
-  if (pGroupResInfo->index >= taosArrayGetSize(pGroupResInfo->pRows)) {
-    // current results of group has been sent to client, try next group
-    pGroupResInfo->index = 0;
-    pGroupResInfo->rowId = 0;
-    taosArrayClear(pGroupResInfo->pRows);
-
-    if (mergeGroupResult(pQInfo) != TSDB_CODE_SUCCESS) {
-      return;  // failed to save data in the disk
-    }
-
-    // check if all results has been sent to client
-    int32_t numOfGroup = (int32_t)(GET_NUM_OF_TABLEGROUP(pQInfo));
-    if (taosArrayGetSize(pGroupResInfo->pRows) == 0 && pQInfo->groupIndex == numOfGroup) {
-      SET_STABLE_QUERY_OVER(pQInfo);
-      return;
-    }
-  }
-
-  int32_t size = (int32_t) taosArrayGetSize(pGroupResInfo->pRows);
-  pQuery->rec.rows = doCopyToSData(pQInfo, pGroupResInfo->pRows->pData, size, &pGroupResInfo->index, TSDB_ORDER_ASC);
-}
-
-int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow) {
-  SQuery* pQuery = pRuntimeEnv->pQuery;
-
-  for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
-    int32_t functionId = pQuery->pExpr1[j].base.functionId;
-
-    /*
-     * ts, tag, tagprj function can not decide the output number of current query
-     * the number of output result is decided by main output
-     */
-    if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
-      continue;
-    }
-
-    SResultRowCellInfo *pResultInfo = getResultCell(pRuntimeEnv, pResultRow, j);
-    assert(pResultInfo != NULL);
-
-    if (pResultInfo->numOfRes > 0) {
-      return pResultInfo->numOfRes;
-    }
-  }
-
-  return 0;
-}
-
-int32_t mergeIntoGroupResultImpl(SGroupResInfo* pGroupResInfo, SArray *pTableList, SQInfo* pQInfo) {
-  SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
-  bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery);
-
-  int32_t code = TSDB_CODE_SUCCESS;
-
-  int32_t *posList = NULL;
-  SLoserTreeInfo *pTree = NULL;
-  STableQueryInfo **pTableQueryInfoList = NULL;
-
-  size_t size = taosArrayGetSize(pTableList);
-  if (pGroupResInfo->pRows == NULL) {
-    pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES);
-  }
-
-  posList = calloc(size, sizeof(int32_t));
-  pTableQueryInfoList = malloc(POINTER_BYTES * size);
-
-  if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL) {
-    qError("QInfo:%p failed alloc memory", pQInfo);
-    code = TSDB_CODE_QRY_OUT_OF_MEMORY;
-    goto _end;
-  }
-
-  int32_t numOfTables = 0;
-  for (int32_t i = 0; i < size; ++i) {
-    STableQueryInfo *item = taosArrayGetP(pTableList, i);
-    if (item->windowResInfo.size > 0) {
-      pTableQueryInfoList[numOfTables++] = item;
-    }
-  }
-
-  // there is no data in current group
-  // no need to merge results since only one table in each group
-  if (numOfTables == 0) {
-    goto _end;
-  }
-
-  SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQuery->order.order};
-
-  int32_t ret = tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn);
-  if (ret != TSDB_CODE_SUCCESS) {
-    code = TSDB_CODE_QRY_OUT_OF_MEMORY;
-    goto _end;
-  }
-
-  int64_t lastTimestamp = ascQuery? INT64_MIN:INT64_MAX;
-  int64_t startt = taosGetTimestampMs();
-
-  while (1) {
-    if (isQueryKilled(pQInfo)) {
-      qDebug("QInfo:%p it is already killed, abort", pQInfo);
-      code = TSDB_CODE_TSC_QUERY_CANCELLED;
-      goto _end;
-    }
-
-    int32_t tableIndex = pTree->pNode[0].index;
-
-    SResultRowInfo *pWindowResInfo = &pTableQueryInfoList[tableIndex]->windowResInfo;
-    SResultRow     *pWindowRes = getResultRow(pWindowResInfo, cs.rowIndex[tableIndex]);
-
-    int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes);
-    if (num <= 0) {
-      cs.rowIndex[tableIndex] += 1;
-
-      if (cs.rowIndex[tableIndex] >= pWindowResInfo->size) {
-        cs.rowIndex[tableIndex] = -1;
-        if (--numOfTables == 0) {  // all input sources are exhausted
-          break;
-        }
-      }
-    } else {
-      assert((pWindowRes->win.skey >= lastTimestamp && ascQuery) || (pWindowRes->win.skey <= lastTimestamp && !ascQuery));
-
-      if (pWindowRes->win.skey != lastTimestamp) {
-        taosArrayPush(pGroupResInfo->pRows, &pWindowRes);
-        pWindowRes->numOfRows = (uint32_t) num;
+  while(pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) {
+    // all results in current group have been returned to client, try next group
+    if ((pGroupResInfo->pRows == NULL) || taosArrayGetSize(pGroupResInfo->pRows) == 0) {
+      assert(pGroupResInfo->index == 0);
+      if ((pQInfo->code = mergeIntoGroupResult(&pQInfo->groupResInfo, pQInfo)) != TSDB_CODE_SUCCESS) {
+        return;
       }
+    }
 
-      lastTimestamp = pWindowRes->win.skey;
-
-      // move to the next row of current entry
-      if ((++cs.rowIndex[tableIndex]) >= pWindowResInfo->size) {
-        cs.rowIndex[tableIndex] = -1;
+    pQuery->rec.rows = doCopyToSData(&pQInfo->runtimeEnv, pGroupResInfo, TSDB_ORDER_ASC);
 
-        // all input sources are exhausted
-        if ((--numOfTables) == 0) {
-          break;
-        }
+    // current data are all dumped to result buffer, clear it
+    if (!hasRemainData(pGroupResInfo)) {
+      cleanupGroupResInfo(pGroupResInfo);
+      if (!incNextGroup(pGroupResInfo)) {
+        SET_STABLE_QUERY_OVER(pQInfo);
       }
     }
 
-    tLoserTreeAdjust(pTree, tableIndex + pTree->numOfEntries);
+    // enough results in data buffer, return
+    if (pQuery->rec.rows >= pQuery->rec.threshold) {
+      break;
+    }
   }
-
-  int64_t endt = taosGetTimestampMs();
-
-#ifdef _DEBUG_VIEW
-  displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num);
-#endif
-
-  qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pQInfo, pQInfo->groupIndex, endt - startt);
-
-  _end:
-  tfree(pTableQueryInfoList);
-  tfree(posList);
-  tfree(pTree);
-
-  return code;
 }
 
 static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *pTableQueryInfo) {
@@ -3403,7 +3143,7 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *
   pTableQueryInfo->cur.vgroupIndex = -1;
 
   // set the index at the end of time window
-  pTableQueryInfo->windowResInfo.curIndex = pTableQueryInfo->windowResInfo.size - 1;
+  pTableQueryInfo->resInfo.curIndex = pTableQueryInfo->resInfo.size - 1;
 }
 
 static void disableFuncInReverseScanImpl(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo *pWindowResInfo, int32_t order) {
@@ -3438,7 +3178,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
   int32_t order = pQuery->order.order;
 
   // group by normal columns and interval query on normal table
-  SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+  SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo;
   if (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) {
     disableFuncInReverseScanImpl(pRuntimeEnv, pWindowResInfo, order);
   } else {  // for simple result of table query,
@@ -3502,7 +3242,7 @@ void resetDefaultResInfoOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) {
   int32_t tid = 0;
   int64_t uid = 0;
-  SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&tid, sizeof(tid), true, uid);
+  SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, &pRuntimeEnv->resultRowInfo, (char *)&tid, sizeof(tid), true, uid);
 
   for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
     SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
@@ -3629,7 +3369,7 @@ bool needRepeatScan(SQueryRuntimeEnv *pRuntimeEnv) {
   bool toContinue = false;
   if (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) {
     // for each group result, call the finalize function for each column
-    SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+    SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo;
 
     for (int32_t i = 0; i < pWindowResInfo->size; ++i) {
       SResultRow *pResult = getResultRow(pWindowResInfo, i);
@@ -3673,7 +3413,7 @@ static SQueryStatusInfo getQueryStatusInfo(SQueryRuntimeEnv *pRuntimeEnv, TSKEY
 
   SQueryStatusInfo info = {
      .status      = pQuery->status,
-      .windowIndex = pRuntimeEnv->windowResInfo.curIndex,
+      .windowIndex = pRuntimeEnv->resultRowInfo.curIndex,
      .lastKey     = start,
   };
 
@@ -3858,7 +3598,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
       longjmp(pRuntimeEnv->env, terrno);
     }
 
-    pRuntimeEnv->windowResInfo.curIndex = qstatus.windowIndex;
+    pRuntimeEnv->resultRowInfo.curIndex = qstatus.windowIndex;
     setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
     pRuntimeEnv->scanFlag = REPEAT_SCAN;
 
@@ -3889,7 +3629,7 @@ void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) {
 
   if (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) {
     // for each group result, call the finalize function for each column
-    SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+    SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo;
     if (pRuntimeEnv->groupbyColumn) {
       closeAllResultRows(pWindowResInfo);
     }
@@ -3944,7 +3684,7 @@ static STableQueryInfo *createTableQueryInfo(SQuery* pQuery, void* pTable, bool
   // set more initial size of interval/groupby query
   if (QUERY_IS_INTERVAL_QUERY(pQuery) || groupbyColumn) {
     int32_t initialSize = 128;
-    int32_t code = initResultRowInfo(&pTableQueryInfo->windowResInfo, initialSize, TSDB_DATA_TYPE_INT);
+    int32_t code = initResultRowInfo(&pTableQueryInfo->resInfo, initialSize, TSDB_DATA_TYPE_INT);
     if (code != TSDB_CODE_SUCCESS) {
       return NULL;
     }
@@ -3960,7 +3700,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) {
   }
 
   tVariantDestroy(&pTableQueryInfo->tag);
-  cleanupResultRowInfo(&pTableQueryInfo->windowResInfo);
+  cleanupResultRowInfo(&pTableQueryInfo->resInfo);
 }
 
 /**
@@ -3971,7 +3711,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) {
 void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
   SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
   STableQueryInfo  *pTableQueryInfo = pRuntimeEnv->pQuery->current;
-  SResultRowInfo   *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+  SResultRowInfo   *pWindowResInfo = &pRuntimeEnv->resultRowInfo;
 
   // lastKey needs to be updated
   pTableQueryInfo->lastKey = nextKey;
@@ -4045,7 +3785,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
     if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
       pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].pOutput;
     }
-    
+
     if (!pCtx->resultInfo->initialized) {
       aAggs[functionId].init(pCtx);
     }
@@ -4099,7 +3839,7 @@ int32_t setTimestampListJoinInfo(SQInfo *pQInfo, STableQueryInfo *pTableQueryInf
 int32_t setParamValue(SQueryRuntimeEnv* pRuntimeEnv) {
   SQuery* pQuery = pRuntimeEnv->pQuery;
 
-  if (pRuntimeEnv->prevResult == NULL) {
+  if (pRuntimeEnv->prevResult == NULL || pRuntimeEnv->groupbyColumn) {
     return TSDB_CODE_SUCCESS;
   }
 
@@ -4168,7 +3908,7 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
      * operations involve.
     */
     STimeWindow     w = TSWINDOW_INITIALIZER;
-    SResultRowInfo *pWindowResInfo = &pTableQueryInfo->windowResInfo;
+    SResultRowInfo *pWindowResInfo = &pTableQueryInfo->resInfo;
 
    TSKEY sk = MIN(win.skey, win.ekey);
    TSKEY ek = MAX(win.skey, win.ekey);
@@ -4211,73 +3951,63 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo) {
   return loadPrimaryTS;
 }
 
-static int32_t doCopyToSData(SQInfo *pQInfo, SResultRow **pRows, int32_t numOfRows, int32_t *index, int32_t orderType) {
-  SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
-  SQuery *          pQuery = pRuntimeEnv->pQuery;
+static int32_t doCopyToSData(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType) {
+  void* qinfo = GET_QINFO_ADDR(pRuntimeEnv);
+  SQuery *pQuery = pRuntimeEnv->pQuery;
+
+  int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
+  int32_t numOfResult = (int32_t) pQuery->rec.rows;  // rows already in the output buffer
 
-  int32_t numOfResult = 0;
   int32_t start = 0;
   int32_t step = -1;
 
-  qDebug("QInfo:%p start to copy data from windowResInfo to query buf", pQInfo);
+  qDebug("QInfo:%p start to copy data from windowResInfo to output buf", qinfo);
 
   if (orderType == TSDB_ORDER_ASC) {
-    start = (*index);
+    start = pGroupResInfo->index;
     step = 1;
   } else {  // desc order copy all data
-    start = numOfRows - (*index) - 1;
+    start = numOfRows - pGroupResInfo->index - 1;
     step = -1;
   }
 
-  SGroupResInfo* pGroupResInfo = &pQInfo->groupResInfo;
-
   for (int32_t i = start; (i < numOfRows) && (i >= 0); i += step) {
-    if (pRows[i]->numOfRows == 0) {
-      (*index) += 1;
-      pGroupResInfo->rowId = 0;
+    SResultRow* pRow = taosArrayGetP(pGroupResInfo->pRows, i);
+    if (pRow->numOfRows == 0) {
+      pGroupResInfo->index += 1;
       continue;
     }
 
-    int32_t numOfRowsToCopy = pRows[i]->numOfRows - pGroupResInfo->rowId;
-    int32_t oldOffset = pGroupResInfo->rowId;
+    int32_t numOfRowsToCopy = pRow->numOfRows;
 
-    /*
-     * current output space is not enough to accommodate all data of this page, only partial results
-     * will be copied to SQuery object's result buffer
-     */
-    if (numOfRowsToCopy > pQuery->rec.capacity - numOfResult) {
-      numOfRowsToCopy = (int32_t) pQuery->rec.capacity - numOfResult;
-      pGroupResInfo->rowId += numOfRowsToCopy;
-    } else {
-      pGroupResInfo->rowId = 0;
-      (*index) += 1;
+    // current output space is not enough to accommodate all data of this page, prepare more space
+    if (numOfRowsToCopy > (pQuery->rec.capacity - numOfResult)) {
+      int32_t newSize = (int32_t) (pQuery->rec.capacity + (numOfRowsToCopy - numOfResult));
+      expandBuffer(pRuntimeEnv, newSize, GET_QINFO_ADDR(pRuntimeEnv));
     }
 
-    tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pRows[i]->pageId);
+    pGroupResInfo->index += 1;
 
+    tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pRow->pageId);
     for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
       int32_t size = pRuntimeEnv->pCtx[j].outputBytes;
 
       char *out = pQuery->sdata[j]->data + numOfResult * size;
-      char *in = getPosInResultPage(pRuntimeEnv, j, pRows[i], page);
-      memcpy(out, in + oldOffset * size, size * numOfRowsToCopy);
+      char *in = getPosInResultPage(pRuntimeEnv, j, pRow, page);
+      memcpy(out, in, size * numOfRowsToCopy);
     }
 
     numOfResult += numOfRowsToCopy;
-    if (numOfResult == pQuery->rec.capacity) {
+    if (numOfResult == pQuery->rec.capacity) {  // output buffer is full
      break;
    }
   }
 
-  qDebug("QInfo:%p copy data to query buf completed", pQInfo);
-
-#ifdef _DEBUG_VIEW
-  displayInterResult(pQuery->sdata, pRuntimeEnv, numOfResult);
-#endif
+  qDebug("QInfo:%p copy data to query buf completed", qinfo);
  return numOfResult;
 }
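Note: with the rowId bookkeeping gone, doCopyToSData() copies each result row set whole, growing the output buffer on demand instead of splitting a set across calls; progress is tracked solely by pGroupResInfo->index. The hasRemainData() helper declared in qUtil.h presumably reduces to comparing that index against the number of buffered result rows, along these lines (an assumption — the real definition lives in qUtil.c, which is outside this diff):

static bool hasRemainDataSketch(SGroupResInfo *pGroupResInfo) {
  if (pGroupResInfo->pRows == NULL) {
    return false;                      // nothing merged for this group yet
  }
  return pGroupResInfo->index < (int32_t)taosArrayGetSize(pGroupResInfo->pRows);
}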
numOfResult; } /** - * copyFromWindowResToSData support copy data in ascending/descending order + * copyToOutputBuf support copy data in ascending/descending order * For interval query of both super table and table, copy the data in ascending order, since the output results are * ordered in SWindowResutl already. While handling the group by query for both table and super table, * all group result are completed already. @@ -4285,14 +4015,17 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SResultRow **pRows, int32_t numOfRo * @param pQInfo * @param result */ -void copyFromWindowResToSData(SQInfo *pQInfo, SResultRowInfo *pResultInfo) { - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; +void copyToOutputBuf(SQInfo *pQInfo, SResultRowInfo *pResultInfo) { + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + SGroupResInfo *pGroupResInfo = &pQInfo->groupResInfo; - int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSDB_ORDER_ASC; - int32_t numOfResult = doCopyToSData(pQInfo, pResultInfo->pResult, pResultInfo->size, &pQInfo->groupIndex, orderType); + assert(pQuery->rec.rows == 0 && pGroupResInfo->currentGroup <= pGroupResInfo->totalGroup); + if (!hasRemainData(pGroupResInfo)) { + return; + } - pQuery->rec.rows += numOfResult; - assert(pQuery->rec.rows <= pQuery->rec.capacity); + int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? pQuery->pGroupbyExpr->orderType : TSDB_ORDER_ASC; + pQuery->rec.rows = doCopyToSData(&pQInfo->runtimeEnv, pGroupResInfo, orderType); } static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { @@ -4303,8 +4036,8 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv) { return; } - for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) { - SResultRow *pResult = pRuntimeEnv->windowResInfo.pResult[i]; + for (int32_t i = 0; i < pRuntimeEnv->resultRowInfo.size; ++i) { + SResultRow *pResult = pRuntimeEnv->resultRowInfo.pResult[i]; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { int32_t functionId = pRuntimeEnv->pCtx[j].functionId; @@ -4323,7 +4056,7 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc SQuery * pQuery = pRuntimeEnv->pQuery; STableQueryInfo* pTableQueryInfo = pQuery->current; - SResultRowInfo * pResultRowInfo = &pTableQueryInfo->windowResInfo; + SResultRowInfo * pResultRowInfo = &pTableQueryInfo->resInfo; pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1; if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->groupbyColumn) { @@ -4337,10 +4070,14 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc } } -bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv) { +bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; SFillInfo *pFillInfo = pRuntimeEnv->pFillInfo; + if (!Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { + return false; + } + if (pQuery->limit.limit > 0 && pQuery->rec.total >= pQuery->limit.limit) { return false; } @@ -4360,16 +4097,11 @@ bool hasNotReturnedResults(SQueryRuntimeEnv* pRuntimeEnv) { * set is the FIRST result block, the gap between the start time of query time window and the timestamp of the * first result row in the actual result set will fill nothing. 
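
The rewritten doCopyToSData() above drops the old rowId/oldOffset bookkeeping: the single cursor pGroupResInfo->index records consumption, and the output buffer is grown via expandBuffer() so a result row is always copied whole instead of being split across calls. A compressed, self-contained sketch of that control flow, with a flat int32_t payload standing in for the real per-column result pages:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int32_t numOfRows; const int32_t *data; } RowSketch;
    typedef struct { RowSketch **pRows; int32_t size; int32_t index; } GroupResSketch;

    /* copy rows in ascending order starting at the cursor; returns rows copied */
    static int32_t copyRowsSketch(GroupResSketch *g, int32_t **dst, int32_t *cap) {
      int32_t total = 0;
      for (int32_t i = g->index; i < g->size; ++i) {
        RowSketch *row = g->pRows[i];
        if (row->numOfRows == 0) {          /* empty row: just advance the cursor */
          g->index += 1;
          continue;
        }
        if (total + row->numOfRows > *cap) {        /* expandBuffer() analogue */
          int32_t newCap = total + row->numOfRows;
          int32_t *p = realloc(*dst, (size_t)newCap * sizeof(int32_t));
          if (p == NULL) break;             /* allocation failed: stop early */
          *dst = p;
          *cap = newCap;
        }
        g->index += 1;                      /* the row is consumed as a whole */
        memcpy(*dst + total, row->data, (size_t)row->numOfRows * sizeof(int32_t));
        total += row->numOfRows;
      }
      return total;
    }
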
*/ - if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); - return numOfTotal > 0; - } - - } else { - // there are results waiting for returned to client. - if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && - (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery)) && - (pRuntimeEnv->windowResInfo.size > 0)) { + int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pQuery->window.ekey, (int32_t)pQuery->rec.capacity); + return numOfTotal > 0; + } else { // there are results waiting for returned to client. + if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED) && hasRemainData(pGroupResInfo) && + (pRuntimeEnv->groupbyColumn || QUERY_IS_INTERVAL_QUERY(pQuery))) { return true; } } @@ -4429,15 +4161,15 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data setQueryStatus(pQuery, QUERY_OVER); } } else { - if (!hasNotReturnedResults(&pQInfo->runtimeEnv)) { + if (!hasNotReturnedResults(&pQInfo->runtimeEnv, &pQInfo->groupResInfo)) { setQueryStatus(pQuery, QUERY_OVER); } } } } -int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int32_t *numOfFilled) { - SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); +int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst) { + SQInfo *pQInfo = GET_QINFO_ADDR(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; SFillInfo* pFillInfo = pRuntimeEnv->pFillInfo; @@ -4456,7 +4188,7 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int pQInfo, pFillInfo->numOfRows, ret, pQuery->limit.offset, ret - pQuery->limit.offset, 0); ret -= (int32_t)pQuery->limit.offset; - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { //???pExpr1 or pExpr2 memmove(pDst[i]->data, pDst[i]->data + pQuery->pExpr1[i].bytes * pQuery->limit.offset, ret * pQuery->pExpr1[i].bytes); } @@ -4469,17 +4201,18 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int pQuery->limit.offset - ret); pQuery->limit.offset -= ret; - pQuery->rec.rows = 0; ret = 0; } - if (!hasNotReturnedResults(pRuntimeEnv)) { - return ret; + // no data in current data after fill + int32_t numOfTotal = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, (int32_t)pQuery->rec.capacity); + if (numOfTotal == 0) { + return 0; } } } -static void queryCostStatis(SQInfo *pQInfo) { +void queryCostStatis(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryCostInfo *pSummary = &pRuntimeEnv->summary; @@ -4571,7 +4304,7 @@ static void generateBlockDistResult(STableBlockDist *pTableBlockDist) { if (pTableBlockDist == NULL) { return; } - int64_t min = 0, max = 0, avg = 0; + int64_t min = 0, max = 0, avg = 0; SArray* blockInfos= pTableBlockDist->dataBlockInfos; int64_t totalRows = 0, totalBlocks = taosArrayGetSize(blockInfos); for (size_t i = 0; i < taosArrayGetSize(blockInfos); i++) { @@ -4637,7 +4370,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { static TSKEY doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* win, SDataBlockInfo* pBlockInfo, STableQueryInfo* pTableQueryInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; assert(pQuery->limit.offset == 0); STimeWindow tw = *win; @@ -4665,10 +4398,10 @@ static TSKEY 
doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* w TSKEY key = pTableQueryInfo->win.skey; pWindowResInfo->prevSKey = tw.skey; - int32_t index = pRuntimeEnv->windowResInfo.curIndex; + int32_t index = pRuntimeEnv->resultRowInfo.curIndex; int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); - pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index + pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, @@ -4704,12 +4437,12 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { * pQuery->limit.offset times. Since hole exists, pQuery->interval.interval*pQuery->limit.offset value is * not valid. otherwise, we only forward pQuery->limit.offset number of points */ - assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL); + assert(pRuntimeEnv->resultRowInfo.prevSKey == TSKEY_INITIAL_VAL); STimeWindow w = TSWINDOW_INITIALIZER; bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; STableQueryInfo *pTableQueryInfo = pQuery->current; SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; @@ -4890,7 +4623,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pRuntimeEnv->topBotQuery = isTopBottomQuery(pQuery); pRuntimeEnv->hasTagResults = hasTagValOutput(pQuery); pRuntimeEnv->timeWindowInterpo = timeWindowInterpoRequired(pQuery); - pRuntimeEnv->prevResult = prevResult; setScanLimitationByResultBuffer(pQuery); @@ -4902,6 +4634,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pQInfo->tsdb = tsdb; pQInfo->vgId = vgId; + pQInfo->groupResInfo.totalGroup = (int32_t) (isSTableQuery? GET_NUM_OF_TABLEGROUP(pQInfo):0); pRuntimeEnv->pQuery = pQuery; pRuntimeEnv->pTsBuf = pTsBuf; @@ -4909,6 +4642,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pRuntimeEnv->stableQuery = isSTableQuery; pRuntimeEnv->prevGroupId = INT32_MIN; pRuntimeEnv->groupbyColumn = isGroupbyColumn(pQuery->pGroupbyExpr); + pRuntimeEnv->stabledev = isStabledev(pQuery); if (pTsBuf != NULL) { int16_t order = (pQuery->order.order == pRuntimeEnv->pTsBuf->tsOrder) ? 
TSDB_ORDER_ASC : TSDB_ORDER_DESC; @@ -4934,7 +4668,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts type = TSDB_DATA_TYPE_INT; // group id } - code = initResultRowInfo(&pRuntimeEnv->windowResInfo, 8, type); + code = initResultRowInfo(&pRuntimeEnv->resultRowInfo, 8, type); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4954,14 +4688,14 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts type = TSDB_DATA_TYPE_TIMESTAMP; } - code = initResultRowInfo(&pRuntimeEnv->windowResInfo, numOfResultRows, type); + code = initResultRowInfo(&pRuntimeEnv->resultRowInfo, numOfResultRows, type); if (code != TSDB_CODE_SUCCESS) { return code; } } // create runtime environment - code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQInfo->tableGroupInfo.numOfTables, pQuery->order.order); + code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQInfo->tableGroupInfo.numOfTables, pQuery->order.order, pQInfo->vgId); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5008,13 +4742,6 @@ static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTa setTimestampListJoinInfo(pQInfo, pTableQueryInfo); } - for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - if (pQuery->pExpr1[i].base.functionId == TSDB_FUNC_STDDEV_DST) { - setParamValue(pRuntimeEnv); - break; - } - } - if (QUERY_IS_INTERVAL_QUERY(pQuery)) { setIntervalQueryRange(pQInfo, pBlockInfo->window.skey); } else { // non-interval query @@ -5068,11 +4795,20 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { setEnvForEachBlock(pQInfo, *pTableQueryInfo, &blockInfo); } + if (pRuntimeEnv->stabledev) { + for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { + if (pQuery->pExpr1[i].base.functionId == TSDB_FUNC_STDDEV_DST) { + setParamValue(pRuntimeEnv); + break; + } + } + } + uint32_t status = 0; SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; - int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->windowResInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); + int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->resInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); if (ret != TSDB_CODE_SUCCESS) { break; } @@ -5366,7 +5102,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { taosArrayDestroy(s); // no results generated for current group, continue to try the next group - SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + SResultRowInfo *pWindowResInfo = &pRuntimeEnv->resultRowInfo; if (pWindowResInfo->size <= 0) { continue; } @@ -5383,17 +5119,18 @@ static void sequentialTableProcess(SQInfo *pQInfo) { qDebug("QInfo:%p generated groupby columns results %d rows for group %d completed", pQInfo, pWindowResInfo->size, pQInfo->groupIndex); - int32_t currentGroupIndex = pQInfo->groupIndex; pQuery->rec.rows = 0; - pQInfo->groupIndex = 0; - - ensureOutputBufferSimple(pRuntimeEnv, pWindowResInfo->size); - copyFromWindowResToSData(pQInfo, pWindowResInfo); + if (pWindowResInfo->size > pQuery->rec.capacity) { + expandBuffer(pRuntimeEnv, pWindowResInfo->size, pQInfo); + } - pQInfo->groupIndex = currentGroupIndex; // restore the group index + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, 0); + copyToOutputBuf(pQInfo, pWindowResInfo); assert(pQuery->rec.rows == pWindowResInfo->size); - resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo); + + resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->resultRowInfo); + cleanupGroupResInfo(&pQInfo->groupResInfo); break; } } else if 
(pRuntimeEnv->queryWindowIdentical && pRuntimeEnv->pTsBuf == NULL && !isTsCompQuery(pQuery)) { @@ -5425,6 +5162,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { assert(pQuery->prjInfo.vgroupLimit == -1); } + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + bool hasMoreBlock = true; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); SQueryCostInfo *summary = &pRuntimeEnv->summary; @@ -5449,7 +5188,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { setTagVal(pRuntimeEnv, pQuery->current->pTable); } - if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->current->windowResInfo.size > pQuery->prjInfo.vgroupLimit) { + if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->current->resInfo.size > pQuery->prjInfo.vgroupLimit) { pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; continue; @@ -5472,7 +5211,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; - int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->windowResInfo, pQueryHandle, &blockInfo, + int32_t ret = loadDataBlockOnDemand(pRuntimeEnv, &pQuery->current->resInfo, pQueryHandle, &blockInfo, &pStatis, &pDataBlock, &status); if (ret != TSDB_CODE_SUCCESS) { break; @@ -5484,7 +5223,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { continue; } - ensureOutputBuffer(pRuntimeEnv, &blockInfo); + ensureOutputBuffer(pRuntimeEnv, blockInfo.rows); int64_t prev = getNumOfResult(pRuntimeEnv); pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : blockInfo.rows - 1; @@ -5498,7 +5237,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pQuery->rec.rows = getNumOfResult(pRuntimeEnv); int64_t inc = pQuery->rec.rows - prev; - pQuery->current->windowResInfo.size += (int32_t) inc; + pQuery->current->resInfo.size += (int32_t) inc; // the flag may be set by tableApplyFunctionsOnBlock, clear it here CLEAR_QUERY_STATUS(pQuery, QUERY_COMPLETED); @@ -5516,8 +5255,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } else { // the limitation of output result is reached, set the query completed skipResults(pRuntimeEnv); - if (limitOperator(pRuntimeEnv)) { - setQueryStatus(pQuery, QUERY_COMPLETED); + if (limitOperator(pQuery, pQInfo)) { SET_STABLE_QUERY_OVER(pQInfo); break; } @@ -5540,8 +5278,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { * If the subgroup index is larger than 0, results generated by group by tbname,k is existed. * we need to return it to client in the first place. 
*/ - if (pQInfo->groupIndex > 0) { - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + if (hasRemainData(&pQInfo->groupResInfo)) { + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); pQuery->rec.total += pQuery->rec.rows; if (pQuery->rec.rows > 0) { @@ -5555,7 +5293,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } resetDefaultResInfoOutputBuf(pRuntimeEnv); - resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo); + resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->resultRowInfo); SArray *group = GET_TABLEGROUP(pQInfo, 0); assert(taosArrayGetSize(group) == pQInfo->tableqinfoGroupInfo.numOfTables && @@ -5584,7 +5322,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { skipResults(pRuntimeEnv); // the limitation of output result is reached, set the query completed - if (limitOperator(pRuntimeEnv)) { + if (limitOperator(pQuery, pQInfo)) { SET_STABLE_QUERY_OVER(pQInfo); break; } @@ -5709,11 +5447,11 @@ static void doCloseAllTimeWindow(SQInfo *pQInfo) { size_t num = taosArrayGetSize(group); for (int32_t j = 0; j < num; ++j) { STableQueryInfo* item = taosArrayGetP(group, j); - closeAllResultRows(&item->windowResInfo); + closeAllResultRows(&item->resInfo); } } } else { // close results for group result - closeAllResultRows(&pQInfo->runtimeEnv.windowResInfo); + closeAllResultRows(&pQInfo->runtimeEnv.resultRowInfo); } } @@ -5721,18 +5459,14 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pRuntimeEnv->pQuery; - if (pQInfo->groupIndex > 0) { - /* - * if the groupIndex > 0, the query process must be completed yet, we only need to - * copy the data into output buffer - */ + if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { if (QUERY_IS_INTERVAL_QUERY(pQuery)) { copyResToQueryResultBuf(pQInfo, pQuery); } else { - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); } - qDebug("QInfo:%p current:%"PRId64", total:%"PRId64"", pQInfo, pQuery->rec.rows, pQuery->rec.total); + qDebug("QInfo:%p current:%"PRId64", total:%"PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); return; } @@ -5774,18 +5508,10 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { } if (QUERY_IS_INTERVAL_QUERY(pQuery) || isSumAvgRateQuery(pQuery)) { - int32_t code = mergeGroupResult(pQInfo); - if (code == TSDB_CODE_SUCCESS) { - copyResToQueryResultBuf(pQInfo, pQuery); - -#ifdef _DEBUG_VIEW - displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num); -#endif - } else { // set the error code - pQInfo->code = code; - } + copyResToQueryResultBuf(pQInfo, pQuery); } else { // not a interval query - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, 0); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); } // handle the limitation of output buffer @@ -5889,7 +5615,7 @@ static void tableAggregationProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) // TODO limit/offset refactor to be one operator skipResults(pRuntimeEnv); - limitOperator(pRuntimeEnv); + limitOperator(pQuery, pQInfo); } static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { @@ -5931,7 +5657,7 @@ static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) resetDefaultResInfoOutputBuf(pRuntimeEnv); } - limitOperator(pRuntimeEnv); + limitOperator(pQuery, pQInfo); if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { qDebug("QInfo:%p query paused due 
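
Across the sequentialTableProcess() hunks above, the old groupIndex save/restore dance is replaced by an explicit lifecycle on SGroupResInfo: snapshot the closed result rows, drain them through a cursor, then release. A sketch of that sequence under the semantics of the qUtil.c additions further down in this patch (the container type is simplified; the real code keeps the snapshot in a taosArray):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { void **pRows; int32_t size; int32_t index; } GroupSketch;

    /* initGroupResInfo() analogue: shallow-copy the row pointers and reset the
     * cursor; assumes *g was zero-initialized before its first use */
    static void initGroupSketch(GroupSketch *g, void *const *rows, int32_t n, int32_t offset) {
      free(g->pRows);                        /* re-init releases the old snapshot */
      g->pRows = malloc((size_t)n * sizeof(void *));
      if (g->pRows == NULL) { g->size = 0; g->index = 0; return; }
      memcpy(g->pRows, rows, (size_t)n * sizeof(void *));
      g->size  = n;
      g->index = offset;           /* limit.offset can skip rows up front */
    }

    static bool hasRemainSketch(const GroupSketch *g) {
      return g->pRows != NULL && g->index < g->size;
    }

    /* cleanupGroupResInfo() analogue: drop the snapshot only; the rows
     * themselves remain owned by the SResultRowInfo */
    static void cleanupGroupSketch(GroupSketch *g) {
      free(g->pRows);
      g->pRows = NULL;
      g->index = 0;
    }

copyToOutputBuf() then drains through hasRemainData() until the snapshot is exhausted, which is why the old restore of groupIndex is no longer needed.
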
to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, pQuery->current->lastKey, pQuery->window.ekey); @@ -5945,6 +5671,40 @@ static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) } } +static void copyAndFillResult(SQInfo* pQInfo) { + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + SQuery* pQuery = pRuntimeEnv->pQuery; + + while(1) { + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); + doSecondaryArithmeticProcess(pQuery); + + TSKEY lastKey = 0; + if (!hasRemainData(&pQInfo->groupResInfo)) { + lastKey = pQuery->window.ekey; + } else { + lastKey = ((TSKEY*)pQuery->sdata[0]->data)[pQuery->rec.rows - 1]; + } + + assert(lastKey <= pQuery->window.ekey); + + taosFillSetStartInfo(pRuntimeEnv->pFillInfo, (int32_t)pQuery->rec.rows, lastKey); + taosFillSetDataBlockFromFilePage(pRuntimeEnv->pFillInfo, (const tFilePage **)pQuery->sdata); + + pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata); + + if (pQuery->rec.rows > 0) { + limitOperator(pQuery, pQInfo); + break; + } + + // here the pQuery->rec.rows == 0 + if (!hasRemainData(&pQInfo->groupResInfo) && !taosFillHasMoreResults(pRuntimeEnv->pFillInfo)) { + break; + } + } +} + // handle time interval query on table static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &(pQInfo->runtimeEnv); @@ -5968,69 +5728,56 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { pQuery->rec.rows = 0; // not fill or no result generated during this query - if (pQuery->fillType == TSDB_FILL_NONE || pRuntimeEnv->windowResInfo.size == 0 || isPointInterpoQuery(pQuery)) { + if (pQuery->fillType == TSDB_FILL_NONE || pRuntimeEnv->resultRowInfo.size == 0 || isPointInterpoQuery(pQuery)) { // all data scanned, the group by normal column can return - int32_t numOfClosed = numOfClosedResultRows(&pRuntimeEnv->windowResInfo); - if (pQuery->limit.offset > numOfClosed) { + int32_t numOfClosed = numOfClosedResultRows(&pRuntimeEnv->resultRowInfo); + if (pQuery->limit.offset > numOfClosed || numOfClosed == 0) { return; } - pQInfo->groupIndex = (int32_t) pQuery->limit.offset; - - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, (int32_t) pQuery->limit.offset); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); doSecondaryArithmeticProcess(pQuery); - limitOperator(pRuntimeEnv); + limitOperator(pQuery, pQInfo); } else { - - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - doSecondaryArithmeticProcess(pQuery); - - taosFillSetStartInfo(pRuntimeEnv->pFillInfo, (int32_t)pQuery->rec.rows, pQuery->window.ekey); - taosFillSetDataBlockFromFilePage(pRuntimeEnv->pFillInfo, (const tFilePage **)pQuery->sdata); - - int32_t numOfFilled = 0; - pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled); - - if (pQuery->rec.rows > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - limitOperator(pRuntimeEnv); - } + initGroupResInfo(&pQInfo->groupResInfo, &pRuntimeEnv->resultRowInfo, 0); + copyAndFillResult(pQInfo); } } -static void tableQueryImpl(SQInfo *pQInfo) { +void tableQueryImpl(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; - if (hasNotReturnedResults(pRuntimeEnv)) { + if (hasNotReturnedResults(pRuntimeEnv, &pQInfo->groupResInfo)) { if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { /* * There are remain 
results that are not returned due to result interpolation * So, we do keep in this procedure instead of launching retrieve procedure for next results. */ - int32_t numOfFilled = 0; - pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata, &numOfFilled); - + pQuery->rec.rows = doFillGapsInResults(pRuntimeEnv, (tFilePage **)pQuery->sdata); if (pQuery->rec.rows > 0) { - limitOperator(pRuntimeEnv); + limitOperator(pQuery, pQInfo); + qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); + } else { + copyAndFillResult(pQInfo); } - qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); } else { pQuery->rec.rows = 0; - assert(pRuntimeEnv->windowResInfo.size > 0); + assert(pRuntimeEnv->resultRowInfo.size > 0); + copyToOutputBuf(pQInfo, &pRuntimeEnv->resultRowInfo); + doSecondaryArithmeticProcess(pQuery); - if (pQInfo->groupIndex < pRuntimeEnv->windowResInfo.size) { - copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); + if (pQuery->rec.rows > 0) { + limitOperator(pQuery, pQInfo); } if (pQuery->rec.rows > 0) { qDebug("QInfo:%p %" PRId64 " rows returned from group results, total:%" PRId64 "", pQInfo, pQuery->rec.rows, pQuery->rec.total); - } - - // there are not data remains - if (pQuery->rec.rows <= 0 || pRuntimeEnv->windowResInfo.size <= pQInfo->groupIndex) { + } else { qDebug("QInfo:%p query over, %" PRId64 " rows are returned", pQInfo, pQuery->rec.total); } } @@ -6062,7 +5809,8 @@ static void tableQueryImpl(SQInfo *pQInfo) { pRuntimeEnv->summary.elapsedTime += (taosGetTimestampUs() - st); assert(pQInfo->tableqinfoGroupInfo.numOfTables == 1); } -static void buildTableBlockDistResult(SQInfo *pQInfo) { + +void buildTableBlockDistResult(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->pos = 0; @@ -6115,7 +5863,7 @@ static void buildTableBlockDistResult(SQInfo *pQInfo) { return; } -static void stableQueryImpl(SQInfo *pQInfo) { +void stableQueryImpl(SQInfo *pQInfo) { SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pRuntimeEnv->pQuery; pQuery->rec.rows = 0; @@ -6127,7 +5875,6 @@ static void stableQueryImpl(SQInfo *pQInfo) { multiTableQueryProcess(pQInfo); } else { assert(pQuery->checkResultBuf == 1 || isPointInterpoQuery(pQuery) || pRuntimeEnv->groupbyColumn); - sequentialTableProcess(pQInfo); } @@ -6247,37 +5994,6 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p return pMsg; } -typedef struct SQueryParam { - char *sql; - char *tagCond; - char *tbnameCond; - char *prevResult; - SArray *pTableIdList; - SSqlFuncMsg **pExprMsg; - SSqlFuncMsg **pSecExprMsg; - SExprInfo *pExprs; - SExprInfo *pSecExprs; - - SColIndex *pGroupColIndex; - SColumnInfo *pTagColumnInfo; - SSqlGroupbyExpr *pGroupbyExpr; -} SQueryParam; - -static void freeParam(SQueryParam *param) { - tfree(param->sql); - tfree(param->tagCond); - tfree(param->tbnameCond); - tfree(param->pTableIdList); - tfree(param->pExprMsg); - tfree(param->pSecExprMsg); - tfree(param->pExprs); - tfree(param->pSecExprs); - tfree(param->pGroupColIndex); - tfree(param->pTagColumnInfo); - tfree(param->pGroupbyExpr); - tfree(param->prevResult); -} - /** * pQueryMsg->head has been converted before this function is called. 
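
The loop structure of the new copyAndFillResult() above is worth restating on its own: copy a batch, choose lastKey depending on whether buffered rows remain, run the gap filler, and return only once the fill step produces rows or both sources are exhausted. A standalone sketch under that reading — copy_batch/fill_batch/has_remain/fill_has_more are assumed stand-ins, not real TDengine APIs:

    #include <stdbool.h>
    #include <stdint.h>

    typedef int64_t TSKEY;

    extern int32_t copy_batch(TSKEY *outTs);                /* rows copied into outTs */
    extern int32_t fill_batch(int32_t rows, TSKEY lastKey); /* rows after gap filling */
    extern bool    has_remain(void);                        /* buffered rows left?    */
    extern bool    fill_has_more(void);                     /* filler still pending?  */

    static int32_t copyAndFillSketch(TSKEY windowEnd, TSKEY *ts) {
      for (;;) {
        int32_t rows = copy_batch(ts);
        /* all buffered rows copied -> fill to the end of the query window;
         * otherwise only up to the last timestamp actually handed to the filler */
        TSKEY lastKey = (has_remain() && rows > 0) ? ts[rows - 1] : windowEnd;
        rows = fill_batch(rows, lastKey);
        if (rows > 0) {
          return rows;                        /* something to return to the client */
        }
        if (!has_remain() && !fill_has_more()) {
          return 0;                           /* both sources are exhausted */
        }
      }
    }
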
* @@ -6286,7 +6002,7 @@ static void freeParam(SQueryParam *param) { * @param pExpr * @return */ -static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { +int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { int32_t code = TSDB_CODE_SUCCESS; if (taosCheckVersion(pQueryMsg->version, version, 3) != 0) { @@ -6604,7 +6320,7 @@ static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable return TSDB_CODE_SUCCESS; } -static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, +int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, SColumnInfo* pTagCols) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -6714,7 +6430,7 @@ static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t num return TSDB_CODE_SUCCESS; } -static SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code) { +SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code) { if (pQueryMsg->numOfGroupCols == 0) { return NULL; } @@ -6831,8 +6547,6 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) { } } -static void freeQInfo(SQInfo *pQInfo); - static void calResultBufSize(SQuery* pQuery) { const int32_t RESULT_MSG_MIN_SIZE = 1024 * (1024 + 512); // bytes const int32_t RESULT_MSG_MIN_ROWS = 8192; @@ -6852,7 +6566,7 @@ static void calResultBufSize(SQuery* pQuery) { } } -static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, +SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -7060,7 +6774,7 @@ _cleanup: return NULL; } -static bool isValidQInfo(void *param) { +bool isValidQInfo(void *param) { SQInfo *pQInfo = (SQInfo *)param; if (pQInfo == NULL) { return false; @@ -7074,7 +6788,7 @@ static bool isValidQInfo(void *param) { return (sig == (uint64_t)pQInfo); } -static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable) { +int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable) { int32_t code = TSDB_CODE_SUCCESS; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -7125,7 +6839,7 @@ _error: return code; } -static void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { +void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { if (pFilter == NULL || numOfFilters == 0) { return; } @@ -7179,7 +6893,7 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { return NULL; } -static void freeQInfo(SQInfo *pQInfo) { +void freeQInfo(SQInfo *pQInfo) { if (!isValidQInfo(pQInfo)) { return; } @@ -7248,7 +6962,7 @@ static void freeQInfo(SQInfo *pQInfo) { tfree(pQInfo); } -static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { +size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; /* @@ -7271,7 +6985,7 @@ static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { } } -static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { +int32_t doDumpQueryResult(SQInfo 
*pQInfo, char *data) { // the remained number of retrieved rows, not the interpolated result SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -7326,154 +7040,7 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { return TSDB_CODE_SUCCESS; } -typedef struct SQueryMgmt { - pthread_mutex_t lock; - SCacheObj *qinfoPool; // query handle pool - int32_t vgId; - bool closed; -} SQueryMgmt; - -int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qinfo_t* pQInfo) { - assert(pQueryMsg != NULL && tsdb != NULL); - - int32_t code = TSDB_CODE_SUCCESS; - - SQueryParam param = {0}; - code = convertQueryMsg(pQueryMsg, ¶m); - if (code != TSDB_CODE_SUCCESS) { - goto _over; - } - - if (pQueryMsg->numOfTables <= 0) { - qError("Invalid number of tables to query, numOfTables:%d", pQueryMsg->numOfTables); - code = TSDB_CODE_QRY_INVALID_MSG; - goto _over; - } - - if (param.pTableIdList == NULL || taosArrayGetSize(param.pTableIdList) == 0) { - qError("qmsg:%p, SQueryTableMsg wrong format", pQueryMsg); - code = TSDB_CODE_QRY_INVALID_MSG; - goto _over; - } - - if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { - goto _over; - } - - if (param.pSecExprMsg != NULL) { - if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { - goto _over; - } - } - - param.pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, param.pGroupColIndex, &code); - if ((param.pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { - goto _over; - } - - bool isSTableQuery = false; - STableGroupInfo tableGroupInfo = {0}; - int64_t st = taosGetTimestampUs(); - - if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_TABLE_QUERY)) { - STableIdInfo *id = taosArrayGet(param.pTableIdList, 0); - - qDebug("qmsg:%p query normal table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - if ((code = tsdbGetOneTableGroup(tsdb, id->uid, pQueryMsg->window.skey, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { - goto _over; - } - } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) { - isSTableQuery = true; - - // also note there's possibility that only one table in the super table - if (!TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY)) { - STableIdInfo *id = taosArrayGet(param.pTableIdList, 0); - - // group by normal column, do not pass the group by condition to tsdb to group table into different group - int32_t numOfGroupByCols = pQueryMsg->numOfGroupCols; - if (pQueryMsg->numOfGroupCols == 1 && !TSDB_COL_IS_TAG(param.pGroupColIndex->flag)) { - numOfGroupByCols = 0; - } - - qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); - code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen, - pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols); - - if (code != TSDB_CODE_SUCCESS) { - qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); - goto _over; - } - } else { - code = tsdbGetTableGroupFromIdList(tsdb, param.pTableIdList, &tableGroupInfo); - if (code != TSDB_CODE_SUCCESS) { - goto _over; - } - - qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); - } - - int64_t el = taosGetTimestampUs() - st; 
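
The freeParam() helper removed above (it reappears verbatim in the new queryMain.c later in this patch) leans entirely on tfree() for its unconditional member-by-member cleanup. That only works if tfree() frees and then clears the pointer; a minimal equivalent under that assumption (treat the exact macro definition as an assumption, not the library's literal source):

    #include <stdlib.h>

    /* free-and-null, so a later cleanup pass sees NULL and free() is a no-op */
    #define tfree_sketch(p) do { free(p); (p) = NULL; } while (0)

    int main(void) {
      char *sql = malloc(16);    /* e.g. the SQueryParam sql field */
      tfree_sketch(sql);         /* frees and resets sql to NULL */
      tfree_sketch(sql);         /* second pass: free(NULL) is a defined no-op */
      return 0;
    }
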
- qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); - } else { - assert(0); - } - - code = checkForQueryBuf(tableGroupInfo.numOfTables); - if (code != TSDB_CODE_SUCCESS) { // not enough query buffer, abort - goto _over; - } - - (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, param.pTagColumnInfo, isSTableQuery, param.sql); - - param.sql = NULL; - param.pExprs = NULL; - param.pSecExprs = NULL; - param.pGroupbyExpr = NULL; - param.pTagColumnInfo = NULL; - - if ((*pQInfo) == NULL) { - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _over; - } - - code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, ¶m, isSTableQuery); - -_over: - if (param.pGroupbyExpr != NULL) { - taosArrayDestroy(param.pGroupbyExpr->columnInfo); - } - - taosArrayDestroy(param.pTableIdList); - param.pTableIdList = NULL; - - freeParam(¶m); - - for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { - SColumnInfo* column = pQueryMsg->colList + i; - freeColumnFilterInfo(column->filters, column->numOfFilters); - } - - //pQInfo already freed in initQInfo, but *pQInfo may not pointer to null; - if (code != TSDB_CODE_SUCCESS) { - *pQInfo = NULL; - } - - // if failed to add ref for all tables in this query, abort current query - return code; -} - -void qDestroyQueryInfo(qinfo_t qHandle) { - SQInfo* pQInfo = (SQInfo*) qHandle; - if (!isValidQInfo(pQInfo)) { - return; - } - - qDebug("QInfo:%p query completed", pQInfo); - queryCostStatis(pQInfo); // print the query cost summary - freeQInfo(pQInfo); -} - -static bool doBuildResCheck(SQInfo* pQInfo) { +bool doBuildResCheck(SQInfo* pQInfo) { bool buildRes = false; pthread_mutex_lock(&pQInfo->lock); @@ -7493,212 +7060,20 @@ static bool doBuildResCheck(SQInfo* pQInfo) { return buildRes; } -bool qTableQuery(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - assert(pQInfo && pQInfo->signature == pQInfo); - int64_t threadId = taosGetSelfPthreadId(); - - int64_t curOwner = 0; - if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { - qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); - pQInfo->code = TSDB_CODE_QRY_IN_EXEC; - return false; - } - - pQInfo->startExecTs = taosGetTimestampSec(); - - if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p it is already killed, abort", pQInfo); - return doBuildResCheck(pQInfo); - } - - if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { - qDebug("QInfo:%p no table exists for query, abort", pQInfo); - setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED); - return doBuildResCheck(pQInfo); - } - - // error occurs, record the error code and return to client - int32_t ret = setjmp(pQInfo->runtimeEnv.env); - if (ret != TSDB_CODE_SUCCESS) { - pQInfo->code = ret; - qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); - return doBuildResCheck(pQInfo); - } - - qDebug("QInfo:%p query task is launched", pQInfo); - - SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) { - assert(pQInfo->runtimeEnv.pQueryHandle == NULL); - buildTagQueryResult(pQInfo); - } else if (pQInfo->runtimeEnv.stableQuery) { - stableQueryImpl(pQInfo); - } else if (pQInfo->runtimeEnv.queryBlockDist){ - buildTableBlockDistResult(pQInfo); - } else { - tableQueryImpl(pQInfo); - } - - SQuery* pQuery = pRuntimeEnv->pQuery; - if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p query is killed", pQInfo); - } else if 
(pQuery->rec.rows == 0) { - qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); - } else { - qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", - pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); - } - - return doBuildResCheck(pQInfo); -} - -int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContext) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - qError("QInfo:%p invalid qhandle", pQInfo); - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - *buildRes = false; - if (IS_QUERY_KILLED(pQInfo)) { - qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code); - return pQInfo->code; - } - - int32_t code = TSDB_CODE_SUCCESS; - - if (tsRetrieveBlockingModel) { - pQInfo->rspContext = pRspContext; - tsem_wait(&pQInfo->ready); - *buildRes = true; - code = pQInfo->code; - } else { - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - pthread_mutex_lock(&pQInfo->lock); - - assert(pQInfo->rspContext == NULL); - if (pQInfo->dataReady == QUERY_RESULT_READY) { - *buildRes = true; - qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->resultRowSize, - pQuery->rec.rows, tstrerror(pQInfo->code)); - } else { - *buildRes = false; - qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); - pQInfo->rspContext = pRspContext; - assert(pQInfo->rspContext != NULL); - } - - code = pQInfo->code; - pthread_mutex_unlock(&pQInfo->lock); - } - - return code; -} - -int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - size_t size = getResultSize(pQInfo, &pQuery->rec.rows); - - size += sizeof(int32_t); - size += sizeof(STableIdInfo) * taosHashGetSize(pQInfo->arrTableIdInfo); - - *contLen = (int32_t)(size + sizeof(SRetrieveTableRsp)); - - // todo proper handle failed to allocate memory, - // current solution only avoid crash, but cannot return error code to client - *pRsp = (SRetrieveTableRsp *)rpcMallocCont(*contLen); - if (*pRsp == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } - - (*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows); - - if (pQInfo->code == TSDB_CODE_SUCCESS) { - (*pRsp)->offset = htobe64(pQuery->limit.offset); - (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); - } else { - (*pRsp)->offset = 0; - (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); - } - - (*pRsp)->precision = htons(pQuery->precision); - if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { - doDumpQueryResult(pQInfo, (*pRsp)->data); - } else { - setQueryStatus(pQuery, QUERY_OVER); - } - - pQInfo->rspContext = NULL; - pQInfo->dataReady = QUERY_RESULT_NOT_READY; - - if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { - // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. 
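
The size arithmetic in qDumpRetrieveResult() (visible in the removed block here, and unchanged in its new queryMain.c home below) is: result payload, plus a table-id section, plus the fixed response header. A restatement with placeholder struct layouts, since the real wire structs are defined elsewhere and the leading int32_t appears to be the entry-count prefix:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint64_t uid; int32_t tid; int64_t key; } TableIdSketch;  /* ~STableIdInfo      */
    typedef struct { int32_t numOfRows; int8_t completed; }   RspHeaderSketch; /* ~SRetrieveTableRsp */

    static size_t contLenSketch(size_t resultBytes, size_t numOfTableIds) {
      size_t size = resultBytes;                       /* getResultSize() output          */
      size += sizeof(int32_t);                         /* count of table-id entries       */
      size += sizeof(TableIdSketch) * numOfTableIds;   /* one entry per arrTableIdInfo hit */
      return size + sizeof(RspHeaderSketch);           /* header allocated in front       */
    }
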
- *continueExec = false; - (*pRsp)->completed = 1; // notify no more result to client - } else { - *continueExec = true; - qDebug("QInfo:%p has more results to retrieve", pQInfo); - } - - return pQInfo->code; -} - -int32_t qQueryCompleted(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - SQuery* pQuery = pQInfo->runtimeEnv.pQuery; - return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); -} - -int32_t qKillQuery(qinfo_t qinfo) { - SQInfo *pQInfo = (SQInfo *)qinfo; - - if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - return TSDB_CODE_QRY_INVALID_QHANDLE; - } - - setQueryKilled(pQInfo); - - // Wait for the query executing thread being stopped/ - // Once the query is stopped, the owner of qHandle will be cleared immediately. - while (pQInfo->owner != 0) { - taosMsleep(100); +static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) { + if (val == NULL) { + setNull(output, type, bytes); + return; } - return TSDB_CODE_SUCCESS; -} - -static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) { if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - if (val == NULL) { - setVardataNull(output, type); - } else { - memcpy(output, val, varDataTLen(val)); - } + memcpy(output, val, varDataTLen(val)); } else { - if (val == NULL) { - setNull(output, type, bytes); - } else { // todo here stop will cause client crash - memcpy(output, val, bytes); - } + memcpy(output, val, bytes); } } -static void buildTagQueryResult(SQInfo* pQInfo) { +void buildTagQueryResult(SQInfo* pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; @@ -7872,157 +7247,3 @@ void releaseQueryBuf(size_t numOfTables) { // restore value is not enough buffer available atomic_add_fetch_64(&tsQueryBufferSizeBytes, t); } - -void* qGetResultRetrieveMsg(qinfo_t qinfo) { - SQInfo* pQInfo = (SQInfo*) qinfo; - assert(pQInfo != NULL); - - return pQInfo->rspContext; -} - -void freeqinfoFn(void *qhandle) { - void** handle = qhandle; - if (handle == NULL || *handle == NULL) { - return; - } - - qKillQuery(*handle); - qDestroyQueryInfo(*handle); -} - -void* qOpenQueryMgmt(int32_t vgId) { - const int32_t REFRESH_HANDLE_INTERVAL = 30; // every 30 seconds, refresh handle pool - - char cacheName[128] = {0}; - sprintf(cacheName, "qhandle_%d", vgId); - - SQueryMgmt* pQueryMgmt = calloc(1, sizeof(SQueryMgmt)); - if (pQueryMgmt == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return NULL; - } - - pQueryMgmt->qinfoPool = taosCacheInit(TSDB_CACHE_PTR_KEY, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName); - pQueryMgmt->closed = false; - pQueryMgmt->vgId = vgId; - - pthread_mutex_init(&pQueryMgmt->lock, NULL); - - qDebug("vgId:%d, open querymgmt success", vgId); - return pQueryMgmt; -} - -static void queryMgmtKillQueryFn(void* handle) { - void** fp = (void**)handle; - qKillQuery(*fp); -} - -void qQueryMgmtNotifyClosed(void* pQMgmt) { - if (pQMgmt == NULL) { - return; - } - - SQueryMgmt* pQueryMgmt = pQMgmt; - qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId); - - pthread_mutex_lock(&pQueryMgmt->lock); - pQueryMgmt->closed = true; - pthread_mutex_unlock(&pQueryMgmt->lock); - - taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); -} - -void qQueryMgmtReOpen(void *pQMgmt) { - if (pQMgmt == NULL) { - return; - } - - SQueryMgmt *pQueryMgmt = pQMgmt; - 
qDebug("vgId:%d, set querymgmt reopen", pQueryMgmt->vgId); - - pthread_mutex_lock(&pQueryMgmt->lock); - pQueryMgmt->closed = false; - pthread_mutex_unlock(&pQueryMgmt->lock); -} - -void qCleanupQueryMgmt(void* pQMgmt) { - if (pQMgmt == NULL) { - return; - } - - SQueryMgmt* pQueryMgmt = pQMgmt; - int32_t vgId = pQueryMgmt->vgId; - - assert(pQueryMgmt->closed); - - SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool; - pQueryMgmt->qinfoPool = NULL; - - taosCacheCleanup(pqinfoPool); - pthread_mutex_destroy(&pQueryMgmt->lock); - tfree(pQueryMgmt); - - qDebug("vgId:%d, queryMgmt cleanup completed", vgId); -} - -void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { - if (pMgmt == NULL) { - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } - - SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL) { - qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo); - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } - - pthread_mutex_lock(&pQueryMgmt->lock); - if (pQueryMgmt->closed) { - pthread_mutex_unlock(&pQueryMgmt->lock); - qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } else { - TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; - void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), - (getMaximumIdleDurationSec()*1000)); - pthread_mutex_unlock(&pQueryMgmt->lock); - - return handle; - } -} - -void** qAcquireQInfo(void* pMgmt, uint64_t _key) { - SQueryMgmt *pQueryMgmt = pMgmt; - - if (pQueryMgmt->closed) { - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - return NULL; - } - - if (pQueryMgmt->qinfoPool == NULL) { - terrno = TSDB_CODE_QRY_INVALID_QHANDLE; - return NULL; - } - - TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; - void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); - if (handle == NULL || *handle == NULL) { - terrno = TSDB_CODE_QRY_INVALID_QHANDLE; - return NULL; - } else { - return handle; - } -} - -void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { - SQueryMgmt *pQueryMgmt = pMgmt; - if (pQueryMgmt->qinfoPool == NULL) { - return NULL; - } - - taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); - return 0; -} diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 2efd4f76ea3f3efbbc1c52df4de2e743b65908f3..07bb307aba22ed0d000aafea659098cf44a9999b 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -289,6 +289,28 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { return pExpr; } + + +tSQLExpr *tSqlExprClone(tSQLExpr *pSrc) { + tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr)); + + memcpy(pExpr, pSrc, sizeof(*pSrc)); + + if (pSrc->pLeft) { + pExpr->pLeft = tSqlExprClone(pSrc->pLeft); + } + + if (pSrc->pRight) { + pExpr->pRight = tSqlExprClone(pSrc->pRight); + } + + //we don't clone pParam now because clone is only used for between/and + assert(pSrc->pParam == NULL); + + return pExpr; +} + + void tSqlExprNodeDestroy(tSQLExpr *pExpr) { if (pExpr == NULL) { return; @@ -309,8 +331,9 @@ void tSqlExprDestroy(tSQLExpr *pExpr) { } tSqlExprDestroy(pExpr->pLeft); + pExpr->pLeft = NULL; tSqlExprDestroy(pExpr->pRight); - + pExpr->pRight = NULL; tSqlExprNodeDestroy(pExpr); } @@ -496,7 +519,8 @@ static void freeVariant(void *pItem) { } void freeCreateTableInfo(void* p) { - SCreatedTableInfo* pInfo = 
(SCreatedTableInfo*) p; + SCreatedTableInfo* pInfo = (SCreatedTableInfo*) p; + taosArrayDestroy(pInfo->pTagNames); taosArrayDestroyEx(pInfo->pTagVals, freeVariant); tfree(pInfo->fullname); tfree(pInfo->tagdata.data); @@ -574,11 +598,12 @@ SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSe return pCreate; } -SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists) { +SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagNames, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists) { SCreatedTableInfo info; memset(&info, 0, sizeof(SCreatedTableInfo)); info.name = *pToken; + info.pTagNames = pTagNames; info.pTagVals = pTagVals; info.stableName = *pTableName; info.igExist = (igExists->n > 0)? 1:0; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 46079e6830f91712490bcdcdd61de4902a3a5015..b7a7fd28e9dc2f013457e22446ff7bc41d3fef6c 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -20,6 +20,14 @@ #include "qExecutor.h" #include "qUtil.h" #include "tbuffer.h" +#include "tlosertree.h" +#include "queryLog.h" + +typedef struct SCompSupporter { + STableQueryInfo **pTableQueryInfo; + int32_t *rowIndex; + int32_t order; +} SCompSupporter; int32_t getOutputInterResultBufSize(SQuery* pQuery) { int32_t size = 0; @@ -322,4 +330,243 @@ void freeInterResult(void* param) { } taosArrayDestroy(pResult->pResult); -} \ No newline at end of file +} + +void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { + assert(pGroupResInfo != NULL); + + taosArrayDestroy(pGroupResInfo->pRows); + pGroupResInfo->pRows = NULL; + pGroupResInfo->index = 0; +} + +void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset) { + if (pGroupResInfo->pRows != NULL) { + taosArrayDestroy(pGroupResInfo->pRows); + } + + pGroupResInfo->pRows = taosArrayFromList(pResultInfo->pResult, pResultInfo->size, POINTER_BYTES); + pGroupResInfo->index = offset; + + assert(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); +} + +bool hasRemainData(SGroupResInfo* pGroupResInfo) { + if (pGroupResInfo->pRows == NULL) { + return false; + } + + return pGroupResInfo->index < taosArrayGetSize(pGroupResInfo->pRows); +} + +bool incNextGroup(SGroupResInfo* pGroupResInfo) { + return (++pGroupResInfo->currentGroup) < pGroupResInfo->totalGroup; +} + +int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { + assert(pGroupResInfo != NULL); + if (pGroupResInfo->pRows == 0) { + return 0; + } + + return (int32_t) taosArrayGetSize(pGroupResInfo->pRows); +} + +static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow) { + SQuery* pQuery = pRuntimeEnv->pQuery; + + for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { + int32_t functionId = pQuery->pExpr1[j].base.functionId; + + /* + * ts, tag, tagprj function can not decide the output number of current query + * the number of output result is decided by main output + */ + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { + continue; + } + + SResultRowCellInfo *pResultInfo = getResultCell(pRuntimeEnv, pResultRow, j); + assert(pResultInfo != NULL); + + if (pResultInfo->numOfRes > 0) { + return pResultInfo->numOfRes; + } + } + + return 0; +} + +static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) { + int32_t left = *(int32_t *)pLeft; + int32_t right = *(int32_t *)pRight; + + SCompSupporter * supporter = (SCompSupporter 
*)param; + + int32_t leftPos = supporter->rowIndex[left]; + int32_t rightPos = supporter->rowIndex[right]; + + /* left source is exhausted */ + if (leftPos == -1) { + return 1; + } + + /* right source is exhausted*/ + if (rightPos == -1) { + return -1; + } + + STableQueryInfo** pList = supporter->pTableQueryInfo; + + SResultRowInfo *pWindowResInfo1 = &(pList[left]->resInfo); + SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos); + TSKEY leftTimestamp = pWindowRes1->win.skey; + + SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo); + SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); + TSKEY rightTimestamp = pWindowRes2->win.skey; + + if (leftTimestamp == rightTimestamp) { + return 0; + } + + if (supporter->order == TSDB_ORDER_ASC) { + return (leftTimestamp > rightTimestamp)? 1:-1; + } else { + return (leftTimestamp < rightTimestamp)? 1:-1; + } +} + +static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, void* qinfo) { + bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery); + + int32_t code = TSDB_CODE_SUCCESS; + + int32_t *posList = NULL; + SLoserTreeInfo *pTree = NULL; + STableQueryInfo **pTableQueryInfoList = NULL; + + size_t size = taosArrayGetSize(pTableList); + if (pGroupResInfo->pRows == NULL) { + pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES); + } + + posList = calloc(size, sizeof(int32_t)); + pTableQueryInfoList = malloc(POINTER_BYTES * size); + + if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL || pGroupResInfo->pRows == NULL) { + qError("QInfo:%p failed alloc memory", qinfo); + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _end; + } + + int32_t numOfTables = 0; + for (int32_t i = 0; i < size; ++i) { + STableQueryInfo *item = taosArrayGetP(pTableList, i); + if (item->resInfo.size > 0) { + pTableQueryInfoList[numOfTables++] = item; + } + } + + // there is no data in current group + // no need to merge results since only one table in each group + if (numOfTables == 0) { + goto _end; + } + + SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQuery->order.order}; + + int32_t ret = tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn); + if (ret != TSDB_CODE_SUCCESS) { + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _end; + } + + int64_t lastTimestamp = ascQuery? 
INT64_MIN:INT64_MAX; + int64_t startt = taosGetTimestampMs(); + + while (1) { + int32_t tableIndex = pTree->pNode[0].index; + + SResultRowInfo *pWindowResInfo = &pTableQueryInfoList[tableIndex]->resInfo; + SResultRow *pWindowRes = getResultRow(pWindowResInfo, cs.rowIndex[tableIndex]); + + int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pWindowRes); + if (num <= 0) { + cs.rowIndex[tableIndex] += 1; + + if (cs.rowIndex[tableIndex] >= pWindowResInfo->size) { + cs.rowIndex[tableIndex] = -1; + if (--numOfTables == 0) { // all input sources are exhausted + break; + } + } + } else { + assert((pWindowRes->win.skey >= lastTimestamp && ascQuery) || (pWindowRes->win.skey <= lastTimestamp && !ascQuery)); + + if (pWindowRes->win.skey != lastTimestamp) { + taosArrayPush(pGroupResInfo->pRows, &pWindowRes); + pWindowRes->numOfRows = (uint32_t) num; + } + + lastTimestamp = pWindowRes->win.skey; + + // move to the next row of current entry + if ((++cs.rowIndex[tableIndex]) >= pWindowResInfo->size) { + cs.rowIndex[tableIndex] = -1; + + // all input sources are exhausted + if ((--numOfTables) == 0) { + break; + } + } + } + + tLoserTreeAdjust(pTree, tableIndex + pTree->numOfEntries); + } + + int64_t endt = taosGetTimestampMs(); + + qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", qinfo, + pGroupResInfo->currentGroup, endt - startt); + + _end: + tfree(pTableQueryInfoList); + tfree(posList); + tfree(pTree); + + return code; +} + +int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo) { + int64_t st = taosGetTimestampUs(); + + while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { + SArray *group = GET_TABLEGROUP(pQInfo, pGroupResInfo->currentGroup); + + int32_t ret = mergeIntoGroupResultImpl(&pQInfo->runtimeEnv, pGroupResInfo, group, pQInfo); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + // this group generates at least one result, return results + if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { + break; + } + + qDebug("QInfo:%p no result in group %d, continue", pQInfo, pGroupResInfo->currentGroup); + cleanupGroupResInfo(pGroupResInfo); + incNextGroup(pGroupResInfo); + } + + if (pGroupResInfo->currentGroup >= pGroupResInfo->totalGroup && !hasRemainData(pGroupResInfo)) { + SET_STABLE_QUERY_OVER(pQInfo); + } + + int64_t elapsedTime = taosGetTimestampUs() - st; + qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pQInfo, + pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime); + + pQInfo->runtimeEnv.summary.firstStageMergeTime += elapsedTime; + return TSDB_CODE_SUCCESS; +} diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c new file mode 100644 index 0000000000000000000000000000000000000000..e262a3ad385273f6e7f4b3c2cbe9c075e3d4e86d --- /dev/null +++ b/src/query/src/queryMain.c @@ -0,0 +1,542 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
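
mergeIntoGroupResultImpl() above keeps one cursor per table and repeatedly emits the row with the smallest window start key, using a loser tree so each emission costs O(log n) to re-balance. The same invariants in a self-contained form, with a linear minimum scan standing in for tLoserTreeCreate()/tLoserTreeAdjust(), and keeping the duplicate-skey skip from the loop above:

    #include <stdint.h>

    typedef struct {
      const int64_t *keys;  /* window start keys of one table, already ascending */
      int32_t size;
      int32_t pos;          /* next unread entry; -1 once exhausted, like rowIndex[] */
    } SourceSketch;

    static int32_t pickMin(SourceSketch *src, int32_t n) {
      int32_t best = -1;
      for (int32_t i = 0; i < n; ++i) {
        if (src[i].pos < 0) continue;         /* exhausted source loses, as in the comparator */
        if (best < 0 || src[i].keys[src[i].pos] < src[best].keys[src[best].pos]) {
          best = i;
        }
      }
      return best;
    }

    static int32_t mergeAscSketch(SourceSketch *src, int32_t n, int64_t *out) {
      int32_t total = 0;
      int64_t last = INT64_MIN;               /* lastTimestamp for an ascending query */
      for (;;) {
        int32_t i = pickMin(src, n);
        if (i < 0) break;                     /* all input sources are exhausted */
        int64_t key = src[i].keys[src[i].pos];
        if (key != last) {                    /* a row whose skey repeats the previous
                                               * emitted key is skipped, as above */
          out[total++] = key;
        }
        last = key;
        if (++src[i].pos >= src[i].size) {
          src[i].pos = -1;                    /* mark this source exhausted */
        }
      }
      return total;
    }

For a handful of tables the behaviour is identical; the loser tree only changes the cost of pickMin() from O(n) per emitted row to O(log n).
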
+ */ + +#include "os.h" +#include "qFill.h" +#include "taosmsg.h" +#include "tcache.h" +#include "tglobal.h" + +#include "exception.h" +#include "hash.h" +#include "texpr.h" +#include "qExecutor.h" +#include "qResultbuf.h" +#include "qUtil.h" +#include "query.h" +#include "queryLog.h" +#include "tlosertree.h" +#include "ttype.h" +#include "tcompare.h" + +typedef struct SQueryMgmt { + pthread_mutex_t lock; + SCacheObj *qinfoPool; // query handle pool + int32_t vgId; + bool closed; +} SQueryMgmt; + +static void queryMgmtKillQueryFn(void* handle) { + void** fp = (void**)handle; + qKillQuery(*fp); +} + +static void freeqinfoFn(void *qhandle) { + void** handle = qhandle; + if (handle == NULL || *handle == NULL) { + return; + } + + qKillQuery(*handle); + qDestroyQueryInfo(*handle); +} + +void freeParam(SQueryParam *param) { + tfree(param->sql); + tfree(param->tagCond); + tfree(param->tbnameCond); + tfree(param->pTableIdList); + tfree(param->pExprMsg); + tfree(param->pSecExprMsg); + tfree(param->pExprs); + tfree(param->pSecExprs); + tfree(param->pGroupColIndex); + tfree(param->pTagColumnInfo); + tfree(param->pGroupbyExpr); + tfree(param->prevResult); +} + +int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qinfo_t* pQInfo) { + assert(pQueryMsg != NULL && tsdb != NULL); + + int32_t code = TSDB_CODE_SUCCESS; + + SQueryParam param = {0}; + code = convertQueryMsg(pQueryMsg, ¶m); + if (code != TSDB_CODE_SUCCESS) { + goto _over; + } + + if (pQueryMsg->numOfTables <= 0) { + qError("Invalid number of tables to query, numOfTables:%d", pQueryMsg->numOfTables); + code = TSDB_CODE_QRY_INVALID_MSG; + goto _over; + } + + if (param.pTableIdList == NULL || taosArrayGetSize(param.pTableIdList) == 0) { + qError("qmsg:%p, SQueryTableMsg wrong format", pQueryMsg); + code = TSDB_CODE_QRY_INVALID_MSG; + goto _over; + } + + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + + if (param.pSecExprMsg != NULL) { + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + } + + param.pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, param.pGroupColIndex, &code); + if ((param.pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { + goto _over; + } + + bool isSTableQuery = false; + STableGroupInfo tableGroupInfo = {0}; + int64_t st = taosGetTimestampUs(); + + if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_TABLE_QUERY)) { + STableIdInfo *id = taosArrayGet(param.pTableIdList, 0); + + qDebug("qmsg:%p query normal table, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); + if ((code = tsdbGetOneTableGroup(tsdb, id->uid, pQueryMsg->window.skey, &tableGroupInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_STABLE_QUERY)) { + isSTableQuery = true; + + // also note there's possibility that only one table in the super table + if (!TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY)) { + STableIdInfo *id = taosArrayGet(param.pTableIdList, 0); + + // group by normal column, do not pass the group by condition to tsdb to group table into different group + int32_t numOfGroupByCols = pQueryMsg->numOfGroupCols; + if (pQueryMsg->numOfGroupCols == 1 && !TSDB_COL_IS_TAG(param.pGroupColIndex->flag)) { + 
numOfGroupByCols = 0; + } + + qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid); + code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen, + pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols); + + if (code != TSDB_CODE_SUCCESS) { + qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); + goto _over; + } + } else { + code = tsdbGetTableGroupFromIdList(tsdb, param.pTableIdList, &tableGroupInfo); + if (code != TSDB_CODE_SUCCESS) { + goto _over; + } + + qDebug("qmsg:%p query on %" PRIzu " tables in one group from client", pQueryMsg, tableGroupInfo.numOfTables); + } + + int64_t el = taosGetTimestampUs() - st; + qDebug("qmsg:%p tag filter completed, numOfTables:%" PRIzu ", elapsed time:%"PRId64"us", pQueryMsg, tableGroupInfo.numOfTables, el); + } else { + assert(0); + } + + code = checkForQueryBuf(tableGroupInfo.numOfTables); + if (code != TSDB_CODE_SUCCESS) { // not enough query buffer, abort + goto _over; + } + + (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, param.pTagColumnInfo, isSTableQuery, param.sql); + + param.sql = NULL; + param.pExprs = NULL; + param.pSecExprs = NULL; + param.pGroupbyExpr = NULL; + param.pTagColumnInfo = NULL; + + if ((*pQInfo) == NULL) { + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + goto _over; + } + + code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, &param, isSTableQuery); + + _over: + if (param.pGroupbyExpr != NULL) { + taosArrayDestroy(param.pGroupbyExpr->columnInfo); + } + + taosArrayDestroy(param.pTableIdList); + param.pTableIdList = NULL; + + freeParam(&param); + + for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { + SColumnInfo* column = pQueryMsg->colList + i; + freeColumnFilterInfo(column->filters, column->numOfFilters); + } + + // pQInfo is already freed in initQInfo, but *pQInfo may not point to NULL + if (code != TSDB_CODE_SUCCESS) { + *pQInfo = NULL; + } + + // if adding a ref for all tables in this query failed, abort the current query + return code; +} + +bool qTableQuery(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + assert(pQInfo && pQInfo->signature == pQInfo); + int64_t threadId = taosGetSelfPthreadId(); + + int64_t curOwner = 0; + if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { + qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); + pQInfo->code = TSDB_CODE_QRY_IN_EXEC; + return false; + } + + pQInfo->startExecTs = taosGetTimestampSec(); + + if (isQueryKilled(pQInfo)) { + qDebug("QInfo:%p it is already killed, abort", pQInfo); + return doBuildResCheck(pQInfo); + } + + if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) { + qDebug("QInfo:%p no table exists for query, abort", pQInfo); + setQueryStatus(pQInfo->runtimeEnv.pQuery, QUERY_COMPLETED); + return doBuildResCheck(pQInfo); + } + + // if an error occurs, record the error code and return to the client + int32_t ret = setjmp(pQInfo->runtimeEnv.env); + if (ret != TSDB_CODE_SUCCESS) { + pQInfo->code = ret; + qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); + return doBuildResCheck(pQInfo); + } + + qDebug("QInfo:%p query task is launched", pQInfo); + + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) { + assert(pQInfo->runtimeEnv.pQueryHandle == NULL); + buildTagQueryResult(pQInfo); + } else if (pQInfo->runtimeEnv.stableQuery) {
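+ // super-table query: per-table results are merged group by group via the loser tree above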
stableQueryImpl(pQInfo); + } else if (pQInfo->runtimeEnv.queryBlockDist) { + buildTableBlockDistResult(pQInfo); + } else { + tableQueryImpl(pQInfo); + } + + SQuery* pQuery = pRuntimeEnv->pQuery; + if (isQueryKilled(pQInfo)) { + qDebug("QInfo:%p query is killed", pQInfo); + } else if (pQuery->rec.rows == 0) { + qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); + } else { + qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows", + pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); + } + + return doBuildResCheck(pQInfo); +} + +int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContext) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + qError("QInfo:%p invalid qhandle", pQInfo); + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + *buildRes = false; + if (IS_QUERY_KILLED(pQInfo)) { + qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code); + return pQInfo->code; + } + + int32_t code = TSDB_CODE_SUCCESS; + + if (tsRetrieveBlockingModel) { + pQInfo->rspContext = pRspContext; + tsem_wait(&pQInfo->ready); + *buildRes = true; + code = pQInfo->code; + } else { + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + + pthread_mutex_lock(&pQInfo->lock); + + assert(pQInfo->rspContext == NULL); + if (pQInfo->dataReady == QUERY_RESULT_READY) { + *buildRes = true; + qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->resultRowSize, + pQuery->rec.rows, tstrerror(pQInfo->code)); + } else { + *buildRes = false; + qDebug("QInfo:%p retrieve req recorded, the result is returned once the query is paused", pQInfo); + pQInfo->rspContext = pRspContext; + assert(pQInfo->rspContext != NULL); + } + + code = pQInfo->code; + pthread_mutex_unlock(&pQInfo->lock); + } + + return code; +} + +int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; + size_t size = getResultSize(pQInfo, &pQuery->rec.rows); + + size += sizeof(int32_t); + size += sizeof(STableIdInfo) * taosHashGetSize(pQInfo->arrTableIdInfo); + + *contLen = (int32_t)(size + sizeof(SRetrieveTableRsp)); + + // the current solution only avoids a crash; it cannot return an error code to the client + *pRsp = (SRetrieveTableRsp *)rpcMallocCont(*contLen); + if (*pRsp == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + (*pRsp)->numOfRows = htonl((int32_t)pQuery->rec.rows); + + if (pQInfo->code == TSDB_CODE_SUCCESS) { + (*pRsp)->offset = htobe64(pQuery->limit.offset); + (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); + } else { + (*pRsp)->offset = 0; + (*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime); + } + + (*pRsp)->precision = htons(pQuery->precision); + if (pQuery->rec.rows > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { + doDumpQueryResult(pQInfo, (*pRsp)->data); + } else { + setQueryStatus(pQuery, QUERY_OVER); + } + + pQInfo->rspContext = NULL; + pQInfo->dataReady = QUERY_RESULT_NOT_READY; + + if (IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER)) { + // the current thread holds the refcount here, so it is safe to free the tsdbQueryHandle.
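+ // the query is over or was killed: mark this batch as the last one for the client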
+ *continueExec = false; + (*pRsp)->completed = 1; // notify the client that no more results follow + } else { + *continueExec = true; + qDebug("QInfo:%p has more results to retrieve", pQInfo); + } + + // the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS + if (pQInfo->code != TSDB_CODE_SUCCESS) { + rpcFreeCont(*pRsp); + *pRsp = NULL; + } + + return pQInfo->code; +} + +void* qGetResultRetrieveMsg(qinfo_t qinfo) { + SQInfo* pQInfo = (SQInfo*) qinfo; + assert(pQInfo != NULL); + + return pQInfo->rspContext; +} + +int32_t qKillQuery(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + setQueryKilled(pQInfo); + + // Wait for the query-executing thread to stop. + // Once the query is stopped, the owner of qHandle will be cleared immediately. + while (pQInfo->owner != 0) { + taosMsleep(100); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t qQueryCompleted(qinfo_t qinfo) { + SQInfo *pQInfo = (SQInfo *)qinfo; + + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + SQuery* pQuery = pQInfo->runtimeEnv.pQuery; + return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); +} + +void qDestroyQueryInfo(qinfo_t qHandle) { + SQInfo* pQInfo = (SQInfo*) qHandle; + if (!isValidQInfo(pQInfo)) { + return; + } + + qDebug("QInfo:%p query completed", pQInfo); + queryCostStatis(pQInfo); // print the query cost summary + freeQInfo(pQInfo); +} + +void* qOpenQueryMgmt(int32_t vgId) { + const int32_t refreshHandleInterval = 30; // refresh the handle pool every 30 seconds + + char cacheName[128] = {0}; + sprintf(cacheName, "qhandle_%d", vgId); + + SQueryMgmt* pQueryMgmt = calloc(1, sizeof(SQueryMgmt)); + if (pQueryMgmt == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return NULL; + } + + pQueryMgmt->qinfoPool = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshHandleInterval, true, freeqinfoFn, cacheName); + pQueryMgmt->closed = false; + pQueryMgmt->vgId = vgId; + + pthread_mutex_init(&pQueryMgmt->lock, NULL); + + qDebug("vgId:%d, open querymgmt success", vgId); + return pQueryMgmt; +} + +void qQueryMgmtNotifyClosed(void* pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt* pQueryMgmt = pQMgmt; + qDebug("vgId:%d, set querymgmt closed, wait for all queries to be cancelled", pQueryMgmt->vgId); + + pthread_mutex_lock(&pQueryMgmt->lock); + pQueryMgmt->closed = true; + pthread_mutex_unlock(&pQueryMgmt->lock); + + taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); +} + +void qQueryMgmtReOpen(void *pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt *pQueryMgmt = pQMgmt; + qDebug("vgId:%d, set querymgmt reopen", pQueryMgmt->vgId); + + pthread_mutex_lock(&pQueryMgmt->lock); + pQueryMgmt->closed = false; + pthread_mutex_unlock(&pQueryMgmt->lock); +} + +void qCleanupQueryMgmt(void* pQMgmt) { + if (pQMgmt == NULL) { + return; + } + + SQueryMgmt* pQueryMgmt = pQMgmt; + int32_t vgId = pQueryMgmt->vgId; + + assert(pQueryMgmt->closed); + + SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool; + pQueryMgmt->qinfoPool = NULL; + + taosCacheCleanup(pqinfoPool); + pthread_mutex_destroy(&pQueryMgmt->lock); + tfree(pQueryMgmt); + + qDebug("vgId:%d, queryMgmt cleanup completed", vgId); +} + +void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { + if (pMgmt == NULL) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + SQueryMgmt *pQueryMgmt = pMgmt; + if (pQueryMgmt->qinfoPool == NULL) { + qError("QInfo:%p failed to add qhandle into qMgmt, since 
qMgmt is closed", (void *)qInfo); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + pthread_mutex_lock(&pQueryMgmt->lock); + if (pQueryMgmt->closed) { + pthread_mutex_unlock(&pQueryMgmt->lock); + qError("QInfo:%p failed to add qhandle into cache, since qMgmt is closing", (void *)qInfo); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } else { + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), + (getMaximumIdleDurationSec()*1000)); + pthread_mutex_unlock(&pQueryMgmt->lock); + + return handle; + } +} + +void** qAcquireQInfo(void* pMgmt, uint64_t _key) { + SQueryMgmt *pQueryMgmt = pMgmt; + + if (pQueryMgmt->closed) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + return NULL; + } + + if (pQueryMgmt->qinfoPool == NULL) { + terrno = TSDB_CODE_QRY_INVALID_QHANDLE; + return NULL; + } + + TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); + if (handle == NULL || *handle == NULL) { + terrno = TSDB_CODE_QRY_INVALID_QHANDLE; + return NULL; + } else { + return handle; + } +} + +void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { + SQueryMgmt *pQueryMgmt = pMgmt; + if (pQueryMgmt->qinfoPool == NULL) { + return NULL; + } + + taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); + return 0; +} diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 0f945d0230f70943fddc19373ef8b872a127c5b5..2b1109688da3f7d814adea97665b718a30097a71 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -23,6 +23,7 @@ ** input grammar file: */ #include <stdio.h> +#include <assert.h> /************ Begin %include sections from the grammar ************************/ #include <stdio.h> @@ -76,8 +77,10 @@ ** zero the stack is dynamically sized using realloc() ** ParseARG_SDECL A static variable declaration for the %extra_argument ** ParseARG_PDECL A parameter declaration for the %extra_argument +** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter ** ParseARG_STORE Code to store %extra_argument into yypParser ** ParseARG_FETCH Code to extract %extra_argument from yypParser +** ParseCTX_* As ParseARG_ except for %extra_context ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** YYNSTATE the combined number of states.
@@ -97,7 +100,7 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 282 +#define YYNOCODE 281 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { @@ -124,21 +127,29 @@ typedef union { #endif #define ParseARG_SDECL SSqlInfo* pInfo; #define ParseARG_PDECL ,SSqlInfo* pInfo -#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo -#define ParseARG_STORE yypParser->pInfo = pInfo +#define ParseARG_PARAM ,pInfo +#define ParseARG_FETCH SSqlInfo* pInfo=yypParser->pInfo; +#define ParseARG_STORE yypParser->pInfo=pInfo; +#define ParseCTX_SDECL +#define ParseCTX_PDECL +#define ParseCTX_PARAM +#define ParseCTX_FETCH +#define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 283 -#define YYNRULE 250 +#define YYNSTATE 294 +#define YYNRULE 254 +#define YYNRULE_WITH_ACTION 254 #define YYNTOKEN 210 -#define YY_MAX_SHIFT 282 -#define YY_MIN_SHIFTREDUCE 463 -#define YY_MAX_SHIFTREDUCE 712 -#define YY_ERROR_ACTION 713 -#define YY_ACCEPT_ACTION 714 -#define YY_NO_ACTION 715 -#define YY_MIN_REDUCE 716 -#define YY_MAX_REDUCE 965 +#define YY_MAX_SHIFT 293 +#define YY_MIN_SHIFTREDUCE 477 +#define YY_MAX_SHIFTREDUCE 730 +#define YY_ERROR_ACTION 731 +#define YY_ACCEPT_ACTION 732 +#define YY_NO_ACTION 733 +#define YY_MIN_REDUCE 734 +#define YY_MAX_REDUCE 987 /************* End control #defines *******************************************/ +#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. @@ -203,138 +214,142 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (615) +#define YY_ACTTAB_COUNT (651) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 872, 507, 159, 714, 282, 159, 15, 580, 183, 508, - /* 10 */ 663, 186, 948, 41, 42, 947, 43, 44, 26, 158, - /* 20 */ 191, 35, 507, 507, 232, 47, 45, 49, 46, 163, - /* 30 */ 508, 508, 850, 40, 39, 257, 256, 38, 37, 36, - /* 40 */ 41, 42, 118, 43, 44, 861, 664, 191, 35, 179, - /* 50 */ 280, 232, 47, 45, 49, 46, 181, 869, 847, 215, - /* 60 */ 40, 39, 957, 123, 38, 37, 36, 464, 465, 466, - /* 70 */ 467, 468, 469, 470, 471, 472, 473, 474, 475, 281, - /* 80 */ 91, 196, 205, 41, 42, 267, 43, 44, 839, 247, - /* 90 */ 191, 35, 159, 944, 232, 47, 45, 49, 46, 123, - /* 100 */ 220, 185, 948, 40, 39, 850, 62, 38, 37, 36, - /* 110 */ 20, 245, 275, 274, 244, 243, 242, 273, 241, 272, - /* 120 */ 271, 270, 240, 269, 268, 901, 267, 227, 817, 661, - /* 130 */ 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, - /* 140 */ 815, 816, 818, 819, 42, 208, 43, 44, 642, 643, - /* 150 */ 191, 35, 212, 211, 232, 47, 45, 49, 46, 229, - /* 160 */ 861, 67, 21, 40, 39, 247, 276, 38, 37, 36, - /* 170 */ 32, 43, 44, 197, 180, 191, 35, 849, 70, 232, - /* 180 */ 47, 45, 49, 46, 16, 38, 37, 36, 40, 39, - /* 190 */ 850, 123, 38, 37, 36, 190, 674, 72, 26, 665, - /* 200 */ 167, 668, 667, 671, 670, 61, 168, 836, 837, 25, - /* 210 */ 840, 103, 102, 166, 190, 674, 758, 199, 665, 147, - /* 220 */ 668, 767, 671, 20, 147, 275, 274, 187, 188, 841, - /* 230 */ 273, 231, 272, 271, 270, 838, 269, 268, 846, 198, - /* 240 */ 823, 21, 249, 821, 822, 943, 187, 188, 824, 32, - /* 250 */ 826, 827, 825, 900, 828, 829, 123, 3, 137, 47, - /* 260 */ 45, 49, 46, 29, 78, 74, 77, 40, 39, 617, - /* 270 */ 214, 38, 37, 36, 233, 601, 759, 174, 598, 147, - /* 280 */ 599, 26, 
600, 201, 89, 93, 254, 253, 26, 942, - /* 290 */ 83, 98, 101, 92, 26, 48, 153, 149, 26, 95, - /* 300 */ 175, 26, 151, 106, 105, 104, 202, 203, 673, 26, - /* 310 */ 69, 40, 39, 200, 48, 38, 37, 36, 68, 194, - /* 320 */ 10, 847, 63, 672, 71, 133, 195, 673, 847, 279, - /* 330 */ 278, 110, 250, 666, 847, 669, 251, 614, 847, 255, - /* 340 */ 116, 847, 672, 22, 621, 848, 609, 259, 32, 847, - /* 350 */ 218, 629, 633, 634, 217, 27, 120, 52, 18, 693, - /* 360 */ 675, 189, 53, 176, 17, 17, 590, 677, 56, 235, - /* 370 */ 591, 27, 100, 99, 27, 52, 82, 81, 12, 11, - /* 380 */ 161, 602, 54, 59, 162, 579, 57, 88, 87, 14, - /* 390 */ 13, 605, 603, 606, 604, 115, 113, 164, 165, 171, - /* 400 */ 911, 172, 170, 4, 157, 169, 160, 910, 192, 907, - /* 410 */ 906, 193, 258, 117, 871, 33, 878, 880, 119, 863, - /* 420 */ 893, 892, 134, 135, 132, 136, 769, 32, 239, 155, - /* 430 */ 30, 248, 766, 216, 962, 79, 961, 114, 959, 138, - /* 440 */ 252, 956, 85, 955, 628, 953, 221, 139, 182, 225, - /* 450 */ 787, 31, 28, 58, 156, 756, 94, 860, 55, 50, - /* 460 */ 754, 96, 230, 124, 228, 125, 97, 752, 126, 127, - /* 470 */ 226, 128, 224, 222, 751, 34, 204, 148, 90, 749, - /* 480 */ 260, 261, 262, 748, 747, 746, 745, 263, 150, 152, - /* 490 */ 742, 740, 264, 738, 736, 734, 265, 154, 266, 219, - /* 500 */ 64, 65, 894, 277, 712, 206, 207, 177, 237, 238, - /* 510 */ 711, 178, 173, 209, 75, 210, 710, 698, 217, 213, - /* 520 */ 750, 611, 107, 60, 744, 234, 142, 141, 788, 140, - /* 530 */ 143, 144, 146, 145, 1, 108, 743, 2, 109, 735, - /* 540 */ 66, 6, 184, 630, 845, 121, 223, 131, 129, 130, - /* 550 */ 635, 122, 5, 23, 7, 8, 24, 676, 9, 19, - /* 560 */ 678, 71, 73, 236, 548, 544, 542, 541, 540, 537, - /* 570 */ 511, 246, 76, 27, 51, 582, 80, 84, 581, 578, - /* 580 */ 532, 86, 530, 522, 528, 524, 526, 520, 518, 550, - /* 590 */ 549, 547, 546, 545, 543, 539, 538, 52, 509, 479, - /* 600 */ 477, 716, 715, 715, 715, 715, 715, 715, 715, 715, - /* 610 */ 715, 715, 715, 111, 112, + /* 0 */ 74, 521, 732, 293, 521, 165, 186, 291, 28, 522, + /* 10 */ 190, 893, 522, 43, 44, 969, 47, 48, 15, 776, + /* 20 */ 198, 37, 152, 46, 242, 51, 49, 53, 50, 854, + /* 30 */ 855, 27, 858, 42, 41, 871, 128, 40, 39, 38, + /* 40 */ 43, 44, 882, 47, 48, 882, 188, 198, 37, 868, + /* 50 */ 46, 242, 51, 49, 53, 50, 187, 128, 203, 225, + /* 60 */ 42, 41, 979, 165, 40, 39, 38, 43, 44, 890, + /* 70 */ 47, 48, 193, 970, 198, 37, 165, 46, 242, 51, + /* 80 */ 49, 53, 50, 871, 128, 192, 970, 42, 41, 258, + /* 90 */ 521, 40, 39, 38, 290, 289, 115, 239, 522, 71, + /* 100 */ 77, 43, 45, 128, 47, 48, 205, 66, 198, 37, + /* 110 */ 28, 46, 242, 51, 49, 53, 50, 40, 39, 38, + /* 120 */ 921, 42, 41, 278, 65, 40, 39, 38, 865, 678, + /* 130 */ 287, 871, 859, 210, 478, 479, 480, 481, 482, 483, + /* 140 */ 484, 485, 486, 487, 488, 489, 292, 72, 201, 215, + /* 150 */ 44, 868, 47, 48, 856, 871, 198, 37, 209, 46, + /* 160 */ 242, 51, 49, 53, 50, 869, 922, 204, 237, 42, + /* 170 */ 41, 96, 163, 40, 39, 38, 278, 21, 256, 286, + /* 180 */ 285, 255, 254, 253, 284, 252, 283, 282, 281, 251, + /* 190 */ 280, 279, 835, 594, 823, 824, 825, 826, 827, 828, + /* 200 */ 829, 830, 831, 832, 833, 834, 836, 837, 47, 48, + /* 210 */ 87, 86, 198, 37, 28, 46, 242, 51, 49, 53, + /* 220 */ 50, 268, 267, 16, 211, 42, 41, 265, 264, 40, + /* 230 */ 39, 38, 197, 691, 28, 634, 682, 207, 685, 174, + /* 240 */ 688, 22, 42, 41, 73, 175, 40, 39, 38, 34, + /* 250 */ 108, 107, 173, 197, 691, 867, 67, 682, 28, 685, + /* 260 */ 21, 688, 286, 285, 194, 195, 169, 284, 241, 283, + /* 270 */ 282, 281, 202, 280, 279, 868, 
618, 28, 60, 615, + /* 280 */ 22, 616, 631, 617, 218, 194, 195, 123, 34, 23, + /* 290 */ 841, 222, 221, 839, 840, 857, 261, 61, 842, 868, + /* 300 */ 844, 845, 843, 208, 846, 847, 260, 212, 213, 224, + /* 310 */ 638, 51, 49, 53, 50, 262, 181, 28, 868, 42, + /* 320 */ 41, 94, 98, 40, 39, 38, 28, 88, 103, 106, + /* 330 */ 97, 10, 52, 3, 142, 76, 100, 138, 680, 31, + /* 340 */ 83, 79, 82, 158, 154, 690, 230, 659, 660, 156, + /* 350 */ 111, 110, 109, 52, 785, 266, 777, 152, 868, 152, + /* 360 */ 689, 626, 121, 684, 270, 687, 690, 868, 196, 227, + /* 370 */ 34, 228, 258, 646, 681, 29, 683, 125, 686, 650, + /* 380 */ 651, 689, 619, 56, 18, 711, 692, 243, 966, 17, + /* 390 */ 17, 57, 604, 245, 606, 247, 29, 29, 56, 75, + /* 400 */ 605, 63, 26, 593, 56, 248, 12, 11, 93, 92, + /* 410 */ 4, 965, 58, 14, 13, 622, 620, 623, 621, 105, + /* 420 */ 104, 120, 118, 932, 964, 182, 183, 167, 168, 170, + /* 430 */ 164, 171, 172, 178, 179, 177, 162, 176, 166, 870, + /* 440 */ 931, 199, 928, 927, 884, 200, 269, 122, 892, 35, + /* 450 */ 899, 901, 124, 139, 864, 914, 140, 141, 913, 137, + /* 460 */ 787, 250, 160, 32, 259, 34, 784, 984, 84, 983, + /* 470 */ 981, 143, 226, 119, 231, 263, 978, 90, 977, 975, + /* 480 */ 694, 144, 645, 805, 189, 235, 62, 881, 129, 33, + /* 490 */ 59, 240, 30, 54, 161, 132, 130, 238, 236, 131, + /* 500 */ 774, 99, 772, 133, 234, 134, 232, 101, 36, 102, + /* 510 */ 770, 95, 769, 271, 272, 214, 153, 767, 766, 765, + /* 520 */ 764, 763, 273, 155, 157, 760, 758, 756, 754, 752, + /* 530 */ 159, 274, 229, 68, 69, 915, 275, 276, 277, 184, + /* 540 */ 206, 249, 730, 185, 180, 288, 80, 216, 217, 768, + /* 550 */ 729, 220, 219, 112, 728, 762, 147, 761, 146, 806, + /* 560 */ 145, 148, 149, 151, 150, 113, 114, 753, 1, 716, + /* 570 */ 2, 223, 227, 628, 64, 6, 866, 244, 70, 647, + /* 580 */ 126, 135, 136, 191, 24, 233, 7, 652, 127, 8, + /* 590 */ 693, 5, 25, 9, 19, 246, 20, 695, 78, 76, + /* 600 */ 562, 558, 556, 555, 554, 551, 525, 257, 81, 85, + /* 610 */ 29, 55, 596, 595, 89, 91, 592, 546, 544, 536, + /* 620 */ 542, 538, 540, 534, 532, 564, 563, 561, 560, 559, + /* 630 */ 557, 553, 552, 56, 523, 493, 491, 734, 733, 733, + /* 640 */ 733, 733, 733, 733, 733, 733, 733, 733, 733, 116, + /* 650 */ 117, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 214, 1, 270, 211, 212, 270, 270, 5, 231, 9, - /* 10 */ 1, 279, 280, 13, 14, 280, 16, 17, 214, 270, - /* 20 */ 20, 21, 1, 1, 24, 25, 26, 27, 28, 270, - /* 30 */ 9, 9, 255, 33, 34, 33, 34, 37, 38, 39, - /* 40 */ 13, 14, 214, 16, 17, 253, 37, 20, 21, 213, - /* 50 */ 214, 24, 25, 26, 27, 28, 252, 271, 254, 267, - /* 60 */ 33, 34, 255, 214, 37, 38, 39, 45, 46, 47, - /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 80 */ 74, 231, 60, 13, 14, 79, 16, 17, 0, 77, - /* 90 */ 20, 21, 270, 270, 24, 25, 26, 27, 28, 214, - /* 100 */ 272, 279, 280, 33, 34, 255, 106, 37, 38, 39, - /* 110 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - /* 120 */ 96, 97, 98, 99, 100, 276, 79, 278, 230, 102, - /* 130 */ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - /* 140 */ 242, 243, 244, 245, 14, 131, 16, 17, 120, 121, - /* 150 */ 20, 21, 138, 139, 24, 25, 26, 27, 28, 274, - /* 160 */ 253, 276, 101, 33, 34, 77, 231, 37, 38, 39, - /* 170 */ 109, 16, 17, 66, 267, 20, 21, 255, 219, 24, - /* 180 */ 25, 26, 27, 28, 44, 37, 38, 39, 33, 34, - /* 190 */ 255, 214, 37, 38, 39, 1, 2, 219, 214, 5, - /* 200 */ 60, 7, 5, 9, 7, 219, 66, 248, 249, 250, - /* 210 */ 251, 71, 72, 73, 1, 2, 218, 66, 5, 221, - /* 220 */ 7, 218, 9, 86, 221, 88, 89, 33, 34, 251, - /* 230 */ 93, 37, 95, 96, 97, 
249, 99, 100, 254, 132, - /* 240 */ 230, 101, 135, 233, 234, 270, 33, 34, 238, 109, - /* 250 */ 240, 241, 242, 276, 244, 245, 214, 61, 62, 25, - /* 260 */ 26, 27, 28, 67, 68, 69, 70, 33, 34, 37, - /* 270 */ 130, 37, 38, 39, 15, 2, 218, 137, 5, 221, - /* 280 */ 7, 214, 9, 132, 61, 62, 135, 136, 214, 270, - /* 290 */ 67, 68, 69, 70, 214, 101, 61, 62, 214, 76, - /* 300 */ 270, 214, 67, 68, 69, 70, 33, 34, 114, 214, - /* 310 */ 256, 33, 34, 214, 101, 37, 38, 39, 276, 252, - /* 320 */ 101, 254, 268, 129, 105, 106, 252, 114, 254, 63, - /* 330 */ 64, 65, 252, 5, 254, 7, 252, 107, 254, 252, - /* 340 */ 101, 254, 129, 113, 112, 246, 102, 252, 109, 254, - /* 350 */ 102, 102, 102, 102, 110, 107, 107, 107, 107, 102, - /* 360 */ 102, 59, 107, 270, 107, 107, 102, 108, 107, 102, - /* 370 */ 102, 107, 74, 75, 107, 107, 133, 134, 133, 134, - /* 380 */ 270, 108, 127, 101, 270, 103, 125, 133, 134, 133, - /* 390 */ 134, 5, 5, 7, 7, 61, 62, 270, 270, 270, - /* 400 */ 247, 270, 270, 101, 270, 270, 270, 247, 247, 247, - /* 410 */ 247, 247, 247, 214, 214, 269, 214, 214, 214, 253, - /* 420 */ 277, 277, 214, 214, 257, 214, 214, 109, 214, 214, - /* 430 */ 214, 214, 214, 253, 214, 214, 214, 59, 214, 214, - /* 440 */ 214, 214, 214, 214, 114, 214, 273, 214, 273, 273, - /* 450 */ 214, 214, 214, 124, 214, 214, 214, 266, 126, 123, - /* 460 */ 214, 214, 118, 265, 122, 264, 214, 214, 263, 262, - /* 470 */ 117, 261, 116, 115, 214, 128, 214, 214, 85, 214, - /* 480 */ 84, 49, 81, 214, 214, 214, 214, 83, 214, 214, - /* 490 */ 214, 214, 53, 214, 214, 214, 82, 214, 80, 215, - /* 500 */ 215, 215, 215, 77, 5, 140, 5, 215, 215, 215, - /* 510 */ 5, 215, 215, 140, 219, 5, 5, 87, 110, 131, - /* 520 */ 215, 102, 216, 111, 215, 104, 223, 227, 229, 228, - /* 530 */ 226, 224, 222, 225, 220, 216, 215, 217, 216, 215, - /* 540 */ 107, 101, 1, 102, 253, 101, 101, 258, 260, 259, - /* 550 */ 102, 101, 101, 107, 119, 119, 107, 102, 101, 101, - /* 560 */ 108, 105, 74, 104, 9, 5, 5, 5, 5, 5, - /* 570 */ 78, 15, 74, 107, 16, 5, 134, 134, 5, 102, - /* 580 */ 5, 134, 5, 5, 5, 5, 5, 5, 5, 5, - /* 590 */ 5, 5, 5, 5, 5, 5, 5, 107, 78, 59, - /* 600 */ 58, 0, 281, 281, 281, 281, 281, 281, 281, 281, - /* 610 */ 281, 281, 281, 21, 21, 281, 281, 281, 281, 281, - /* 620 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, - /* 630 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, - /* 640 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, - /* 650 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 0 */ 218, 1, 210, 211, 1, 270, 212, 213, 213, 9, + /* 10 */ 230, 213, 9, 13, 14, 280, 16, 17, 270, 217, + /* 20 */ 20, 21, 220, 23, 24, 25, 26, 27, 28, 247, + /* 30 */ 248, 249, 250, 33, 34, 255, 213, 37, 38, 39, + /* 40 */ 13, 14, 253, 16, 17, 253, 251, 20, 21, 254, + /* 50 */ 23, 24, 25, 26, 27, 28, 267, 213, 230, 267, + /* 60 */ 33, 34, 255, 270, 37, 38, 39, 13, 14, 271, + /* 70 */ 16, 17, 279, 280, 20, 21, 270, 23, 24, 25, + /* 80 */ 26, 27, 28, 255, 213, 279, 280, 33, 34, 77, + /* 90 */ 1, 37, 38, 39, 63, 64, 65, 274, 9, 276, + /* 100 */ 218, 13, 14, 213, 16, 17, 230, 107, 20, 21, + /* 110 */ 213, 23, 24, 25, 26, 27, 28, 37, 38, 39, + /* 120 */ 276, 33, 34, 79, 218, 37, 38, 39, 213, 102, + /* 130 */ 230, 255, 250, 213, 45, 46, 47, 48, 49, 50, + /* 140 */ 51, 52, 53, 54, 55, 56, 57, 276, 251, 60, + /* 150 */ 14, 254, 16, 17, 248, 255, 20, 21, 66, 23, + /* 160 */ 24, 25, 26, 27, 28, 245, 276, 252, 278, 33, + /* 170 */ 34, 74, 270, 37, 38, 39, 79, 86, 87, 88, + /* 180 */ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + /* 190 */ 99, 100, 229, 5, 231, 232, 233, 
234, 235, 236, + /* 200 */ 237, 238, 239, 240, 241, 242, 243, 244, 16, 17, + /* 210 */ 133, 134, 20, 21, 213, 23, 24, 25, 26, 27, + /* 220 */ 28, 33, 34, 44, 132, 33, 34, 135, 136, 37, + /* 230 */ 38, 39, 1, 2, 213, 37, 5, 66, 7, 60, + /* 240 */ 9, 101, 33, 34, 256, 66, 37, 38, 39, 109, + /* 250 */ 71, 72, 73, 1, 2, 254, 268, 5, 213, 7, + /* 260 */ 86, 9, 88, 89, 33, 34, 270, 93, 37, 95, + /* 270 */ 96, 97, 251, 99, 100, 254, 2, 213, 106, 5, + /* 280 */ 101, 7, 106, 9, 131, 33, 34, 213, 109, 113, + /* 290 */ 229, 138, 139, 232, 233, 0, 251, 125, 237, 254, + /* 300 */ 239, 240, 241, 132, 243, 244, 135, 33, 34, 130, + /* 310 */ 112, 25, 26, 27, 28, 251, 137, 213, 254, 33, + /* 320 */ 34, 61, 62, 37, 38, 39, 213, 67, 68, 69, + /* 330 */ 70, 101, 101, 61, 62, 105, 76, 107, 1, 67, + /* 340 */ 68, 69, 70, 61, 62, 114, 272, 120, 121, 67, + /* 350 */ 68, 69, 70, 101, 217, 251, 217, 220, 254, 220, + /* 360 */ 129, 102, 101, 5, 251, 7, 114, 254, 59, 110, + /* 370 */ 109, 102, 77, 102, 37, 106, 5, 106, 7, 102, + /* 380 */ 102, 129, 108, 106, 106, 102, 102, 15, 270, 106, + /* 390 */ 106, 106, 102, 102, 102, 102, 106, 106, 106, 106, + /* 400 */ 102, 101, 101, 103, 106, 104, 133, 134, 133, 134, + /* 410 */ 101, 270, 127, 133, 134, 5, 5, 7, 7, 74, + /* 420 */ 75, 61, 62, 246, 270, 270, 270, 270, 270, 270, + /* 430 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 255, + /* 440 */ 246, 246, 246, 246, 253, 246, 246, 213, 213, 269, + /* 450 */ 213, 213, 213, 213, 213, 277, 213, 213, 277, 257, + /* 460 */ 213, 213, 213, 213, 213, 109, 213, 213, 213, 213, + /* 470 */ 213, 213, 253, 59, 273, 213, 213, 213, 213, 213, + /* 480 */ 108, 213, 114, 213, 273, 273, 124, 266, 265, 213, + /* 490 */ 126, 118, 213, 123, 213, 262, 264, 122, 117, 263, + /* 500 */ 213, 213, 213, 261, 116, 260, 115, 213, 128, 213, + /* 510 */ 213, 85, 213, 84, 49, 213, 213, 213, 213, 213, + /* 520 */ 213, 213, 81, 213, 213, 213, 213, 213, 213, 213, + /* 530 */ 213, 83, 214, 214, 214, 214, 53, 82, 80, 214, + /* 540 */ 214, 214, 5, 214, 214, 77, 218, 140, 5, 214, + /* 550 */ 5, 5, 140, 215, 5, 214, 222, 214, 226, 228, + /* 560 */ 227, 225, 223, 221, 224, 215, 215, 214, 219, 87, + /* 570 */ 216, 131, 110, 102, 111, 101, 253, 104, 106, 102, + /* 580 */ 101, 259, 258, 1, 106, 101, 119, 102, 101, 119, + /* 590 */ 102, 101, 106, 101, 101, 104, 101, 108, 74, 105, + /* 600 */ 9, 5, 5, 5, 5, 5, 78, 15, 74, 134, + /* 610 */ 106, 16, 5, 5, 134, 134, 102, 5, 5, 5, + /* 620 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 630 */ 5, 5, 5, 106, 78, 59, 58, 0, 281, 281, + /* 640 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 21, + /* 650 */ 21, 281, 281, 281, 281, 281, 281, 281, 281, 281, /* 660 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, /* 670 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, /* 680 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, @@ -351,93 +366,100 @@ static const YYCODETYPE yy_lookahead[] = { /* 790 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, /* 800 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, /* 810 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, - /* 820 */ 281, 281, 281, 281, 281, + /* 820 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 830 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 840 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 850 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 860 */ 281, }; -#define YY_SHIFT_COUNT (282) +#define YY_SHIFT_COUNT (293) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (601) +#define YY_SHIFT_MAX (637) static const unsigned short 
int yy_shift_ofst[] = { - /* 0 */ 140, 24, 137, 12, 194, 213, 21, 21, 21, 21, - /* 10 */ 21, 21, 21, 21, 21, 0, 22, 213, 273, 273, - /* 20 */ 273, 61, 21, 21, 21, 88, 21, 21, 6, 12, - /* 30 */ 47, 47, 615, 213, 213, 213, 213, 213, 213, 213, - /* 40 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, - /* 50 */ 213, 273, 273, 2, 2, 2, 2, 2, 2, 2, - /* 60 */ 239, 21, 21, 232, 21, 21, 21, 28, 28, 230, - /* 70 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 80 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 90 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 100 */ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - /* 110 */ 21, 21, 21, 21, 21, 21, 318, 378, 378, 378, - /* 120 */ 330, 330, 330, 378, 329, 332, 336, 344, 342, 353, - /* 130 */ 356, 358, 347, 318, 378, 378, 378, 12, 378, 378, - /* 140 */ 393, 396, 432, 401, 404, 439, 414, 418, 378, 426, - /* 150 */ 378, 426, 378, 426, 378, 615, 615, 27, 70, 70, - /* 160 */ 70, 130, 155, 234, 234, 234, 223, 196, 235, 278, - /* 170 */ 278, 278, 278, 151, 14, 148, 148, 219, 107, 266, - /* 180 */ 244, 248, 249, 250, 251, 257, 258, 197, 328, 9, - /* 190 */ 302, 259, 255, 261, 264, 267, 268, 243, 245, 254, - /* 200 */ 282, 256, 386, 387, 298, 334, 499, 365, 501, 505, - /* 210 */ 373, 510, 511, 430, 388, 408, 419, 412, 421, 440, - /* 220 */ 433, 441, 444, 541, 445, 448, 450, 446, 435, 449, - /* 230 */ 436, 455, 451, 452, 457, 421, 458, 459, 456, 488, - /* 240 */ 555, 560, 561, 562, 563, 564, 492, 556, 498, 442, - /* 250 */ 466, 466, 558, 443, 447, 466, 570, 573, 477, 466, - /* 260 */ 575, 577, 578, 579, 580, 581, 582, 583, 584, 585, - /* 270 */ 586, 587, 588, 589, 590, 591, 490, 520, 592, 593, - /* 280 */ 540, 542, 601, + /* 0 */ 179, 91, 174, 12, 231, 252, 3, 3, 3, 3, + /* 10 */ 3, 3, 3, 3, 3, 0, 89, 252, 274, 274, + /* 20 */ 274, 274, 140, 3, 3, 3, 3, 295, 3, 3, + /* 30 */ 97, 12, 44, 44, 651, 252, 252, 252, 252, 252, + /* 40 */ 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, + /* 50 */ 252, 252, 252, 252, 252, 274, 274, 188, 188, 188, + /* 60 */ 188, 188, 188, 188, 261, 3, 3, 198, 3, 3, + /* 70 */ 3, 227, 227, 176, 3, 3, 3, 3, 3, 3, + /* 80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 100 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 120 */ 3, 356, 414, 414, 414, 368, 368, 368, 414, 362, + /* 130 */ 364, 370, 373, 375, 381, 388, 391, 380, 356, 414, + /* 140 */ 414, 414, 12, 414, 414, 426, 429, 465, 441, 448, + /* 150 */ 483, 455, 458, 414, 468, 414, 468, 414, 468, 414, + /* 160 */ 651, 651, 27, 54, 88, 54, 54, 136, 192, 286, + /* 170 */ 286, 286, 286, 260, 272, 282, 209, 209, 209, 209, + /* 180 */ 92, 153, 80, 80, 230, 171, 31, 259, 269, 271, + /* 190 */ 277, 278, 283, 284, 358, 371, 337, 309, 372, 285, + /* 200 */ 172, 290, 291, 292, 293, 298, 301, 77, 273, 275, + /* 210 */ 300, 280, 410, 411, 345, 360, 537, 407, 543, 545, + /* 220 */ 412, 546, 549, 482, 440, 462, 471, 463, 473, 474, + /* 230 */ 472, 477, 479, 582, 484, 485, 487, 478, 467, 486, + /* 240 */ 470, 488, 490, 489, 492, 473, 493, 491, 495, 494, + /* 250 */ 524, 591, 596, 597, 598, 599, 600, 528, 592, 534, + /* 260 */ 475, 504, 504, 595, 480, 481, 504, 607, 608, 514, + /* 270 */ 504, 612, 613, 614, 615, 616, 617, 618, 619, 620, + /* 280 */ 621, 622, 623, 624, 625, 626, 627, 527, 556, 628, + /* 290 */ 629, 576, 578, 637, }; -#define YY_REDUCE_COUNT (156) -#define YY_REDUCE_MIN (-268) -#define YY_REDUCE_MAX (324) +#define YY_REDUCE_COUNT (161) +#define YY_REDUCE_MIN (-265) +#define YY_REDUCE_MAX (354) static const short 
yy_reduce_ofst[] = { - /* 0 */ -208, -102, 10, -41, -268, -178, -196, -151, -115, 67, - /* 10 */ 74, 80, 84, 87, 95, -214, -164, -265, -223, -150, - /* 20 */ -65, -93, -172, -23, 42, -22, 99, -16, -2, -14, - /* 30 */ 3, 58, 54, -264, -251, -241, -177, -25, 19, 30, - /* 40 */ 93, 110, 114, 127, 128, 129, 131, 132, 134, 135, - /* 50 */ 136, -193, -78, 153, 160, 161, 162, 163, 164, 165, - /* 60 */ 166, 199, 200, 146, 202, 203, 204, 143, 144, 167, - /* 70 */ 208, 209, 211, 212, 214, 215, 216, 217, 218, 220, - /* 80 */ 221, 222, 224, 225, 226, 227, 228, 229, 231, 233, - /* 90 */ 236, 237, 238, 240, 241, 242, 246, 247, 252, 253, - /* 100 */ 260, 262, 263, 265, 269, 270, 271, 272, 274, 275, - /* 110 */ 276, 277, 279, 280, 281, 283, 180, 284, 285, 286, - /* 120 */ 173, 175, 176, 287, 191, 198, 201, 205, 207, 210, - /* 130 */ 288, 290, 289, 291, 292, 293, 294, 295, 296, 297, - /* 140 */ 299, 301, 300, 303, 304, 307, 308, 310, 305, 306, - /* 150 */ 309, 319, 321, 322, 324, 314, 320, + /* 0 */ -208, -37, 61, -218, -207, -194, -205, -110, -177, -103, + /* 10 */ 21, 45, 64, 104, 113, -202, -206, -265, -220, -172, + /* 20 */ -124, -100, -211, 74, -156, -129, -85, -118, -80, 1, + /* 30 */ -198, -94, 137, 139, -12, -252, -98, -4, 118, 141, + /* 40 */ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + /* 50 */ 164, 165, 166, 167, 168, -193, 184, 177, 194, 195, + /* 60 */ 196, 197, 199, 200, 191, 234, 235, 180, 237, 238, + /* 70 */ 239, 178, 181, 202, 240, 241, 243, 244, 247, 248, + /* 80 */ 249, 250, 251, 253, 254, 255, 256, 257, 258, 262, + /* 90 */ 263, 264, 265, 266, 268, 270, 276, 279, 281, 287, + /* 100 */ 288, 289, 294, 296, 297, 299, 302, 303, 304, 305, + /* 110 */ 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, + /* 120 */ 317, 219, 318, 319, 320, 201, 211, 212, 321, 221, + /* 130 */ 223, 232, 236, 233, 242, 245, 322, 324, 323, 325, + /* 140 */ 326, 327, 328, 329, 330, 331, 333, 332, 334, 336, + /* 150 */ 339, 340, 342, 335, 338, 341, 350, 343, 351, 353, + /* 160 */ 349, 354, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 713, 768, 757, 765, 950, 950, 713, 713, 713, 713, - /* 10 */ 713, 713, 713, 713, 713, 873, 731, 950, 713, 713, - /* 20 */ 713, 713, 713, 713, 713, 765, 713, 713, 770, 765, - /* 30 */ 770, 770, 868, 713, 713, 713, 713, 713, 713, 713, - /* 40 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 50 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 60 */ 713, 713, 713, 875, 877, 879, 713, 897, 897, 866, - /* 70 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 80 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 90 */ 713, 713, 713, 713, 755, 713, 753, 713, 713, 713, - /* 100 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 110 */ 741, 713, 713, 713, 713, 713, 713, 733, 733, 733, - /* 120 */ 713, 713, 713, 733, 904, 908, 902, 890, 898, 889, - /* 130 */ 885, 884, 912, 713, 733, 733, 733, 765, 733, 733, - /* 140 */ 786, 784, 782, 774, 780, 776, 778, 772, 733, 763, - /* 150 */ 733, 763, 733, 763, 733, 804, 820, 713, 913, 949, - /* 160 */ 903, 939, 938, 945, 937, 936, 713, 713, 713, 932, - /* 170 */ 933, 935, 934, 713, 713, 941, 940, 713, 713, 713, - /* 180 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 190 */ 915, 713, 909, 905, 713, 713, 713, 713, 713, 713, - /* 200 */ 830, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 210 */ 713, 713, 713, 713, 713, 865, 713, 713, 713, 713, - /* 220 */ 876, 713, 713, 713, 713, 713, 713, 899, 713, 891, - /* 230 */ 713, 713, 713, 713, 713, 842, 713, 713, 713, 713, - /* 240 */ 713, 713, 713, 
713, 713, 713, 713, 713, 713, 713, - /* 250 */ 960, 958, 713, 713, 713, 954, 713, 713, 713, 952, - /* 260 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 270 */ 713, 713, 713, 713, 713, 713, 789, 713, 739, 737, - /* 280 */ 713, 729, 713, + /* 0 */ 731, 786, 775, 783, 972, 972, 731, 731, 731, 731, + /* 10 */ 731, 731, 731, 731, 731, 894, 749, 972, 731, 731, + /* 20 */ 731, 731, 731, 731, 731, 731, 731, 783, 731, 731, + /* 30 */ 788, 783, 788, 788, 889, 731, 731, 731, 731, 731, + /* 40 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 50 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 60 */ 731, 731, 731, 731, 731, 731, 731, 896, 898, 900, + /* 70 */ 731, 918, 918, 887, 731, 731, 731, 731, 731, 731, + /* 80 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 90 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 773, + /* 100 */ 731, 771, 731, 731, 731, 731, 731, 731, 731, 731, + /* 110 */ 731, 731, 731, 731, 731, 759, 731, 731, 731, 731, + /* 120 */ 731, 731, 751, 751, 751, 731, 731, 731, 751, 925, + /* 130 */ 929, 923, 911, 919, 910, 906, 905, 933, 731, 751, + /* 140 */ 751, 751, 783, 751, 751, 804, 802, 800, 792, 798, + /* 150 */ 794, 796, 790, 751, 781, 751, 781, 751, 781, 751, + /* 160 */ 822, 838, 731, 934, 731, 971, 924, 961, 960, 967, + /* 170 */ 959, 958, 957, 731, 731, 731, 953, 954, 956, 955, + /* 180 */ 731, 731, 963, 962, 731, 731, 731, 731, 731, 731, + /* 190 */ 731, 731, 731, 731, 731, 731, 731, 936, 731, 930, + /* 200 */ 926, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 210 */ 848, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 220 */ 731, 731, 731, 731, 731, 886, 731, 731, 731, 731, + /* 230 */ 897, 731, 731, 731, 731, 731, 731, 920, 731, 912, + /* 240 */ 731, 731, 731, 731, 731, 860, 731, 731, 731, 731, + /* 250 */ 731, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 260 */ 731, 982, 980, 731, 731, 731, 976, 731, 731, 731, + /* 270 */ 974, 731, 731, 731, 731, 731, 731, 731, 731, 731, + /* 280 */ 731, 731, 731, 731, 731, 731, 731, 807, 731, 757, + /* 290 */ 755, 731, 747, 731, }; /********** End of lemon-generated parsing tables *****************************/ @@ -563,8 +585,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* UNSIGNED => nothing */ 0, /* TAGS => nothing */ 0, /* USING => nothing */ - 0, /* AS => nothing */ 0, /* COMMA => nothing */ + 0, /* AS => nothing */ 1, /* NULL => ID */ 0, /* SELECT => nothing */ 0, /* UNION => nothing */ @@ -706,6 +728,7 @@ struct yyParser { int yyerrcnt; /* Shifts left before out of the error */ #endif ParseARG_SDECL /* A place to hold %extra_argument */ + ParseCTX_SDECL /* A place to hold %extra_context */ #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ @@ -859,8 +882,8 @@ static const char *const yyTokenName[] = { /* 103 */ "UNSIGNED", /* 104 */ "TAGS", /* 105 */ "USING", - /* 106 */ "AS", - /* 107 */ "COMMA", + /* 106 */ "COMMA", + /* 107 */ "AS", /* 108 */ "NULL", /* 109 */ "SELECT", /* 110 */ "UNION", @@ -963,49 +986,49 @@ static const char *const yyTokenName[] = { /* 207 */ "INSERT", /* 208 */ "INTO", /* 209 */ "VALUES", - /* 210 */ "error", - /* 211 */ "program", - /* 212 */ "cmd", - /* 213 */ "dbPrefix", - /* 214 */ "ids", - /* 215 */ "cpxName", - /* 216 */ "ifexists", - /* 217 */ "alter_db_optr", - /* 218 */ "acct_optr", - /* 219 */ "ifnotexists", - /* 220 */ "db_optr", - /* 221 */ "pps", - /* 222 */ "tseries", - /* 223 */ "dbs", - /* 224 */ "streams", - /* 225 */ "storage", - /* 226 */ "qtime", - /* 227 */ "users", - /* 
228 */ "conns", - /* 229 */ "state", - /* 230 */ "keep", - /* 231 */ "tagitemlist", - /* 232 */ "cache", - /* 233 */ "replica", - /* 234 */ "quorum", - /* 235 */ "days", - /* 236 */ "minrows", - /* 237 */ "maxrows", - /* 238 */ "blocks", - /* 239 */ "ctime", - /* 240 */ "wal", - /* 241 */ "fsync", - /* 242 */ "comp", - /* 243 */ "prec", - /* 244 */ "update", - /* 245 */ "cachelast", - /* 246 */ "typename", - /* 247 */ "signed", - /* 248 */ "create_table_args", - /* 249 */ "create_stable_args", - /* 250 */ "create_table_list", - /* 251 */ "create_from_stable", - /* 252 */ "columnlist", + /* 210 */ "program", + /* 211 */ "cmd", + /* 212 */ "dbPrefix", + /* 213 */ "ids", + /* 214 */ "cpxName", + /* 215 */ "ifexists", + /* 216 */ "alter_db_optr", + /* 217 */ "acct_optr", + /* 218 */ "ifnotexists", + /* 219 */ "db_optr", + /* 220 */ "pps", + /* 221 */ "tseries", + /* 222 */ "dbs", + /* 223 */ "streams", + /* 224 */ "storage", + /* 225 */ "qtime", + /* 226 */ "users", + /* 227 */ "conns", + /* 228 */ "state", + /* 229 */ "keep", + /* 230 */ "tagitemlist", + /* 231 */ "cache", + /* 232 */ "replica", + /* 233 */ "quorum", + /* 234 */ "days", + /* 235 */ "minrows", + /* 236 */ "maxrows", + /* 237 */ "blocks", + /* 238 */ "ctime", + /* 239 */ "wal", + /* 240 */ "fsync", + /* 241 */ "comp", + /* 242 */ "prec", + /* 243 */ "update", + /* 244 */ "cachelast", + /* 245 */ "typename", + /* 246 */ "signed", + /* 247 */ "create_table_args", + /* 248 */ "create_stable_args", + /* 249 */ "create_table_list", + /* 250 */ "create_from_stable", + /* 251 */ "columnlist", + /* 252 */ "tagNamelist", /* 253 */ "select", /* 254 */ "column", /* 255 */ "tagitem", @@ -1170,127 +1193,131 @@ static const char *const yyRuleName[] = { /* 126 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", /* 127 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", /* 128 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", - /* 129 */ "create_table_args ::= ifnotexists ids cpxName AS select", - /* 130 */ "columnlist ::= columnlist COMMA column", - /* 131 */ "columnlist ::= column", - /* 132 */ "column ::= ids typename", - /* 133 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 134 */ "tagitemlist ::= tagitem", - /* 135 */ "tagitem ::= INTEGER", - /* 136 */ "tagitem ::= FLOAT", - /* 137 */ "tagitem ::= STRING", - /* 138 */ "tagitem ::= BOOL", - /* 139 */ "tagitem ::= NULL", - /* 140 */ "tagitem ::= MINUS INTEGER", - /* 141 */ "tagitem ::= MINUS FLOAT", - /* 142 */ "tagitem ::= PLUS INTEGER", - /* 143 */ "tagitem ::= PLUS FLOAT", - /* 144 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 145 */ "union ::= select", - /* 146 */ "union ::= LP union RP", - /* 147 */ "union ::= union UNION ALL select", - /* 148 */ "union ::= union UNION ALL LP select RP", - /* 149 */ "cmd ::= union", - /* 150 */ "select ::= SELECT selcollist", - /* 151 */ "sclp ::= selcollist COMMA", - /* 152 */ "sclp ::=", - /* 153 */ "selcollist ::= sclp distinct expr as", - /* 154 */ "selcollist ::= sclp STAR", - /* 155 */ "as ::= AS ids", - /* 156 */ "as ::= ids", - /* 157 */ "as ::=", - /* 158 */ "distinct ::= DISTINCT", - /* 159 */ "distinct ::=", - /* 160 */ "from ::= FROM tablelist", - /* 161 */ "tablelist ::= ids cpxName", - /* 162 */ "tablelist ::= ids cpxName ids", - /* 163 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 164 */ "tablelist ::= tablelist COMMA ids 
cpxName ids", - /* 165 */ "tmvar ::= VARIABLE", - /* 166 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 167 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", - /* 168 */ "interval_opt ::=", - /* 169 */ "fill_opt ::=", - /* 170 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 171 */ "fill_opt ::= FILL LP ID RP", - /* 172 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 173 */ "sliding_opt ::=", - /* 174 */ "orderby_opt ::=", - /* 175 */ "orderby_opt ::= ORDER BY sortlist", - /* 176 */ "sortlist ::= sortlist COMMA item sortorder", - /* 177 */ "sortlist ::= item sortorder", - /* 178 */ "item ::= ids cpxName", - /* 179 */ "sortorder ::= ASC", - /* 180 */ "sortorder ::= DESC", - /* 181 */ "sortorder ::=", - /* 182 */ "groupby_opt ::=", - /* 183 */ "groupby_opt ::= GROUP BY grouplist", - /* 184 */ "grouplist ::= grouplist COMMA item", - /* 185 */ "grouplist ::= item", - /* 186 */ "having_opt ::=", - /* 187 */ "having_opt ::= HAVING expr", - /* 188 */ "limit_opt ::=", - /* 189 */ "limit_opt ::= LIMIT signed", - /* 190 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 191 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 192 */ "slimit_opt ::=", - /* 193 */ "slimit_opt ::= SLIMIT signed", - /* 194 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 195 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 196 */ "where_opt ::=", - /* 197 */ "where_opt ::= WHERE expr", - /* 198 */ "expr ::= LP expr RP", - /* 199 */ "expr ::= ID", - /* 200 */ "expr ::= ID DOT ID", - /* 201 */ "expr ::= ID DOT STAR", - /* 202 */ "expr ::= INTEGER", - /* 203 */ "expr ::= MINUS INTEGER", - /* 204 */ "expr ::= PLUS INTEGER", - /* 205 */ "expr ::= FLOAT", - /* 206 */ "expr ::= MINUS FLOAT", - /* 207 */ "expr ::= PLUS FLOAT", - /* 208 */ "expr ::= STRING", - /* 209 */ "expr ::= NOW", - /* 210 */ "expr ::= VARIABLE", - /* 211 */ "expr ::= BOOL", - /* 212 */ "expr ::= ID LP exprlist RP", - /* 213 */ "expr ::= ID LP STAR RP", - /* 214 */ "expr ::= expr IS NULL", - /* 215 */ "expr ::= expr IS NOT NULL", - /* 216 */ "expr ::= expr LT expr", - /* 217 */ "expr ::= expr GT expr", - /* 218 */ "expr ::= expr LE expr", - /* 219 */ "expr ::= expr GE expr", - /* 220 */ "expr ::= expr NE expr", - /* 221 */ "expr ::= expr EQ expr", - /* 222 */ "expr ::= expr AND expr", - /* 223 */ "expr ::= expr OR expr", - /* 224 */ "expr ::= expr PLUS expr", - /* 225 */ "expr ::= expr MINUS expr", - /* 226 */ "expr ::= expr STAR expr", - /* 227 */ "expr ::= expr SLASH expr", - /* 228 */ "expr ::= expr REM expr", - /* 229 */ "expr ::= expr LIKE expr", - /* 230 */ "expr ::= expr IN LP exprlist RP", - /* 231 */ "exprlist ::= exprlist COMMA expritem", - /* 232 */ "exprlist ::= expritem", - /* 233 */ "expritem ::= expr", - /* 234 */ "expritem ::=", - /* 235 */ "cmd ::= RESET QUERY CACHE", - /* 236 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 237 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 238 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 239 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 240 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 241 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 242 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 243 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 244 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 245 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 246 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 247 */ "cmd ::= KILL CONNECTION 
INTEGER", - /* 248 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 249 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 129 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", + /* 130 */ "tagNamelist ::= tagNamelist COMMA ids", + /* 131 */ "tagNamelist ::= ids", + /* 132 */ "create_table_args ::= ifnotexists ids cpxName AS select", + /* 133 */ "columnlist ::= columnlist COMMA column", + /* 134 */ "columnlist ::= column", + /* 135 */ "column ::= ids typename", + /* 136 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 137 */ "tagitemlist ::= tagitem", + /* 138 */ "tagitem ::= INTEGER", + /* 139 */ "tagitem ::= FLOAT", + /* 140 */ "tagitem ::= STRING", + /* 141 */ "tagitem ::= BOOL", + /* 142 */ "tagitem ::= NULL", + /* 143 */ "tagitem ::= MINUS INTEGER", + /* 144 */ "tagitem ::= MINUS FLOAT", + /* 145 */ "tagitem ::= PLUS INTEGER", + /* 146 */ "tagitem ::= PLUS FLOAT", + /* 147 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 148 */ "union ::= select", + /* 149 */ "union ::= LP union RP", + /* 150 */ "union ::= union UNION ALL select", + /* 151 */ "union ::= union UNION ALL LP select RP", + /* 152 */ "cmd ::= union", + /* 153 */ "select ::= SELECT selcollist", + /* 154 */ "sclp ::= selcollist COMMA", + /* 155 */ "sclp ::=", + /* 156 */ "selcollist ::= sclp distinct expr as", + /* 157 */ "selcollist ::= sclp STAR", + /* 158 */ "as ::= AS ids", + /* 159 */ "as ::= ids", + /* 160 */ "as ::=", + /* 161 */ "distinct ::= DISTINCT", + /* 162 */ "distinct ::=", + /* 163 */ "from ::= FROM tablelist", + /* 164 */ "tablelist ::= ids cpxName", + /* 165 */ "tablelist ::= ids cpxName ids", + /* 166 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 167 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 168 */ "tmvar ::= VARIABLE", + /* 169 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 170 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", + /* 171 */ "interval_opt ::=", + /* 172 */ "fill_opt ::=", + /* 173 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 174 */ "fill_opt ::= FILL LP ID RP", + /* 175 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 176 */ "sliding_opt ::=", + /* 177 */ "orderby_opt ::=", + /* 178 */ "orderby_opt ::= ORDER BY sortlist", + /* 179 */ "sortlist ::= sortlist COMMA item sortorder", + /* 180 */ "sortlist ::= item sortorder", + /* 181 */ "item ::= ids cpxName", + /* 182 */ "sortorder ::= ASC", + /* 183 */ "sortorder ::= DESC", + /* 184 */ "sortorder ::=", + /* 185 */ "groupby_opt ::=", + /* 186 */ "groupby_opt ::= GROUP BY grouplist", + /* 187 */ "grouplist ::= grouplist COMMA item", + /* 188 */ "grouplist ::= item", + /* 189 */ "having_opt ::=", + /* 190 */ "having_opt ::= HAVING expr", + /* 191 */ "limit_opt ::=", + /* 192 */ "limit_opt ::= LIMIT signed", + /* 193 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 194 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 195 */ "slimit_opt ::=", + /* 196 */ "slimit_opt ::= SLIMIT signed", + /* 197 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 198 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 199 */ "where_opt ::=", + /* 200 */ "where_opt ::= WHERE expr", + /* 201 */ "expr ::= LP expr RP", + /* 202 */ "expr ::= ID", + /* 203 */ "expr ::= ID DOT ID", + /* 204 */ "expr ::= ID DOT STAR", + /* 205 */ "expr ::= INTEGER", + /* 206 */ "expr ::= MINUS INTEGER", + /* 207 */ "expr ::= PLUS INTEGER", + /* 208 */ "expr ::= FLOAT", + /* 209 
*/ "expr ::= MINUS FLOAT", + /* 210 */ "expr ::= PLUS FLOAT", + /* 211 */ "expr ::= STRING", + /* 212 */ "expr ::= NOW", + /* 213 */ "expr ::= VARIABLE", + /* 214 */ "expr ::= BOOL", + /* 215 */ "expr ::= ID LP exprlist RP", + /* 216 */ "expr ::= ID LP STAR RP", + /* 217 */ "expr ::= expr IS NULL", + /* 218 */ "expr ::= expr IS NOT NULL", + /* 219 */ "expr ::= expr LT expr", + /* 220 */ "expr ::= expr GT expr", + /* 221 */ "expr ::= expr LE expr", + /* 222 */ "expr ::= expr GE expr", + /* 223 */ "expr ::= expr NE expr", + /* 224 */ "expr ::= expr EQ expr", + /* 225 */ "expr ::= expr BETWEEN expr AND expr", + /* 226 */ "expr ::= expr AND expr", + /* 227 */ "expr ::= expr OR expr", + /* 228 */ "expr ::= expr PLUS expr", + /* 229 */ "expr ::= expr MINUS expr", + /* 230 */ "expr ::= expr STAR expr", + /* 231 */ "expr ::= expr SLASH expr", + /* 232 */ "expr ::= expr REM expr", + /* 233 */ "expr ::= expr LIKE expr", + /* 234 */ "expr ::= expr IN LP exprlist RP", + /* 235 */ "exprlist ::= exprlist COMMA expritem", + /* 236 */ "exprlist ::= expritem", + /* 237 */ "expritem ::= expr", + /* 238 */ "expritem ::=", + /* 239 */ "cmd ::= RESET QUERY CACHE", + /* 240 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 241 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 242 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 243 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 244 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 245 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 246 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 247 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 248 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 249 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 250 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 251 */ "cmd ::= KILL CONNECTION INTEGER", + /* 252 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 253 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1339,28 +1366,29 @@ static int yyGrowStack(yyParser *p){ /* Initialize a new parser that has already been allocated. */ -void ParseInit(void *yypParser){ - yyParser *pParser = (yyParser*)yypParser; +void ParseInit(void *yypRawParser ParseCTX_PDECL){ + yyParser *yypParser = (yyParser*)yypRawParser; + ParseCTX_STORE #ifdef YYTRACKMAXSTACKDEPTH - pParser->yyhwm = 0; + yypParser->yyhwm = 0; #endif #if YYSTACKDEPTH<=0 - pParser->yytos = NULL; - pParser->yystack = NULL; - pParser->yystksz = 0; - if( yyGrowStack(pParser) ){ - pParser->yystack = &pParser->yystk0; - pParser->yystksz = 1; + yypParser->yytos = NULL; + yypParser->yystack = NULL; + yypParser->yystksz = 0; + if( yyGrowStack(yypParser) ){ + yypParser->yystack = &yypParser->yystk0; + yypParser->yystksz = 1; } #endif #ifndef YYNOERRORRECOVERY - pParser->yyerrcnt = -1; + yypParser->yyerrcnt = -1; #endif - pParser->yytos = pParser->yystack; - pParser->yystack[0].stateno = 0; - pParser->yystack[0].major = 0; + yypParser->yytos = yypParser->yystack; + yypParser->yystack[0].stateno = 0; + yypParser->yystack[0].major = 0; #if YYSTACKDEPTH>0 - pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1]; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; #endif } @@ -1377,11 +1405,14 @@ void ParseInit(void *yypParser){ ** A pointer to a parser. This pointer is used in subsequent calls ** to Parse and ParseFree. 
*/ -void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ - yyParser *pParser; - pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( pParser ) ParseInit(pParser); - return pParser; +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ + yyParser *yypParser; + yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( yypParser ){ + ParseCTX_STORE + ParseInit(yypParser ParseCTX_PARAM); + } + return (void*)yypParser; } #endif /* Parse_ENGINEALWAYSONSTACK */ @@ -1398,7 +1429,8 @@ static void yy_destructor( YYCODETYPE yymajor, /* Type code for object to destroy */ YYMINORTYPE *yypminor /* The object to be destroyed */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH switch( yymajor ){ /* Here is inserted the actions which take place when a ** terminal or non-terminal is destroyed. This can happen @@ -1411,9 +1443,10 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 230: /* keep */ - case 231: /* tagitemlist */ - case 252: /* columnlist */ + case 229: /* keep */ + case 230: /* tagitemlist */ + case 251: /* columnlist */ + case 252: /* tagNamelist */ case 260: /* fill_opt */ case 262: /* groupby_opt */ case 263: /* orderby_opt */ @@ -1423,7 +1456,7 @@ static void yy_destructor( taosArrayDestroy((yypminor->yy247)); } break; - case 250: /* create_table_list */ + case 249: /* create_table_list */ { destroyCreateTableSql((yypminor->yy358)); } @@ -1567,13 +1600,12 @@ int ParseCoverage(FILE *out){ ** Find the appropriate action for a parser given the terminal ** look-ahead token iLookAhead. */ -static unsigned int yy_find_shift_action( - yyParser *pParser, /* The parser */ - YYCODETYPE iLookAhead /* The look-ahead token */ +static YYACTIONTYPE yy_find_shift_action( + YYCODETYPE iLookAhead, /* The look-ahead token */ + YYACTIONTYPE stateno /* Current state number */ ){ int i; - int stateno = pParser->yytos->stateno; - + if( stateno>YY_MAX_SHIFT ) return stateno; assert( stateno <= YY_SHIFT_COUNT ); #if defined(YYCOVERAGE) @@ -1581,15 +1613,19 @@ static unsigned int yy_find_shift_action( #endif do{ i = yy_shift_ofst[stateno]; - assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) ); + assert( i>=0 ); + assert( i<=YY_ACTTAB_COUNT ); + assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); assert( iLookAhead!=YYNOCODE ); assert( iLookAhead < YYNTOKEN ); i += iLookAhead; + assert( i<(int)YY_NLOOKAHEAD ); if( yy_lookahead[i]!=iLookAhead ){ #ifdef YYFALLBACK YYCODETYPE iFallback; /* Fallback token */ - if( iLookAhead %s\n", @@ -1604,15 +1640,8 @@ static unsigned int yy_find_shift_action( #ifdef YYWILDCARD { int j = i - iLookAhead + YYWILDCARD; - if( -#if YY_SHIFT_MIN+YYWILDCARD<0 - j>=0 && -#endif -#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT - j0 - ){ + assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) ); + if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", @@ -1626,6 +1655,7 @@ static unsigned int yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ + assert( i>=0 && iyytos; - yytos->stateno = (YYACTIONTYPE)yyNewState; - yytos->major = (YYCODETYPE)yyMajor; + yytos->stateno = yyNewState; + yytos->major = yyMajor; yytos->minor.yy0 = yyMinor; yyTraceShift(yypParser, yyNewState, "Shift"); } -/* The following table contains information about every rule that -** is used during the reduce. 
-*/ -static const struct { - YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ - signed char nrhs; /* Negative of the number of RHS symbols in the rule */ -} yyRuleInfo[] = { - { 211, -1 }, /* (0) program ::= cmd */ - { 212, -2 }, /* (1) cmd ::= SHOW DATABASES */ - { 212, -2 }, /* (2) cmd ::= SHOW MNODES */ - { 212, -2 }, /* (3) cmd ::= SHOW DNODES */ - { 212, -2 }, /* (4) cmd ::= SHOW ACCOUNTS */ - { 212, -2 }, /* (5) cmd ::= SHOW USERS */ - { 212, -2 }, /* (6) cmd ::= SHOW MODULES */ - { 212, -2 }, /* (7) cmd ::= SHOW QUERIES */ - { 212, -2 }, /* (8) cmd ::= SHOW CONNECTIONS */ - { 212, -2 }, /* (9) cmd ::= SHOW STREAMS */ - { 212, -2 }, /* (10) cmd ::= SHOW VARIABLES */ - { 212, -2 }, /* (11) cmd ::= SHOW SCORES */ - { 212, -2 }, /* (12) cmd ::= SHOW GRANTS */ - { 212, -2 }, /* (13) cmd ::= SHOW VNODES */ - { 212, -3 }, /* (14) cmd ::= SHOW VNODES IPTOKEN */ - { 213, 0 }, /* (15) dbPrefix ::= */ - { 213, -2 }, /* (16) dbPrefix ::= ids DOT */ - { 215, 0 }, /* (17) cpxName ::= */ - { 215, -2 }, /* (18) cpxName ::= DOT ids */ - { 212, -5 }, /* (19) cmd ::= SHOW CREATE TABLE ids cpxName */ - { 212, -4 }, /* (20) cmd ::= SHOW CREATE DATABASE ids */ - { 212, -3 }, /* (21) cmd ::= SHOW dbPrefix TABLES */ - { 212, -5 }, /* (22) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 212, -3 }, /* (23) cmd ::= SHOW dbPrefix STABLES */ - { 212, -5 }, /* (24) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 212, -3 }, /* (25) cmd ::= SHOW dbPrefix VGROUPS */ - { 212, -4 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 212, -5 }, /* (27) cmd ::= DROP TABLE ifexists ids cpxName */ - { 212, -5 }, /* (28) cmd ::= DROP STABLE ifexists ids cpxName */ - { 212, -4 }, /* (29) cmd ::= DROP DATABASE ifexists ids */ - { 212, -3 }, /* (30) cmd ::= DROP DNODE ids */ - { 212, -3 }, /* (31) cmd ::= DROP USER ids */ - { 212, -3 }, /* (32) cmd ::= DROP ACCOUNT ids */ - { 212, -2 }, /* (33) cmd ::= USE ids */ - { 212, -3 }, /* (34) cmd ::= DESCRIBE ids cpxName */ - { 212, -5 }, /* (35) cmd ::= ALTER USER ids PASS ids */ - { 212, -5 }, /* (36) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 212, -4 }, /* (37) cmd ::= ALTER DNODE ids ids */ - { 212, -5 }, /* (38) cmd ::= ALTER DNODE ids ids ids */ - { 212, -3 }, /* (39) cmd ::= ALTER LOCAL ids */ - { 212, -4 }, /* (40) cmd ::= ALTER LOCAL ids ids */ - { 212, -4 }, /* (41) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 212, -4 }, /* (42) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 212, -6 }, /* (43) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 214, -1 }, /* (44) ids ::= ID */ - { 214, -1 }, /* (45) ids ::= STRING */ - { 216, -2 }, /* (46) ifexists ::= IF EXISTS */ - { 216, 0 }, /* (47) ifexists ::= */ - { 219, -3 }, /* (48) ifnotexists ::= IF NOT EXISTS */ - { 219, 0 }, /* (49) ifnotexists ::= */ - { 212, -3 }, /* (50) cmd ::= CREATE DNODE ids */ - { 212, -6 }, /* (51) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 212, -5 }, /* (52) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 212, -5 }, /* (53) cmd ::= CREATE USER ids PASS ids */ - { 221, 0 }, /* (54) pps ::= */ - { 221, -2 }, /* (55) pps ::= PPS INTEGER */ - { 222, 0 }, /* (56) tseries ::= */ - { 222, -2 }, /* (57) tseries ::= TSERIES INTEGER */ - { 223, 0 }, /* (58) dbs ::= */ - { 223, -2 }, /* (59) dbs ::= DBS INTEGER */ - { 224, 0 }, /* (60) streams ::= */ - { 224, -2 }, /* (61) streams ::= STREAMS INTEGER */ - { 225, 0 }, /* (62) storage ::= */ - { 225, -2 }, /* (63) storage ::= STORAGE INTEGER */ - { 226, 0 }, /* (64) qtime ::= */ - { 226, -2 }, /* (65) qtime ::= QTIME INTEGER */ - { 227, 0 
}, /* (66) users ::= */ - { 227, -2 }, /* (67) users ::= USERS INTEGER */ - { 228, 0 }, /* (68) conns ::= */ - { 228, -2 }, /* (69) conns ::= CONNS INTEGER */ - { 229, 0 }, /* (70) state ::= */ - { 229, -2 }, /* (71) state ::= STATE ids */ - { 218, -9 }, /* (72) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 230, -2 }, /* (73) keep ::= KEEP tagitemlist */ - { 232, -2 }, /* (74) cache ::= CACHE INTEGER */ - { 233, -2 }, /* (75) replica ::= REPLICA INTEGER */ - { 234, -2 }, /* (76) quorum ::= QUORUM INTEGER */ - { 235, -2 }, /* (77) days ::= DAYS INTEGER */ - { 236, -2 }, /* (78) minrows ::= MINROWS INTEGER */ - { 237, -2 }, /* (79) maxrows ::= MAXROWS INTEGER */ - { 238, -2 }, /* (80) blocks ::= BLOCKS INTEGER */ - { 239, -2 }, /* (81) ctime ::= CTIME INTEGER */ - { 240, -2 }, /* (82) wal ::= WAL INTEGER */ - { 241, -2 }, /* (83) fsync ::= FSYNC INTEGER */ - { 242, -2 }, /* (84) comp ::= COMP INTEGER */ - { 243, -2 }, /* (85) prec ::= PRECISION STRING */ - { 244, -2 }, /* (86) update ::= UPDATE INTEGER */ - { 245, -2 }, /* (87) cachelast ::= CACHELAST INTEGER */ - { 220, 0 }, /* (88) db_optr ::= */ - { 220, -2 }, /* (89) db_optr ::= db_optr cache */ - { 220, -2 }, /* (90) db_optr ::= db_optr replica */ - { 220, -2 }, /* (91) db_optr ::= db_optr quorum */ - { 220, -2 }, /* (92) db_optr ::= db_optr days */ - { 220, -2 }, /* (93) db_optr ::= db_optr minrows */ - { 220, -2 }, /* (94) db_optr ::= db_optr maxrows */ - { 220, -2 }, /* (95) db_optr ::= db_optr blocks */ - { 220, -2 }, /* (96) db_optr ::= db_optr ctime */ - { 220, -2 }, /* (97) db_optr ::= db_optr wal */ - { 220, -2 }, /* (98) db_optr ::= db_optr fsync */ - { 220, -2 }, /* (99) db_optr ::= db_optr comp */ - { 220, -2 }, /* (100) db_optr ::= db_optr prec */ - { 220, -2 }, /* (101) db_optr ::= db_optr keep */ - { 220, -2 }, /* (102) db_optr ::= db_optr update */ - { 220, -2 }, /* (103) db_optr ::= db_optr cachelast */ - { 217, 0 }, /* (104) alter_db_optr ::= */ - { 217, -2 }, /* (105) alter_db_optr ::= alter_db_optr replica */ - { 217, -2 }, /* (106) alter_db_optr ::= alter_db_optr quorum */ - { 217, -2 }, /* (107) alter_db_optr ::= alter_db_optr keep */ - { 217, -2 }, /* (108) alter_db_optr ::= alter_db_optr blocks */ - { 217, -2 }, /* (109) alter_db_optr ::= alter_db_optr comp */ - { 217, -2 }, /* (110) alter_db_optr ::= alter_db_optr wal */ - { 217, -2 }, /* (111) alter_db_optr ::= alter_db_optr fsync */ - { 217, -2 }, /* (112) alter_db_optr ::= alter_db_optr update */ - { 217, -2 }, /* (113) alter_db_optr ::= alter_db_optr cachelast */ - { 246, -1 }, /* (114) typename ::= ids */ - { 246, -4 }, /* (115) typename ::= ids LP signed RP */ - { 246, -2 }, /* (116) typename ::= ids UNSIGNED */ - { 247, -1 }, /* (117) signed ::= INTEGER */ - { 247, -2 }, /* (118) signed ::= PLUS INTEGER */ - { 247, -2 }, /* (119) signed ::= MINUS INTEGER */ - { 212, -3 }, /* (120) cmd ::= CREATE TABLE create_table_args */ - { 212, -3 }, /* (121) cmd ::= CREATE TABLE create_stable_args */ - { 212, -3 }, /* (122) cmd ::= CREATE STABLE create_stable_args */ - { 212, -3 }, /* (123) cmd ::= CREATE TABLE create_table_list */ - { 250, -1 }, /* (124) create_table_list ::= create_from_stable */ - { 250, -2 }, /* (125) create_table_list ::= create_table_list create_from_stable */ - { 248, -6 }, /* (126) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - { 249, -10 }, /* (127) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - { 251, -10 }, /* (128) create_from_stable ::= 
ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 248, -5 }, /* (129) create_table_args ::= ifnotexists ids cpxName AS select */ - { 252, -3 }, /* (130) columnlist ::= columnlist COMMA column */ - { 252, -1 }, /* (131) columnlist ::= column */ - { 254, -2 }, /* (132) column ::= ids typename */ - { 231, -3 }, /* (133) tagitemlist ::= tagitemlist COMMA tagitem */ - { 231, -1 }, /* (134) tagitemlist ::= tagitem */ - { 255, -1 }, /* (135) tagitem ::= INTEGER */ - { 255, -1 }, /* (136) tagitem ::= FLOAT */ - { 255, -1 }, /* (137) tagitem ::= STRING */ - { 255, -1 }, /* (138) tagitem ::= BOOL */ - { 255, -1 }, /* (139) tagitem ::= NULL */ - { 255, -2 }, /* (140) tagitem ::= MINUS INTEGER */ - { 255, -2 }, /* (141) tagitem ::= MINUS FLOAT */ - { 255, -2 }, /* (142) tagitem ::= PLUS INTEGER */ - { 255, -2 }, /* (143) tagitem ::= PLUS FLOAT */ - { 253, -12 }, /* (144) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 267, -1 }, /* (145) union ::= select */ - { 267, -3 }, /* (146) union ::= LP union RP */ - { 267, -4 }, /* (147) union ::= union UNION ALL select */ - { 267, -6 }, /* (148) union ::= union UNION ALL LP select RP */ - { 212, -1 }, /* (149) cmd ::= union */ - { 253, -2 }, /* (150) select ::= SELECT selcollist */ - { 268, -2 }, /* (151) sclp ::= selcollist COMMA */ - { 268, 0 }, /* (152) sclp ::= */ - { 256, -4 }, /* (153) selcollist ::= sclp distinct expr as */ - { 256, -2 }, /* (154) selcollist ::= sclp STAR */ - { 271, -2 }, /* (155) as ::= AS ids */ - { 271, -1 }, /* (156) as ::= ids */ - { 271, 0 }, /* (157) as ::= */ - { 269, -1 }, /* (158) distinct ::= DISTINCT */ - { 269, 0 }, /* (159) distinct ::= */ - { 257, -2 }, /* (160) from ::= FROM tablelist */ - { 272, -2 }, /* (161) tablelist ::= ids cpxName */ - { 272, -3 }, /* (162) tablelist ::= ids cpxName ids */ - { 272, -4 }, /* (163) tablelist ::= tablelist COMMA ids cpxName */ - { 272, -5 }, /* (164) tablelist ::= tablelist COMMA ids cpxName ids */ - { 273, -1 }, /* (165) tmvar ::= VARIABLE */ - { 259, -4 }, /* (166) interval_opt ::= INTERVAL LP tmvar RP */ - { 259, -6 }, /* (167) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 259, 0 }, /* (168) interval_opt ::= */ - { 260, 0 }, /* (169) fill_opt ::= */ - { 260, -6 }, /* (170) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 260, -4 }, /* (171) fill_opt ::= FILL LP ID RP */ - { 261, -4 }, /* (172) sliding_opt ::= SLIDING LP tmvar RP */ - { 261, 0 }, /* (173) sliding_opt ::= */ - { 263, 0 }, /* (174) orderby_opt ::= */ - { 263, -3 }, /* (175) orderby_opt ::= ORDER BY sortlist */ - { 274, -4 }, /* (176) sortlist ::= sortlist COMMA item sortorder */ - { 274, -2 }, /* (177) sortlist ::= item sortorder */ - { 276, -2 }, /* (178) item ::= ids cpxName */ - { 277, -1 }, /* (179) sortorder ::= ASC */ - { 277, -1 }, /* (180) sortorder ::= DESC */ - { 277, 0 }, /* (181) sortorder ::= */ - { 262, 0 }, /* (182) groupby_opt ::= */ - { 262, -3 }, /* (183) groupby_opt ::= GROUP BY grouplist */ - { 278, -3 }, /* (184) grouplist ::= grouplist COMMA item */ - { 278, -1 }, /* (185) grouplist ::= item */ - { 264, 0 }, /* (186) having_opt ::= */ - { 264, -2 }, /* (187) having_opt ::= HAVING expr */ - { 266, 0 }, /* (188) limit_opt ::= */ - { 266, -2 }, /* (189) limit_opt ::= LIMIT signed */ - { 266, -4 }, /* (190) limit_opt ::= LIMIT signed OFFSET signed */ - { 266, -4 }, /* (191) limit_opt ::= LIMIT signed COMMA signed */ - { 265, 0 }, /* (192) slimit_opt ::= */ - { 265, -2 }, 
/* (193) slimit_opt ::= SLIMIT signed */ - { 265, -4 }, /* (194) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 265, -4 }, /* (195) slimit_opt ::= SLIMIT signed COMMA signed */ - { 258, 0 }, /* (196) where_opt ::= */ - { 258, -2 }, /* (197) where_opt ::= WHERE expr */ - { 270, -3 }, /* (198) expr ::= LP expr RP */ - { 270, -1 }, /* (199) expr ::= ID */ - { 270, -3 }, /* (200) expr ::= ID DOT ID */ - { 270, -3 }, /* (201) expr ::= ID DOT STAR */ - { 270, -1 }, /* (202) expr ::= INTEGER */ - { 270, -2 }, /* (203) expr ::= MINUS INTEGER */ - { 270, -2 }, /* (204) expr ::= PLUS INTEGER */ - { 270, -1 }, /* (205) expr ::= FLOAT */ - { 270, -2 }, /* (206) expr ::= MINUS FLOAT */ - { 270, -2 }, /* (207) expr ::= PLUS FLOAT */ - { 270, -1 }, /* (208) expr ::= STRING */ - { 270, -1 }, /* (209) expr ::= NOW */ - { 270, -1 }, /* (210) expr ::= VARIABLE */ - { 270, -1 }, /* (211) expr ::= BOOL */ - { 270, -4 }, /* (212) expr ::= ID LP exprlist RP */ - { 270, -4 }, /* (213) expr ::= ID LP STAR RP */ - { 270, -3 }, /* (214) expr ::= expr IS NULL */ - { 270, -4 }, /* (215) expr ::= expr IS NOT NULL */ - { 270, -3 }, /* (216) expr ::= expr LT expr */ - { 270, -3 }, /* (217) expr ::= expr GT expr */ - { 270, -3 }, /* (218) expr ::= expr LE expr */ - { 270, -3 }, /* (219) expr ::= expr GE expr */ - { 270, -3 }, /* (220) expr ::= expr NE expr */ - { 270, -3 }, /* (221) expr ::= expr EQ expr */ - { 270, -3 }, /* (222) expr ::= expr AND expr */ - { 270, -3 }, /* (223) expr ::= expr OR expr */ - { 270, -3 }, /* (224) expr ::= expr PLUS expr */ - { 270, -3 }, /* (225) expr ::= expr MINUS expr */ - { 270, -3 }, /* (226) expr ::= expr STAR expr */ - { 270, -3 }, /* (227) expr ::= expr SLASH expr */ - { 270, -3 }, /* (228) expr ::= expr REM expr */ - { 270, -3 }, /* (229) expr ::= expr LIKE expr */ - { 270, -5 }, /* (230) expr ::= expr IN LP exprlist RP */ - { 279, -3 }, /* (231) exprlist ::= exprlist COMMA expritem */ - { 279, -1 }, /* (232) exprlist ::= expritem */ - { 280, -1 }, /* (233) expritem ::= expr */ - { 280, 0 }, /* (234) expritem ::= */ - { 212, -3 }, /* (235) cmd ::= RESET QUERY CACHE */ - { 212, -7 }, /* (236) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 212, -7 }, /* (237) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 212, -7 }, /* (238) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 212, -7 }, /* (239) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 212, -8 }, /* (240) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 212, -9 }, /* (241) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 212, -7 }, /* (242) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 212, -7 }, /* (243) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 212, -7 }, /* (244) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 212, -7 }, /* (245) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 212, -8 }, /* (246) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 212, -3 }, /* (247) cmd ::= KILL CONNECTION INTEGER */ - { 212, -5 }, /* (248) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 212, -5 }, /* (249) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side +** of that rule */ +static const YYCODETYPE yyRuleInfoLhs[] = { + 210, /* (0) program ::= cmd */ + 211, /* (1) cmd ::= SHOW DATABASES */ + 211, /* (2) cmd ::= SHOW MNODES */ + 211, /* (3) cmd ::= SHOW DNODES */ + 211, /* (4) cmd ::= SHOW ACCOUNTS */ + 211, /* (5) cmd ::= SHOW USERS */ + 211, /* (6) cmd ::= 
SHOW MODULES */ + 211, /* (7) cmd ::= SHOW QUERIES */ + 211, /* (8) cmd ::= SHOW CONNECTIONS */ + 211, /* (9) cmd ::= SHOW STREAMS */ + 211, /* (10) cmd ::= SHOW VARIABLES */ + 211, /* (11) cmd ::= SHOW SCORES */ + 211, /* (12) cmd ::= SHOW GRANTS */ + 211, /* (13) cmd ::= SHOW VNODES */ + 211, /* (14) cmd ::= SHOW VNODES IPTOKEN */ + 212, /* (15) dbPrefix ::= */ + 212, /* (16) dbPrefix ::= ids DOT */ + 214, /* (17) cpxName ::= */ + 214, /* (18) cpxName ::= DOT ids */ + 211, /* (19) cmd ::= SHOW CREATE TABLE ids cpxName */ + 211, /* (20) cmd ::= SHOW CREATE DATABASE ids */ + 211, /* (21) cmd ::= SHOW dbPrefix TABLES */ + 211, /* (22) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + 211, /* (23) cmd ::= SHOW dbPrefix STABLES */ + 211, /* (24) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + 211, /* (25) cmd ::= SHOW dbPrefix VGROUPS */ + 211, /* (26) cmd ::= SHOW dbPrefix VGROUPS ids */ + 211, /* (27) cmd ::= DROP TABLE ifexists ids cpxName */ + 211, /* (28) cmd ::= DROP STABLE ifexists ids cpxName */ + 211, /* (29) cmd ::= DROP DATABASE ifexists ids */ + 211, /* (30) cmd ::= DROP DNODE ids */ + 211, /* (31) cmd ::= DROP USER ids */ + 211, /* (32) cmd ::= DROP ACCOUNT ids */ + 211, /* (33) cmd ::= USE ids */ + 211, /* (34) cmd ::= DESCRIBE ids cpxName */ + 211, /* (35) cmd ::= ALTER USER ids PASS ids */ + 211, /* (36) cmd ::= ALTER USER ids PRIVILEGE ids */ + 211, /* (37) cmd ::= ALTER DNODE ids ids */ + 211, /* (38) cmd ::= ALTER DNODE ids ids ids */ + 211, /* (39) cmd ::= ALTER LOCAL ids */ + 211, /* (40) cmd ::= ALTER LOCAL ids ids */ + 211, /* (41) cmd ::= ALTER DATABASE ids alter_db_optr */ + 211, /* (42) cmd ::= ALTER ACCOUNT ids acct_optr */ + 211, /* (43) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + 213, /* (44) ids ::= ID */ + 213, /* (45) ids ::= STRING */ + 215, /* (46) ifexists ::= IF EXISTS */ + 215, /* (47) ifexists ::= */ + 218, /* (48) ifnotexists ::= IF NOT EXISTS */ + 218, /* (49) ifnotexists ::= */ + 211, /* (50) cmd ::= CREATE DNODE ids */ + 211, /* (51) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + 211, /* (52) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + 211, /* (53) cmd ::= CREATE USER ids PASS ids */ + 220, /* (54) pps ::= */ + 220, /* (55) pps ::= PPS INTEGER */ + 221, /* (56) tseries ::= */ + 221, /* (57) tseries ::= TSERIES INTEGER */ + 222, /* (58) dbs ::= */ + 222, /* (59) dbs ::= DBS INTEGER */ + 223, /* (60) streams ::= */ + 223, /* (61) streams ::= STREAMS INTEGER */ + 224, /* (62) storage ::= */ + 224, /* (63) storage ::= STORAGE INTEGER */ + 225, /* (64) qtime ::= */ + 225, /* (65) qtime ::= QTIME INTEGER */ + 226, /* (66) users ::= */ + 226, /* (67) users ::= USERS INTEGER */ + 227, /* (68) conns ::= */ + 227, /* (69) conns ::= CONNS INTEGER */ + 228, /* (70) state ::= */ + 228, /* (71) state ::= STATE ids */ + 217, /* (72) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + 229, /* (73) keep ::= KEEP tagitemlist */ + 231, /* (74) cache ::= CACHE INTEGER */ + 232, /* (75) replica ::= REPLICA INTEGER */ + 233, /* (76) quorum ::= QUORUM INTEGER */ + 234, /* (77) days ::= DAYS INTEGER */ + 235, /* (78) minrows ::= MINROWS INTEGER */ + 236, /* (79) maxrows ::= MAXROWS INTEGER */ + 237, /* (80) blocks ::= BLOCKS INTEGER */ + 238, /* (81) ctime ::= CTIME INTEGER */ + 239, /* (82) wal ::= WAL INTEGER */ + 240, /* (83) fsync ::= FSYNC INTEGER */ + 241, /* (84) comp ::= COMP INTEGER */ + 242, /* (85) prec ::= PRECISION STRING */ + 243, /* (86) update ::= UPDATE INTEGER */ + 244, /* (87) cachelast ::= CACHELAST INTEGER */ + 219, /* (88) 
db_optr ::= */ + 219, /* (89) db_optr ::= db_optr cache */ + 219, /* (90) db_optr ::= db_optr replica */ + 219, /* (91) db_optr ::= db_optr quorum */ + 219, /* (92) db_optr ::= db_optr days */ + 219, /* (93) db_optr ::= db_optr minrows */ + 219, /* (94) db_optr ::= db_optr maxrows */ + 219, /* (95) db_optr ::= db_optr blocks */ + 219, /* (96) db_optr ::= db_optr ctime */ + 219, /* (97) db_optr ::= db_optr wal */ + 219, /* (98) db_optr ::= db_optr fsync */ + 219, /* (99) db_optr ::= db_optr comp */ + 219, /* (100) db_optr ::= db_optr prec */ + 219, /* (101) db_optr ::= db_optr keep */ + 219, /* (102) db_optr ::= db_optr update */ + 219, /* (103) db_optr ::= db_optr cachelast */ + 216, /* (104) alter_db_optr ::= */ + 216, /* (105) alter_db_optr ::= alter_db_optr replica */ + 216, /* (106) alter_db_optr ::= alter_db_optr quorum */ + 216, /* (107) alter_db_optr ::= alter_db_optr keep */ + 216, /* (108) alter_db_optr ::= alter_db_optr blocks */ + 216, /* (109) alter_db_optr ::= alter_db_optr comp */ + 216, /* (110) alter_db_optr ::= alter_db_optr wal */ + 216, /* (111) alter_db_optr ::= alter_db_optr fsync */ + 216, /* (112) alter_db_optr ::= alter_db_optr update */ + 216, /* (113) alter_db_optr ::= alter_db_optr cachelast */ + 245, /* (114) typename ::= ids */ + 245, /* (115) typename ::= ids LP signed RP */ + 245, /* (116) typename ::= ids UNSIGNED */ + 246, /* (117) signed ::= INTEGER */ + 246, /* (118) signed ::= PLUS INTEGER */ + 246, /* (119) signed ::= MINUS INTEGER */ + 211, /* (120) cmd ::= CREATE TABLE create_table_args */ + 211, /* (121) cmd ::= CREATE TABLE create_stable_args */ + 211, /* (122) cmd ::= CREATE STABLE create_stable_args */ + 211, /* (123) cmd ::= CREATE TABLE create_table_list */ + 249, /* (124) create_table_list ::= create_from_stable */ + 249, /* (125) create_table_list ::= create_table_list create_from_stable */ + 247, /* (126) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + 248, /* (127) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + 250, /* (128) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + 250, /* (129) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + 252, /* (130) tagNamelist ::= tagNamelist COMMA ids */ + 252, /* (131) tagNamelist ::= ids */ + 247, /* (132) create_table_args ::= ifnotexists ids cpxName AS select */ + 251, /* (133) columnlist ::= columnlist COMMA column */ + 251, /* (134) columnlist ::= column */ + 254, /* (135) column ::= ids typename */ + 230, /* (136) tagitemlist ::= tagitemlist COMMA tagitem */ + 230, /* (137) tagitemlist ::= tagitem */ + 255, /* (138) tagitem ::= INTEGER */ + 255, /* (139) tagitem ::= FLOAT */ + 255, /* (140) tagitem ::= STRING */ + 255, /* (141) tagitem ::= BOOL */ + 255, /* (142) tagitem ::= NULL */ + 255, /* (143) tagitem ::= MINUS INTEGER */ + 255, /* (144) tagitem ::= MINUS FLOAT */ + 255, /* (145) tagitem ::= PLUS INTEGER */ + 255, /* (146) tagitem ::= PLUS FLOAT */ + 253, /* (147) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + 267, /* (148) union ::= select */ + 267, /* (149) union ::= LP union RP */ + 267, /* (150) union ::= union UNION ALL select */ + 267, /* (151) union ::= union UNION ALL LP select RP */ + 211, /* (152) cmd ::= union */ + 253, /* (153) select ::= SELECT selcollist */ + 268, /* (154) sclp ::= selcollist COMMA */ + 268, /* (155) sclp 
::= */ + 256, /* (156) selcollist ::= sclp distinct expr as */ + 256, /* (157) selcollist ::= sclp STAR */ + 271, /* (158) as ::= AS ids */ + 271, /* (159) as ::= ids */ + 271, /* (160) as ::= */ + 269, /* (161) distinct ::= DISTINCT */ + 269, /* (162) distinct ::= */ + 257, /* (163) from ::= FROM tablelist */ + 272, /* (164) tablelist ::= ids cpxName */ + 272, /* (165) tablelist ::= ids cpxName ids */ + 272, /* (166) tablelist ::= tablelist COMMA ids cpxName */ + 272, /* (167) tablelist ::= tablelist COMMA ids cpxName ids */ + 273, /* (168) tmvar ::= VARIABLE */ + 259, /* (169) interval_opt ::= INTERVAL LP tmvar RP */ + 259, /* (170) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + 259, /* (171) interval_opt ::= */ + 260, /* (172) fill_opt ::= */ + 260, /* (173) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + 260, /* (174) fill_opt ::= FILL LP ID RP */ + 261, /* (175) sliding_opt ::= SLIDING LP tmvar RP */ + 261, /* (176) sliding_opt ::= */ + 263, /* (177) orderby_opt ::= */ + 263, /* (178) orderby_opt ::= ORDER BY sortlist */ + 274, /* (179) sortlist ::= sortlist COMMA item sortorder */ + 274, /* (180) sortlist ::= item sortorder */ + 276, /* (181) item ::= ids cpxName */ + 277, /* (182) sortorder ::= ASC */ + 277, /* (183) sortorder ::= DESC */ + 277, /* (184) sortorder ::= */ + 262, /* (185) groupby_opt ::= */ + 262, /* (186) groupby_opt ::= GROUP BY grouplist */ + 278, /* (187) grouplist ::= grouplist COMMA item */ + 278, /* (188) grouplist ::= item */ + 264, /* (189) having_opt ::= */ + 264, /* (190) having_opt ::= HAVING expr */ + 266, /* (191) limit_opt ::= */ + 266, /* (192) limit_opt ::= LIMIT signed */ + 266, /* (193) limit_opt ::= LIMIT signed OFFSET signed */ + 266, /* (194) limit_opt ::= LIMIT signed COMMA signed */ + 265, /* (195) slimit_opt ::= */ + 265, /* (196) slimit_opt ::= SLIMIT signed */ + 265, /* (197) slimit_opt ::= SLIMIT signed SOFFSET signed */ + 265, /* (198) slimit_opt ::= SLIMIT signed COMMA signed */ + 258, /* (199) where_opt ::= */ + 258, /* (200) where_opt ::= WHERE expr */ + 270, /* (201) expr ::= LP expr RP */ + 270, /* (202) expr ::= ID */ + 270, /* (203) expr ::= ID DOT ID */ + 270, /* (204) expr ::= ID DOT STAR */ + 270, /* (205) expr ::= INTEGER */ + 270, /* (206) expr ::= MINUS INTEGER */ + 270, /* (207) expr ::= PLUS INTEGER */ + 270, /* (208) expr ::= FLOAT */ + 270, /* (209) expr ::= MINUS FLOAT */ + 270, /* (210) expr ::= PLUS FLOAT */ + 270, /* (211) expr ::= STRING */ + 270, /* (212) expr ::= NOW */ + 270, /* (213) expr ::= VARIABLE */ + 270, /* (214) expr ::= BOOL */ + 270, /* (215) expr ::= ID LP exprlist RP */ + 270, /* (216) expr ::= ID LP STAR RP */ + 270, /* (217) expr ::= expr IS NULL */ + 270, /* (218) expr ::= expr IS NOT NULL */ + 270, /* (219) expr ::= expr LT expr */ + 270, /* (220) expr ::= expr GT expr */ + 270, /* (221) expr ::= expr LE expr */ + 270, /* (222) expr ::= expr GE expr */ + 270, /* (223) expr ::= expr NE expr */ + 270, /* (224) expr ::= expr EQ expr */ + 270, /* (225) expr ::= expr BETWEEN expr AND expr */ + 270, /* (226) expr ::= expr AND expr */ + 270, /* (227) expr ::= expr OR expr */ + 270, /* (228) expr ::= expr PLUS expr */ + 270, /* (229) expr ::= expr MINUS expr */ + 270, /* (230) expr ::= expr STAR expr */ + 270, /* (231) expr ::= expr SLASH expr */ + 270, /* (232) expr ::= expr REM expr */ + 270, /* (233) expr ::= expr LIKE expr */ + 270, /* (234) expr ::= expr IN LP exprlist RP */ + 279, /* (235) exprlist ::= exprlist COMMA expritem */ + 279, /* (236) exprlist ::= expritem */ + 280, /* (237) 
expritem ::= expr */ + 280, /* (238) expritem ::= */ + 211, /* (239) cmd ::= RESET QUERY CACHE */ + 211, /* (240) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 211, /* (241) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 211, /* (242) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 211, /* (243) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 211, /* (244) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 211, /* (245) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 211, /* (246) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 211, /* (247) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 211, /* (248) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 211, /* (249) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 211, /* (250) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 211, /* (251) cmd ::= KILL CONNECTION INTEGER */ + 211, /* (252) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 211, /* (253) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +}; + +/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number +** of symbols on the right-hand side of that rule. */ +static const signed char yyRuleInfoNRhs[] = { + -1, /* (0) program ::= cmd */ + -2, /* (1) cmd ::= SHOW DATABASES */ + -2, /* (2) cmd ::= SHOW MNODES */ + -2, /* (3) cmd ::= SHOW DNODES */ + -2, /* (4) cmd ::= SHOW ACCOUNTS */ + -2, /* (5) cmd ::= SHOW USERS */ + -2, /* (6) cmd ::= SHOW MODULES */ + -2, /* (7) cmd ::= SHOW QUERIES */ + -2, /* (8) cmd ::= SHOW CONNECTIONS */ + -2, /* (9) cmd ::= SHOW STREAMS */ + -2, /* (10) cmd ::= SHOW VARIABLES */ + -2, /* (11) cmd ::= SHOW SCORES */ + -2, /* (12) cmd ::= SHOW GRANTS */ + -2, /* (13) cmd ::= SHOW VNODES */ + -3, /* (14) cmd ::= SHOW VNODES IPTOKEN */ + 0, /* (15) dbPrefix ::= */ + -2, /* (16) dbPrefix ::= ids DOT */ + 0, /* (17) cpxName ::= */ + -2, /* (18) cpxName ::= DOT ids */ + -5, /* (19) cmd ::= SHOW CREATE TABLE ids cpxName */ + -4, /* (20) cmd ::= SHOW CREATE DATABASE ids */ + -3, /* (21) cmd ::= SHOW dbPrefix TABLES */ + -5, /* (22) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + -3, /* (23) cmd ::= SHOW dbPrefix STABLES */ + -5, /* (24) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + -3, /* (25) cmd ::= SHOW dbPrefix VGROUPS */ + -4, /* (26) cmd ::= SHOW dbPrefix VGROUPS ids */ + -5, /* (27) cmd ::= DROP TABLE ifexists ids cpxName */ + -5, /* (28) cmd ::= DROP STABLE ifexists ids cpxName */ + -4, /* (29) cmd ::= DROP DATABASE ifexists ids */ + -3, /* (30) cmd ::= DROP DNODE ids */ + -3, /* (31) cmd ::= DROP USER ids */ + -3, /* (32) cmd ::= DROP ACCOUNT ids */ + -2, /* (33) cmd ::= USE ids */ + -3, /* (34) cmd ::= DESCRIBE ids cpxName */ + -5, /* (35) cmd ::= ALTER USER ids PASS ids */ + -5, /* (36) cmd ::= ALTER USER ids PRIVILEGE ids */ + -4, /* (37) cmd ::= ALTER DNODE ids ids */ + -5, /* (38) cmd ::= ALTER DNODE ids ids ids */ + -3, /* (39) cmd ::= ALTER LOCAL ids */ + -4, /* (40) cmd ::= ALTER LOCAL ids ids */ + -4, /* (41) cmd ::= ALTER DATABASE ids alter_db_optr */ + -4, /* (42) cmd ::= ALTER ACCOUNT ids acct_optr */ + -6, /* (43) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + -1, /* (44) ids ::= ID */ + -1, /* (45) ids ::= STRING */ + -2, /* (46) ifexists ::= IF EXISTS */ + 0, /* (47) ifexists ::= */ + -3, /* (48) ifnotexists ::= IF NOT EXISTS */ + 0, /* (49) ifnotexists ::= */ + -3, /* (50) cmd ::= CREATE DNODE ids */ + -6, /* (51) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + -5, /* (52) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + -5, /* (53) cmd ::= CREATE USER 
ids PASS ids */ + 0, /* (54) pps ::= */ + -2, /* (55) pps ::= PPS INTEGER */ + 0, /* (56) tseries ::= */ + -2, /* (57) tseries ::= TSERIES INTEGER */ + 0, /* (58) dbs ::= */ + -2, /* (59) dbs ::= DBS INTEGER */ + 0, /* (60) streams ::= */ + -2, /* (61) streams ::= STREAMS INTEGER */ + 0, /* (62) storage ::= */ + -2, /* (63) storage ::= STORAGE INTEGER */ + 0, /* (64) qtime ::= */ + -2, /* (65) qtime ::= QTIME INTEGER */ + 0, /* (66) users ::= */ + -2, /* (67) users ::= USERS INTEGER */ + 0, /* (68) conns ::= */ + -2, /* (69) conns ::= CONNS INTEGER */ + 0, /* (70) state ::= */ + -2, /* (71) state ::= STATE ids */ + -9, /* (72) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + -2, /* (73) keep ::= KEEP tagitemlist */ + -2, /* (74) cache ::= CACHE INTEGER */ + -2, /* (75) replica ::= REPLICA INTEGER */ + -2, /* (76) quorum ::= QUORUM INTEGER */ + -2, /* (77) days ::= DAYS INTEGER */ + -2, /* (78) minrows ::= MINROWS INTEGER */ + -2, /* (79) maxrows ::= MAXROWS INTEGER */ + -2, /* (80) blocks ::= BLOCKS INTEGER */ + -2, /* (81) ctime ::= CTIME INTEGER */ + -2, /* (82) wal ::= WAL INTEGER */ + -2, /* (83) fsync ::= FSYNC INTEGER */ + -2, /* (84) comp ::= COMP INTEGER */ + -2, /* (85) prec ::= PRECISION STRING */ + -2, /* (86) update ::= UPDATE INTEGER */ + -2, /* (87) cachelast ::= CACHELAST INTEGER */ + 0, /* (88) db_optr ::= */ + -2, /* (89) db_optr ::= db_optr cache */ + -2, /* (90) db_optr ::= db_optr replica */ + -2, /* (91) db_optr ::= db_optr quorum */ + -2, /* (92) db_optr ::= db_optr days */ + -2, /* (93) db_optr ::= db_optr minrows */ + -2, /* (94) db_optr ::= db_optr maxrows */ + -2, /* (95) db_optr ::= db_optr blocks */ + -2, /* (96) db_optr ::= db_optr ctime */ + -2, /* (97) db_optr ::= db_optr wal */ + -2, /* (98) db_optr ::= db_optr fsync */ + -2, /* (99) db_optr ::= db_optr comp */ + -2, /* (100) db_optr ::= db_optr prec */ + -2, /* (101) db_optr ::= db_optr keep */ + -2, /* (102) db_optr ::= db_optr update */ + -2, /* (103) db_optr ::= db_optr cachelast */ + 0, /* (104) alter_db_optr ::= */ + -2, /* (105) alter_db_optr ::= alter_db_optr replica */ + -2, /* (106) alter_db_optr ::= alter_db_optr quorum */ + -2, /* (107) alter_db_optr ::= alter_db_optr keep */ + -2, /* (108) alter_db_optr ::= alter_db_optr blocks */ + -2, /* (109) alter_db_optr ::= alter_db_optr comp */ + -2, /* (110) alter_db_optr ::= alter_db_optr wal */ + -2, /* (111) alter_db_optr ::= alter_db_optr fsync */ + -2, /* (112) alter_db_optr ::= alter_db_optr update */ + -2, /* (113) alter_db_optr ::= alter_db_optr cachelast */ + -1, /* (114) typename ::= ids */ + -4, /* (115) typename ::= ids LP signed RP */ + -2, /* (116) typename ::= ids UNSIGNED */ + -1, /* (117) signed ::= INTEGER */ + -2, /* (118) signed ::= PLUS INTEGER */ + -2, /* (119) signed ::= MINUS INTEGER */ + -3, /* (120) cmd ::= CREATE TABLE create_table_args */ + -3, /* (121) cmd ::= CREATE TABLE create_stable_args */ + -3, /* (122) cmd ::= CREATE STABLE create_stable_args */ + -3, /* (123) cmd ::= CREATE TABLE create_table_list */ + -1, /* (124) create_table_list ::= create_from_stable */ + -2, /* (125) create_table_list ::= create_table_list create_from_stable */ + -6, /* (126) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + -10, /* (127) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + -10, /* (128) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + -13, /* (129) create_from_stable ::= ifnotexists ids cpxName USING ids 
cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + -3, /* (130) tagNamelist ::= tagNamelist COMMA ids */ + -1, /* (131) tagNamelist ::= ids */ + -5, /* (132) create_table_args ::= ifnotexists ids cpxName AS select */ + -3, /* (133) columnlist ::= columnlist COMMA column */ + -1, /* (134) columnlist ::= column */ + -2, /* (135) column ::= ids typename */ + -3, /* (136) tagitemlist ::= tagitemlist COMMA tagitem */ + -1, /* (137) tagitemlist ::= tagitem */ + -1, /* (138) tagitem ::= INTEGER */ + -1, /* (139) tagitem ::= FLOAT */ + -1, /* (140) tagitem ::= STRING */ + -1, /* (141) tagitem ::= BOOL */ + -1, /* (142) tagitem ::= NULL */ + -2, /* (143) tagitem ::= MINUS INTEGER */ + -2, /* (144) tagitem ::= MINUS FLOAT */ + -2, /* (145) tagitem ::= PLUS INTEGER */ + -2, /* (146) tagitem ::= PLUS FLOAT */ + -12, /* (147) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + -1, /* (148) union ::= select */ + -3, /* (149) union ::= LP union RP */ + -4, /* (150) union ::= union UNION ALL select */ + -6, /* (151) union ::= union UNION ALL LP select RP */ + -1, /* (152) cmd ::= union */ + -2, /* (153) select ::= SELECT selcollist */ + -2, /* (154) sclp ::= selcollist COMMA */ + 0, /* (155) sclp ::= */ + -4, /* (156) selcollist ::= sclp distinct expr as */ + -2, /* (157) selcollist ::= sclp STAR */ + -2, /* (158) as ::= AS ids */ + -1, /* (159) as ::= ids */ + 0, /* (160) as ::= */ + -1, /* (161) distinct ::= DISTINCT */ + 0, /* (162) distinct ::= */ + -2, /* (163) from ::= FROM tablelist */ + -2, /* (164) tablelist ::= ids cpxName */ + -3, /* (165) tablelist ::= ids cpxName ids */ + -4, /* (166) tablelist ::= tablelist COMMA ids cpxName */ + -5, /* (167) tablelist ::= tablelist COMMA ids cpxName ids */ + -1, /* (168) tmvar ::= VARIABLE */ + -4, /* (169) interval_opt ::= INTERVAL LP tmvar RP */ + -6, /* (170) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + 0, /* (171) interval_opt ::= */ + 0, /* (172) fill_opt ::= */ + -6, /* (173) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + -4, /* (174) fill_opt ::= FILL LP ID RP */ + -4, /* (175) sliding_opt ::= SLIDING LP tmvar RP */ + 0, /* (176) sliding_opt ::= */ + 0, /* (177) orderby_opt ::= */ + -3, /* (178) orderby_opt ::= ORDER BY sortlist */ + -4, /* (179) sortlist ::= sortlist COMMA item sortorder */ + -2, /* (180) sortlist ::= item sortorder */ + -2, /* (181) item ::= ids cpxName */ + -1, /* (182) sortorder ::= ASC */ + -1, /* (183) sortorder ::= DESC */ + 0, /* (184) sortorder ::= */ + 0, /* (185) groupby_opt ::= */ + -3, /* (186) groupby_opt ::= GROUP BY grouplist */ + -3, /* (187) grouplist ::= grouplist COMMA item */ + -1, /* (188) grouplist ::= item */ + 0, /* (189) having_opt ::= */ + -2, /* (190) having_opt ::= HAVING expr */ + 0, /* (191) limit_opt ::= */ + -2, /* (192) limit_opt ::= LIMIT signed */ + -4, /* (193) limit_opt ::= LIMIT signed OFFSET signed */ + -4, /* (194) limit_opt ::= LIMIT signed COMMA signed */ + 0, /* (195) slimit_opt ::= */ + -2, /* (196) slimit_opt ::= SLIMIT signed */ + -4, /* (197) slimit_opt ::= SLIMIT signed SOFFSET signed */ + -4, /* (198) slimit_opt ::= SLIMIT signed COMMA signed */ + 0, /* (199) where_opt ::= */ + -2, /* (200) where_opt ::= WHERE expr */ + -3, /* (201) expr ::= LP expr RP */ + -1, /* (202) expr ::= ID */ + -3, /* (203) expr ::= ID DOT ID */ + -3, /* (204) expr ::= ID DOT STAR */ + -1, /* (205) expr ::= INTEGER */ + -2, /* (206) expr ::= MINUS INTEGER */ + -2, /* (207) expr ::= PLUS INTEGER */ + -1, /* 
(208) expr ::= FLOAT */ + -2, /* (209) expr ::= MINUS FLOAT */ + -2, /* (210) expr ::= PLUS FLOAT */ + -1, /* (211) expr ::= STRING */ + -1, /* (212) expr ::= NOW */ + -1, /* (213) expr ::= VARIABLE */ + -1, /* (214) expr ::= BOOL */ + -4, /* (215) expr ::= ID LP exprlist RP */ + -4, /* (216) expr ::= ID LP STAR RP */ + -3, /* (217) expr ::= expr IS NULL */ + -4, /* (218) expr ::= expr IS NOT NULL */ + -3, /* (219) expr ::= expr LT expr */ + -3, /* (220) expr ::= expr GT expr */ + -3, /* (221) expr ::= expr LE expr */ + -3, /* (222) expr ::= expr GE expr */ + -3, /* (223) expr ::= expr NE expr */ + -3, /* (224) expr ::= expr EQ expr */ + -5, /* (225) expr ::= expr BETWEEN expr AND expr */ + -3, /* (226) expr ::= expr AND expr */ + -3, /* (227) expr ::= expr OR expr */ + -3, /* (228) expr ::= expr PLUS expr */ + -3, /* (229) expr ::= expr MINUS expr */ + -3, /* (230) expr ::= expr STAR expr */ + -3, /* (231) expr ::= expr SLASH expr */ + -3, /* (232) expr ::= expr REM expr */ + -3, /* (233) expr ::= expr LIKE expr */ + -5, /* (234) expr ::= expr IN LP exprlist RP */ + -3, /* (235) exprlist ::= exprlist COMMA expritem */ + -1, /* (236) exprlist ::= expritem */ + -1, /* (237) expritem ::= expr */ + 0, /* (238) expritem ::= */ + -3, /* (239) cmd ::= RESET QUERY CACHE */ + -7, /* (240) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (241) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + -7, /* (242) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (243) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (244) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (245) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (246) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (247) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (248) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (249) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (250) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -3, /* (251) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (252) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (253) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2013,30 +2304,34 @@ static void yy_accept(yyParser*); /* Forward Declaration */ ** only called from one place, optimizing compilers will in-line it, which ** means that the extra parameters have no performance impact. 
*/
-static void yy_reduce(
+static YYACTIONTYPE yy_reduce(
   yyParser *yypParser,         /* The parser */
   unsigned int yyruleno,       /* Number of the rule by which to reduce */
   int yyLookahead,             /* Lookahead token, or YYNOCODE if none */
   ParseTOKENTYPE yyLookaheadToken  /* Value of the lookahead token */
+  ParseCTX_PDECL                   /* %extra_context */
 ){
   int yygoto;                     /* The next state */
-  int yyact;                      /* The next action */
+  YYACTIONTYPE yyact;             /* The next action */
   yyStackEntry *yymsp;            /* The top of the parser's stack */
   int yysize;                     /* Amount to pop the stack */
-  ParseARG_FETCH;
+  ParseARG_FETCH
   (void)yyLookahead;
   (void)yyLookaheadToken;
   yymsp = yypParser->yytos;
 #ifndef NDEBUG
   if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
-    yysize = yyRuleInfo[yyruleno].nrhs;
+    yysize = yyRuleInfoNRhs[yyruleno];
     if( yysize ){
-      fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n",
+      fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
         yyTracePrompt,
-        yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno);
+        yyruleno, yyRuleName[yyruleno],
+        yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action",
+        yymsp[yysize].stateno);
     }else{
-      fprintf(yyTraceFILE, "%sReduce %d [%s].\n",
-        yyTracePrompt, yyruleno, yyRuleName[yyruleno]);
+      fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n",
+        yyTracePrompt, yyruleno, yyRuleName[yyruleno],
+        yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action");
     }
   }
 #endif /* NDEBUG */
 
   /* Check that the stack is large enough to grow by a single entry
   ** if the RHS of the rule is empty.  This ensures that there is room
   ** enough on the stack to push the LHS value */
-  if( yyRuleInfo[yyruleno].nrhs==0 ){
+  if( yyRuleInfoNRhs[yyruleno]==0 ){
 #ifdef YYTRACKMAXSTACKDEPTH
     if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
       yypParser->yyhwm++;
@@ -2054,13 +2349,19 @@ static void yy_reduce(
 #if YYSTACKDEPTH>0
     if( yypParser->yytos>=yypParser->yystackEnd ){
       yyStackOverflow(yypParser);
-      return;
+      /* The call to yyStackOverflow() above pops the stack until it is
+      ** empty, causing the main parser loop to exit.  So the return value
+      ** is never used and does not matter. */
+      return 0;
     }
 #else
     if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
       if( yyGrowStack(yypParser) ){
         yyStackOverflow(yypParser);
-        return;
+        /* The call to yyStackOverflow() above pops the stack until it is
+        ** empty, causing the main parser loop to exit.  So the return value
+        ** is never used and does not matter.
*/ + return 0; } yymsp = yypParser->yytos; } @@ -2256,7 +2557,7 @@ static void yy_reduce( break; case 47: /* ifexists ::= */ case 49: /* ifnotexists ::= */ yytestcase(yyruleno==49); - case 159: /* distinct ::= */ yytestcase(yyruleno==159); + case 162: /* distinct ::= */ yytestcase(yyruleno==162); { yymsp[1].minor.yy0.n = 0;} break; case 48: /* ifnotexists ::= IF NOT EXISTS */ @@ -2486,11 +2787,27 @@ static void yy_reduce( { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy42 = createNewChildTableInfo(&yymsp[-5].minor.yy0, yymsp[-1].minor.yy247, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy42 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy247, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } yymsp[-9].minor.yy42 = yylhsminor.yy42; break; - case 129: /* create_table_args ::= ifnotexists ids cpxName AS select */ + case 129: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ +{ + yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; + yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; + yylhsminor.yy42 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy247, yymsp[-1].minor.yy247, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); +} + yymsp[-12].minor.yy42 = yylhsminor.yy42; + break; + case 130: /* tagNamelist ::= tagNamelist COMMA ids */ +{taosArrayPush(yymsp[-2].minor.yy247, &yymsp[0].minor.yy0); yylhsminor.yy247 = yymsp[-2].minor.yy247; } + yymsp[-2].minor.yy247 = yylhsminor.yy247; + break; + case 131: /* tagNamelist ::= ids */ +{yylhsminor.yy247 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy247, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy247 = yylhsminor.yy247; + break; + case 132: /* create_table_args ::= ifnotexists ids cpxName AS select */ { yylhsminor.yy358 = tSetCreateSqlElems(NULL, NULL, yymsp[0].minor.yy114, TSQL_CREATE_STREAM); setSqlInfo(pInfo, yylhsminor.yy358, NULL, TSDB_SQL_CREATE_TABLE); @@ -2500,43 +2817,43 @@ static void yy_reduce( } yymsp[-4].minor.yy358 = yylhsminor.yy358; break; - case 130: /* columnlist ::= columnlist COMMA column */ + case 133: /* columnlist ::= columnlist COMMA column */ {taosArrayPush(yymsp[-2].minor.yy247, &yymsp[0].minor.yy179); yylhsminor.yy247 = yymsp[-2].minor.yy247; } yymsp[-2].minor.yy247 = yylhsminor.yy247; break; - case 131: /* columnlist ::= column */ + case 134: /* columnlist ::= column */ {yylhsminor.yy247 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy247, &yymsp[0].minor.yy179);} yymsp[0].minor.yy247 = yylhsminor.yy247; break; - case 132: /* column ::= ids typename */ + case 135: /* column ::= ids typename */ { tSqlSetColumnInfo(&yylhsminor.yy179, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy179); } yymsp[-1].minor.yy179 = yylhsminor.yy179; break; - case 133: /* tagitemlist ::= tagitemlist COMMA tagitem */ + case 136: /* tagitemlist ::= tagitemlist COMMA tagitem */ { yylhsminor.yy247 = tVariantListAppend(yymsp[-2].minor.yy247, &yymsp[0].minor.yy378, -1); } yymsp[-2].minor.yy247 = yylhsminor.yy247; break; - case 134: /* tagitemlist ::= tagitem */ + case 137: /* tagitemlist ::= tagitem */ { yylhsminor.yy247 = tVariantListAppend(NULL, &yymsp[0].minor.yy378, -1); } yymsp[0].minor.yy247 = yylhsminor.yy247; break; - case 135: /* tagitem ::= INTEGER */ - case 136: /* tagitem ::= FLOAT */ yytestcase(yyruleno==136); - case 137: /* tagitem ::= STRING */ yytestcase(yyruleno==137); - case 138: /* tagitem ::= BOOL */ yytestcase(yyruleno==138); + case 138: 
/* tagitem ::= INTEGER */ + case 139: /* tagitem ::= FLOAT */ yytestcase(yyruleno==139); + case 140: /* tagitem ::= STRING */ yytestcase(yyruleno==140); + case 141: /* tagitem ::= BOOL */ yytestcase(yyruleno==141); { toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy378, &yymsp[0].minor.yy0); } yymsp[0].minor.yy378 = yylhsminor.yy378; break; - case 139: /* tagitem ::= NULL */ + case 142: /* tagitem ::= NULL */ { yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy378, &yymsp[0].minor.yy0); } yymsp[0].minor.yy378 = yylhsminor.yy378; break; - case 140: /* tagitem ::= MINUS INTEGER */ - case 141: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==141); - case 142: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==142); - case 143: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==143); + case 143: /* tagitem ::= MINUS INTEGER */ + case 144: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==144); + case 145: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==145); + case 146: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==146); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; @@ -2545,74 +2862,74 @@ static void yy_reduce( } yymsp[-1].minor.yy378 = yylhsminor.yy378; break; - case 144: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 147: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { yylhsminor.yy114 = tSetQuerySqlElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy522, yymsp[-9].minor.yy247, yymsp[-8].minor.yy326, yymsp[-4].minor.yy247, yymsp[-3].minor.yy247, &yymsp[-7].minor.yy430, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy247, &yymsp[0].minor.yy204, &yymsp[-1].minor.yy204); } yymsp[-11].minor.yy114 = yylhsminor.yy114; break; - case 145: /* union ::= select */ + case 148: /* union ::= select */ { yylhsminor.yy219 = setSubclause(NULL, yymsp[0].minor.yy114); } yymsp[0].minor.yy219 = yylhsminor.yy219; break; - case 146: /* union ::= LP union RP */ + case 149: /* union ::= LP union RP */ { yymsp[-2].minor.yy219 = yymsp[-1].minor.yy219; } break; - case 147: /* union ::= union UNION ALL select */ + case 150: /* union ::= union UNION ALL select */ { yylhsminor.yy219 = appendSelectClause(yymsp[-3].minor.yy219, yymsp[0].minor.yy114); } yymsp[-3].minor.yy219 = yylhsminor.yy219; break; - case 148: /* union ::= union UNION ALL LP select RP */ + case 151: /* union ::= union UNION ALL LP select RP */ { yylhsminor.yy219 = appendSelectClause(yymsp[-5].minor.yy219, yymsp[-1].minor.yy114); } yymsp[-5].minor.yy219 = yylhsminor.yy219; break; - case 149: /* cmd ::= union */ + case 152: /* cmd ::= union */ { setSqlInfo(pInfo, yymsp[0].minor.yy219, NULL, TSDB_SQL_SELECT); } break; - case 150: /* select ::= SELECT selcollist */ + case 153: /* select ::= SELECT selcollist */ { yylhsminor.yy114 = tSetQuerySqlElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy522, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } yymsp[-1].minor.yy114 = yylhsminor.yy114; break; - case 151: /* sclp ::= selcollist COMMA */ + case 154: /* sclp ::= selcollist COMMA */ {yylhsminor.yy522 = yymsp[-1].minor.yy522;} yymsp[-1].minor.yy522 = yylhsminor.yy522; break; - case 152: /* sclp ::= */ + case 155: /* sclp ::= */ {yymsp[1].minor.yy522 = 0;} break; - case 153: /* selcollist ::= sclp distinct expr as */ + case 156: /* selcollist ::= sclp distinct expr as */ { yylhsminor.yy522 = 
tSqlExprListAppend(yymsp[-3].minor.yy522, yymsp[-1].minor.yy326, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } yymsp[-3].minor.yy522 = yylhsminor.yy522; break; - case 154: /* selcollist ::= sclp STAR */ + case 157: /* selcollist ::= sclp STAR */ { tSQLExpr *pNode = tSqlExprIdValueCreate(NULL, TK_ALL); yylhsminor.yy522 = tSqlExprListAppend(yymsp[-1].minor.yy522, pNode, 0, 0); } yymsp[-1].minor.yy522 = yylhsminor.yy522; break; - case 155: /* as ::= AS ids */ + case 158: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 156: /* as ::= ids */ + case 159: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 157: /* as ::= */ + case 160: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 158: /* distinct ::= DISTINCT */ + case 161: /* distinct ::= DISTINCT */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 160: /* from ::= FROM tablelist */ + case 163: /* from ::= FROM tablelist */ {yymsp[-1].minor.yy247 = yymsp[0].minor.yy247;} break; - case 161: /* tablelist ::= ids cpxName */ + case 164: /* tablelist ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2621,7 +2938,7 @@ static void yy_reduce( } yymsp[-1].minor.yy247 = yylhsminor.yy247; break; - case 162: /* tablelist ::= ids cpxName ids */ + case 165: /* tablelist ::= ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); @@ -2631,7 +2948,7 @@ static void yy_reduce( } yymsp[-2].minor.yy247 = yylhsminor.yy247; break; - case 163: /* tablelist ::= tablelist COMMA ids cpxName */ + case 166: /* tablelist ::= tablelist COMMA ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2640,7 +2957,7 @@ static void yy_reduce( } yymsp[-3].minor.yy247 = yylhsminor.yy247; break; - case 164: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 167: /* tablelist ::= tablelist COMMA ids cpxName ids */ { toTSDBType(yymsp[-2].minor.yy0.type); toTSDBType(yymsp[0].minor.yy0.type); @@ -2650,23 +2967,23 @@ static void yy_reduce( } yymsp[-4].minor.yy247 = yylhsminor.yy247; break; - case 165: /* tmvar ::= VARIABLE */ + case 168: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 166: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 169: /* interval_opt ::= INTERVAL LP tmvar RP */ {yymsp[-3].minor.yy430.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy430.offset.n = 0; yymsp[-3].minor.yy430.offset.z = NULL; yymsp[-3].minor.yy430.offset.type = 0;} break; - case 167: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + case 170: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ {yymsp[-5].minor.yy430.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy430.offset = yymsp[-1].minor.yy0;} break; - case 168: /* interval_opt ::= */ + case 171: /* interval_opt ::= */ {memset(&yymsp[1].minor.yy430, 0, sizeof(yymsp[1].minor.yy430));} break; - case 169: /* fill_opt ::= */ + case 172: /* fill_opt ::= */ {yymsp[1].minor.yy247 = 0; } break; - case 170: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 173: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); @@ -2676,37 +2993,37 @@ static void yy_reduce( yymsp[-5].minor.yy247 = yymsp[-1].minor.yy247; } break; - case 171: /* fill_opt ::= FILL LP ID RP */ + case 
174: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-3].minor.yy247 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 172: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 175: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 173: /* sliding_opt ::= */ + case 176: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 174: /* orderby_opt ::= */ + case 177: /* orderby_opt ::= */ {yymsp[1].minor.yy247 = 0;} break; - case 175: /* orderby_opt ::= ORDER BY sortlist */ + case 178: /* orderby_opt ::= ORDER BY sortlist */ {yymsp[-2].minor.yy247 = yymsp[0].minor.yy247;} break; - case 176: /* sortlist ::= sortlist COMMA item sortorder */ + case 179: /* sortlist ::= sortlist COMMA item sortorder */ { yylhsminor.yy247 = tVariantListAppend(yymsp[-3].minor.yy247, &yymsp[-1].minor.yy378, yymsp[0].minor.yy222); } yymsp[-3].minor.yy247 = yylhsminor.yy247; break; - case 177: /* sortlist ::= item sortorder */ + case 180: /* sortlist ::= item sortorder */ { yylhsminor.yy247 = tVariantListAppend(NULL, &yymsp[-1].minor.yy378, yymsp[0].minor.yy222); } yymsp[-1].minor.yy247 = yylhsminor.yy247; break; - case 178: /* item ::= ids cpxName */ + case 181: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2715,211 +3032,215 @@ static void yy_reduce( } yymsp[-1].minor.yy378 = yylhsminor.yy378; break; - case 179: /* sortorder ::= ASC */ + case 182: /* sortorder ::= ASC */ { yymsp[0].minor.yy222 = TSDB_ORDER_ASC; } break; - case 180: /* sortorder ::= DESC */ + case 183: /* sortorder ::= DESC */ { yymsp[0].minor.yy222 = TSDB_ORDER_DESC;} break; - case 181: /* sortorder ::= */ + case 184: /* sortorder ::= */ { yymsp[1].minor.yy222 = TSDB_ORDER_ASC; } break; - case 182: /* groupby_opt ::= */ + case 185: /* groupby_opt ::= */ { yymsp[1].minor.yy247 = 0;} break; - case 183: /* groupby_opt ::= GROUP BY grouplist */ + case 186: /* groupby_opt ::= GROUP BY grouplist */ { yymsp[-2].minor.yy247 = yymsp[0].minor.yy247;} break; - case 184: /* grouplist ::= grouplist COMMA item */ + case 187: /* grouplist ::= grouplist COMMA item */ { yylhsminor.yy247 = tVariantListAppend(yymsp[-2].minor.yy247, &yymsp[0].minor.yy378, -1); } yymsp[-2].minor.yy247 = yylhsminor.yy247; break; - case 185: /* grouplist ::= item */ + case 188: /* grouplist ::= item */ { yylhsminor.yy247 = tVariantListAppend(NULL, &yymsp[0].minor.yy378, -1); } yymsp[0].minor.yy247 = yylhsminor.yy247; break; - case 186: /* having_opt ::= */ - case 196: /* where_opt ::= */ yytestcase(yyruleno==196); - case 234: /* expritem ::= */ yytestcase(yyruleno==234); + case 189: /* having_opt ::= */ + case 199: /* where_opt ::= */ yytestcase(yyruleno==199); + case 238: /* expritem ::= */ yytestcase(yyruleno==238); {yymsp[1].minor.yy326 = 0;} break; - case 187: /* having_opt ::= HAVING expr */ - case 197: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==197); + case 190: /* having_opt ::= HAVING expr */ + case 200: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==200); {yymsp[-1].minor.yy326 = yymsp[0].minor.yy326;} break; - case 188: /* limit_opt ::= */ - case 192: /* slimit_opt ::= */ yytestcase(yyruleno==192); + case 191: /* limit_opt ::= */ + case 195: /* slimit_opt ::= */ yytestcase(yyruleno==195); {yymsp[1].minor.yy204.limit = -1; yymsp[1].minor.yy204.offset = 0;} break; - case 189: /* limit_opt ::= LIMIT signed */ - case 193: /* 
slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==193); + case 192: /* limit_opt ::= LIMIT signed */ + case 196: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==196); {yymsp[-1].minor.yy204.limit = yymsp[0].minor.yy403; yymsp[-1].minor.yy204.offset = 0;} break; - case 190: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 193: /* limit_opt ::= LIMIT signed OFFSET signed */ { yymsp[-3].minor.yy204.limit = yymsp[-2].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[0].minor.yy403;} break; - case 191: /* limit_opt ::= LIMIT signed COMMA signed */ + case 194: /* limit_opt ::= LIMIT signed COMMA signed */ { yymsp[-3].minor.yy204.limit = yymsp[0].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[-2].minor.yy403;} break; - case 194: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ + case 197: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ {yymsp[-3].minor.yy204.limit = yymsp[-2].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[0].minor.yy403;} break; - case 195: /* slimit_opt ::= SLIMIT signed COMMA signed */ + case 198: /* slimit_opt ::= SLIMIT signed COMMA signed */ {yymsp[-3].minor.yy204.limit = yymsp[0].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[-2].minor.yy403;} break; - case 198: /* expr ::= LP expr RP */ + case 201: /* expr ::= LP expr RP */ {yylhsminor.yy326 = yymsp[-1].minor.yy326; yylhsminor.yy326->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy326->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 199: /* expr ::= ID */ + case 202: /* expr ::= ID */ { yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 200: /* expr ::= ID DOT ID */ + case 203: /* expr ::= ID DOT ID */ { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 201: /* expr ::= ID DOT STAR */ + case 204: /* expr ::= ID DOT STAR */ { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 202: /* expr ::= INTEGER */ + case 205: /* expr ::= INTEGER */ { yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 203: /* expr ::= MINUS INTEGER */ - case 204: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==204); + case 206: /* expr ::= MINUS INTEGER */ + case 207: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==207); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} yymsp[-1].minor.yy326 = yylhsminor.yy326; break; - case 205: /* expr ::= FLOAT */ + case 208: /* expr ::= FLOAT */ { yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 206: /* expr ::= MINUS FLOAT */ - case 207: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==207); + case 209: /* expr ::= MINUS FLOAT */ + case 210: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==210); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} yymsp[-1].minor.yy326 = yylhsminor.yy326; break; - case 208: /* expr ::= STRING */ + case 211: /* expr ::= STRING */ { yylhsminor.yy326 = 
tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 209: /* expr ::= NOW */ + case 212: /* expr ::= NOW */ { yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 210: /* expr ::= VARIABLE */ + case 213: /* expr ::= VARIABLE */ { yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 211: /* expr ::= BOOL */ + case 214: /* expr ::= BOOL */ { yylhsminor.yy326 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 212: /* expr ::= ID LP exprlist RP */ + case 215: /* expr ::= ID LP exprlist RP */ { yylhsminor.yy326 = tSqlExprCreateFunction(yymsp[-1].minor.yy522, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy326 = yylhsminor.yy326; break; - case 213: /* expr ::= ID LP STAR RP */ + case 216: /* expr ::= ID LP STAR RP */ { yylhsminor.yy326 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy326 = yylhsminor.yy326; break; - case 214: /* expr ::= expr IS NULL */ + case 217: /* expr ::= expr IS NULL */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, NULL, TK_ISNULL);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 215: /* expr ::= expr IS NOT NULL */ + case 218: /* expr ::= expr IS NOT NULL */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-3].minor.yy326, NULL, TK_NOTNULL);} yymsp[-3].minor.yy326 = yylhsminor.yy326; break; - case 216: /* expr ::= expr LT expr */ + case 219: /* expr ::= expr LT expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_LT);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 217: /* expr ::= expr GT expr */ + case 220: /* expr ::= expr GT expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_GT);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 218: /* expr ::= expr LE expr */ + case 221: /* expr ::= expr LE expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_LE);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 219: /* expr ::= expr GE expr */ + case 222: /* expr ::= expr GE expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_GE);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 220: /* expr ::= expr NE expr */ + case 223: /* expr ::= expr NE expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_NE);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 221: /* expr ::= expr EQ expr */ + case 224: /* expr ::= expr EQ expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_EQ);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 222: /* expr ::= expr AND expr */ + case 225: /* expr ::= expr BETWEEN expr AND expr */ +{ tSQLExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy326); yylhsminor.yy326 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy326, yymsp[-2].minor.yy326, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy326, TK_LE), TK_AND);} + yymsp[-4].minor.yy326 = yylhsminor.yy326; + break; + case 226: /* expr ::= expr AND expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_AND);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 223: /* expr ::= expr OR expr */ + case 227: /* expr ::= expr OR 
expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_OR); } yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 224: /* expr ::= expr PLUS expr */ + case 228: /* expr ::= expr PLUS expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_PLUS); } yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 225: /* expr ::= expr MINUS expr */ + case 229: /* expr ::= expr MINUS expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_MINUS); } yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 226: /* expr ::= expr STAR expr */ + case 230: /* expr ::= expr STAR expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_STAR); } yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 227: /* expr ::= expr SLASH expr */ + case 231: /* expr ::= expr SLASH expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_DIVIDE);} yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 228: /* expr ::= expr REM expr */ + case 232: /* expr ::= expr REM expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_REM); } yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 229: /* expr ::= expr LIKE expr */ + case 233: /* expr ::= expr LIKE expr */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-2].minor.yy326, yymsp[0].minor.yy326, TK_LIKE); } yymsp[-2].minor.yy326 = yylhsminor.yy326; break; - case 230: /* expr ::= expr IN LP exprlist RP */ + case 234: /* expr ::= expr IN LP exprlist RP */ {yylhsminor.yy326 = tSqlExprCreate(yymsp[-4].minor.yy326, (tSQLExpr*)yymsp[-1].minor.yy522, TK_IN); } yymsp[-4].minor.yy326 = yylhsminor.yy326; break; - case 231: /* exprlist ::= exprlist COMMA expritem */ + case 235: /* exprlist ::= exprlist COMMA expritem */ {yylhsminor.yy522 = tSqlExprListAppend(yymsp[-2].minor.yy522,yymsp[0].minor.yy326,0, 0);} yymsp[-2].minor.yy522 = yylhsminor.yy522; break; - case 232: /* exprlist ::= expritem */ + case 236: /* exprlist ::= expritem */ {yylhsminor.yy522 = tSqlExprListAppend(0,yymsp[0].minor.yy326,0, 0);} yymsp[0].minor.yy522 = yylhsminor.yy522; break; - case 233: /* expritem ::= expr */ + case 237: /* expritem ::= expr */ {yylhsminor.yy326 = yymsp[0].minor.yy326;} yymsp[0].minor.yy326 = yylhsminor.yy326; break; - case 235: /* cmd ::= RESET QUERY CACHE */ + case 239: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 236: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 240: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 237: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 241: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2930,14 +3251,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 238: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 242: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, 
pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 239: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 243: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2948,7 +3269,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 240: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 244: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2962,7 +3283,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 241: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 245: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -2974,14 +3295,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 242: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 246: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 243: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 247: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2992,14 +3313,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 244: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 248: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 245: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 249: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3010,7 +3331,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 246: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 250: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3024,22 +3345,22 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 247: /* cmd ::= KILL CONNECTION INTEGER */ + case 251: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 248: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 252: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 249: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 253: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyrulenostateno = (YYACTIONTYPE)yyact; yymsp->major = 
(YYCODETYPE)yygoto; yyTraceShift(yypParser, yyact, "... then shift"); + return yyact; } /* @@ -3063,7 +3385,8 @@ static void yy_reduce( static void yy_parse_failed( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); @@ -3074,7 +3397,8 @@ static void yy_parse_failed( ** parser fails */ /************ Begin %parse_failure code ***************************************/ /************ End %parse_failure code *****************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } #endif /* YYNOERRORRECOVERY */ @@ -3086,7 +3410,8 @@ static void yy_syntax_error( int yymajor, /* The major type of the error token */ ParseTOKENTYPE yyminor /* The minor type of the error token */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ @@ -3112,7 +3437,8 @@ static void yy_syntax_error( assert(len <= outputBufLen); /************ End %syntax_error code ******************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } /* @@ -3121,7 +3447,8 @@ static void yy_syntax_error( static void yy_accept( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); @@ -3136,7 +3463,8 @@ static void yy_accept( /*********** Begin %parse_accept code *****************************************/ /*********** End %parse_accept code *******************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } /* The main parser program. @@ -3165,45 +3493,47 @@ void Parse( ParseARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; - unsigned int yyact; /* The parser action. */ + YYACTIONTYPE yyact; /* The parser action. 
*/ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif #ifdef YYERRORSYMBOL int yyerrorhit = 0; /* True if yymajor has invoked an error */ #endif - yyParser *yypParser; /* The parser */ + yyParser *yypParser = (yyParser*)yyp; /* The parser */ + ParseCTX_FETCH + ParseARG_STORE - yypParser = (yyParser*)yyp; assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif - ParseARG_STORE; + yyact = yypParser->yytos->stateno; #ifndef NDEBUG if( yyTraceFILE ){ - int stateno = yypParser->yytos->stateno; - if( stateno < YY_MIN_REDUCE ){ + if( yyact < YY_MIN_REDUCE ){ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", - yyTracePrompt,yyTokenName[yymajor],stateno); + yyTracePrompt,yyTokenName[yymajor],yyact); }else{ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", - yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE); + yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE); } } #endif do{ - yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); + assert( yyact==yypParser->yytos->stateno ); + yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); if( yyact >= YY_MIN_REDUCE ){ - yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor); + yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, + yyminor ParseCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ - yy_shift(yypParser,yyact,yymajor,yyminor); + yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; #endif - yymajor = YYNOCODE; + break; }else if( yyact==YY_ACCEPT_ACTION ){ yypParser->yytos--; yy_accept(yypParser); @@ -3254,10 +3584,9 @@ void Parse( yymajor = YYNOCODE; }else{ while( yypParser->yytos >= yypParser->yystack - && yymx != YYERRORSYMBOL && (yyact = yy_find_reduce_action( yypParser->yytos->stateno, - YYERRORSYMBOL)) >= YY_MIN_REDUCE + YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE ){ yy_pop_parser_stack(yypParser); } @@ -3274,6 +3603,8 @@ void Parse( } yypParser->yyerrcnt = 3; yyerrorhit = 1; + if( yymajor==YYNOCODE ) break; + yyact = yypParser->yytos->stateno; #elif defined(YYNOERRORRECOVERY) /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to ** do any kind of error recovery. Instead, simply invoke the syntax @@ -3284,8 +3615,7 @@ void Parse( */ yy_syntax_error(yypParser,yymajor, yyminor); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - yymajor = YYNOCODE; - + break; #else /* YYERRORSYMBOL is not defined */ /* This is what we do if the grammar does not define ERROR: ** @@ -3307,10 +3637,10 @@ void Parse( yypParser->yyerrcnt = -1; #endif } - yymajor = YYNOCODE; + break; #endif } - }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); + }while( yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ yyStackEntry *i; @@ -3325,3 +3655,17 @@ void Parse( #endif return; } + +/* +** Return the fallback token corresponding to canonical token iToken, or +** 0 if iToken has no fallback. 
+*/ +int ParseFallback(int iToken){ +#ifdef YYFALLBACK + assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ); + return yyFallback[iToken]; +#else + (void)iToken; + return 0; +#endif +} diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index c3798b869eb6c02008043346d70b52592239cad0..1856223391ae719ef98492160da9810826b983a9 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt index 02a1e7c2d81d13194ca819c703657df5e1f3fb3b..f94b4aeb6d21277b6b845587cd35a2c98e0bc0b0 100644 --- a/src/rpc/CMakeLists.txt +++ b/src/rpc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt index e9231058600ce2135f669dd42a6c52cca81424cd..c10cea6c9dd8c53ab8608c8a736795f2318059d8 100644 --- a/src/rpc/test/CMakeLists.txt +++ b/src/rpc/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt index cc86bf704c24223bcc0ff90e3633efa8d065ac96..82d0bbf520843f5418d1004f2fe7c1be756b7b6f 100644 --- a/src/sync/CMakeLists.txt +++ b/src/sync/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index ec4bbb33a5988656df9a665e67d60054336e7c7f..be4073760dfa551878301bc3bd4f2405b12b77cf 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -170,14 +170,14 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead) { return sizeof(SWalHead) + pHead->len; } -static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset) { +static int64_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversion, int64_t offset) { int32_t sfd = open(name, O_RDONLY | O_BINARY); if (sfd < 0) { sError("%s, failed to open wal:%s for retrieve since:%s", pPeer->id, name, tstrerror(errno)); return -1; } - int32_t code = (int32_t)taosLSeek(sfd, offset, SEEK_SET); + int64_t code = taosLSeek(sfd, offset, SEEK_SET); if (code < 0) { sError("%s, failed to seek %" PRId64 " in wal:%s for retrieve since:%s", pPeer->id, offset, name, tstrerror(errno)); close(sfd); @@ -187,7 +187,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi sDebug("%s, retrieve last wal:%s, offset:%" PRId64 " fver:%" PRIu64, pPeer->id, name, offset, fversion); SWalHead *pHead = malloc(SYNC_MAX_SIZE); - int32_t bytes = 0; + int64_t bytes = 0; while (1) { code = syncReadOneWalRecord(sfd, pHead); @@ -198,13 +198,13 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi if (code == 0) { code = bytes; - sDebug("%s, read to the end of wal, bytes:%d", pPeer->id, bytes); + sDebug("%s, read to the end of wal, bytes:%" PRId64, pPeer->id, bytes); break; } - sDebug("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version); + sTrace("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version); - int32_t wsize = code; + 
int32_t wsize = (int32_t)code; int32_t ret = taosWriteMsg(pPeer->syncFd, pHead, wsize); if (ret != wsize) { code = -1; @@ -228,7 +228,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi return code; } -static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) { +static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) { SSyncNode *pNode = pPeer->pSyncNode; int32_t once = 0; // last WAL has once ever been processed int64_t offset = 0; @@ -243,9 +243,9 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) if (syncAreFilesModified(pNode, pPeer)) return -1; if (syncGetWalVersion(pNode, pPeer) < 0) return -1; - int32_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset); + int64_t bytes = syncRetrieveLastWal(pPeer, fname, fversion, offset); if (bytes < 0) { - sDebug("%s, failed to retrieve last wal", pPeer->id); + sDebug("%s, failed to retrieve last wal, bytes:%" PRId64, pPeer->id, bytes); return bytes; } @@ -263,7 +263,7 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) // if all data up to fversion is read out, it is over if (pPeer->sversion >= fversion && fversion > 0) { - sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%d sver:%" PRIu64, pPeer->id, fversion, bytes, + sDebug("%s, data up to fver:%" PRIu64 " has been read out, bytes:%" PRId64 " sver:%" PRIu64, pPeer->id, fversion, bytes, pPeer->sversion); return 0; } @@ -277,19 +277,19 @@ static int32_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) // if bytes > 0, file is updated, or fversion is not reached but file still open, read again once = 1; offset += bytes; - sDebug("%s, continue retrieve last wal, bytes:%d offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id, + sDebug("%s, continue retrieve last wal, bytes:%" PRId64 " offset:%" PRId64 " sver:%" PRIu64 " fver:%" PRIu64, pPeer->id, bytes, offset, pPeer->sversion, fversion); } return -1; } -static int32_t syncRetrieveWal(SSyncPeer *pPeer) { +static int64_t syncRetrieveWal(SSyncPeer *pPeer) { SSyncNode * pNode = pPeer->pSyncNode; char fname[TSDB_FILENAME_LEN * 3]; char wname[TSDB_FILENAME_LEN * 2]; int32_t size; - int32_t code = -1; + int64_t code = -1; int64_t index = 0; while (1) { @@ -297,7 +297,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { wname[0] = 0; code = (*pNode->getWalInfoFp)(pNode->vgId, wname, &index); if (code < 0) { - sError("%s, failed to get wal info since:%s, code:0x%x", pPeer->id, strerror(errno), code); + sError("%s, failed to get wal info since:%s, code:0x%" PRIx64, pPeer->id, strerror(errno), code); break; } @@ -309,6 +309,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { if (code == 0) { // last wal code = syncProcessLastWal(pPeer, wname, index); + sInfo("%s, last wal processed, code:%" PRId64, pPeer->id, code); break; } @@ -319,7 +320,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { struct stat fstat; if (stat(fname, &fstat) < 0) { code = -1; - sDebug("%s, failed to stat wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code); + sDebug("%s, failed to stat wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } @@ -329,14 +330,14 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { int32_t sfd = open(fname, O_RDONLY | O_BINARY); if (sfd < 0) { code = -1; - sError("%s, failed to open wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code); + sError("%s, 
failed to open wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } code = (int32_t)taosSendFile(pPeer->syncFd, sfd, NULL, size); close(sfd); if (code < 0) { - sError("%s, failed to send wal:%s for retrieve since %s, code:0x%x", pPeer->id, fname, strerror(errno), code); + sError("%s, failed to send wal:%s for retrieve since %s, code:0x%" PRIx64, pPeer->id, fname, strerror(errno), code); break; } @@ -357,7 +358,7 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { code = -1; } } else { - sError("%s, failed to send wal since %s, code:0x%x", pPeer->id, strerror(errno), code); + sError("%s, failed to send wal since %s, code:0x%" PRIx64, pPeer->id, strerror(errno), code); } return code; @@ -404,9 +405,9 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { if (pPeer->sversion == 0) pPeer->sversion = 1; sInfo("%s, start to retrieve wals", pPeer->id); - int32_t code = syncRetrieveWal(pPeer); - if (code != 0) { - sError("%s, failed to retrieve wals, code:0x%x", pPeer->id, code); + int64_t code = syncRetrieveWal(pPeer); + if (code < 0) { + sError("%s, failed to retrieve wals, code:0x%" PRIx64, pPeer->id, code); return -1; } diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt index ab2e6c307bd7f69f1faf2fc578b3dcf8d43c4c84..f2b05ab2263c0d80bc870981f86933922de639e4 100644 --- a/src/sync/test/CMakeLists.txt +++ b/src/sync/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/tfs/src/tdisk.c b/src/tfs/src/tdisk.c index 7cdaf7fd099db85c988077296ade2da7440be8a1..37798d3a886b443a20703747ad9a99e26c1502b7 100644 --- a/src/tfs/src/tdisk.c +++ b/src/tfs/src/tdisk.c @@ -52,7 +52,7 @@ int tfsUpdateDiskInfo(SDisk *pDisk) { } pDisk->dmeta.size = diskSize.tsize; - pDisk->dmeta.free = diskSize.tsize - diskSize.avail; + pDisk->dmeta.free = diskSize.avail; return code; -} \ No newline at end of file +} diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index 21e8e8379586c4258fd65ddb74f5154bfc415d15..31d52aae7d4a809044ab01a7b561801d1ad0c2eb 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 28d7abff3e1397ba56e8b0e92b400573870d5dac..cbff4fbeaa6907c83b5836d6039cbfc23a62168b 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -227,6 +227,7 @@ void *tsdbFreeFS(STsdbFS *pfs) { pfs->metaCache = NULL; pfs->cstatus = tsdbFreeFSStatus(pfs->cstatus); pthread_rwlock_destroy(&(pfs->lock)); + free(pfs); } return NULL; diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 9b407dae484a5540fa4131e61428613c256f1cdf..5e2e0fce1d45dc8ffceb8c92a2475df5b4da0ad3 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -50,7 +50,8 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { STsdbMeta *pMeta = pRepo->tsdbMeta; STable * super = NULL; STable * table = NULL; - int newSuper = 0; + bool newSuper = false; + bool superChanged = false; int tid = pCfg->tableId.tid; STable * pTable = NULL; @@ -85,7 +86,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { if (pCfg->type == TSDB_CHILD_TABLE) { super = tsdbGetTableByUid(pMeta, pCfg->superUid); if (super == NULL) { // super table not exists, try to create it - newSuper = 1; + newSuper = true; super = tsdbCreateTableFromCfg(pCfg, true); if 
(super == NULL) goto _err; } else { @@ -93,6 +94,17 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO; goto _err; } + + if (schemaVersion(pCfg->tagSchema) > schemaVersion(super->tagSchema)) { + // tag schema out of date, need to update super table tag version + STSchema *pOldSchema = super->tagSchema; + TSDB_WLOCK_TABLE(super); + super->tagSchema = tdDupSchema(pCfg->tagSchema); + TSDB_WUNLOCK_TABLE(super); + tdFreeSchema(pOldSchema); + + superChanged = true; + } } } @@ -117,7 +129,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { // TODO: refactor duplicate codes int tlen = 0; void *pBuf = NULL; - if (newSuper) { + if (newSuper || superChanged) { tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, super); pBuf = tsdbAllocBytes(pRepo, tlen); if (pBuf == NULL) goto _err; @@ -562,12 +574,13 @@ void tsdbRefTable(STable *pTable) { } void tsdbUnRefTable(STable *pTable) { - int32_t ref = T_REF_DEC(pTable); - tsdbDebug("unref table %s uid:%"PRIu64" tid:%d, refCount:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), ref); + uint64_t uid = TABLE_UID(pTable); + int32_t tid = TABLE_TID(pTable); + int32_t ref = T_REF_DEC(pTable); - if (ref == 0) { - // tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable)); + tsdbDebug("unref table, uid:%" PRIu64 " tid:%d, refCount:%d", uid, tid, ref); + if (ref == 0) { if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) { tsdbUnRefTable(pTable->pSuper); } diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 80e874ad92cebc267460c1e59e494fa52f004ced..a7f4f59e07021b659707c59a0c9b2ef916558d52 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index 9c3fa70b35467158094fe465dffac17f7785babf..63e62a54c2f679920100b7137cb49377da90789f 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -125,7 +125,7 @@ void taosArrayRemove(SArray* pArray, size_t index); * @param pDst * @param pSrc */ -void taosArrayCopy(SArray* pDst, const SArray* pSrc); +SArray* taosArrayFromList(const void* src, size_t size, size_t elemSize); /** * clone a new array diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 2752782376fb4fa13d788af051b19616974a3bb5..4dde5dbba24adfcda0fe794f2f36d6c059354f55 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -156,23 +156,14 @@ void taosArrayRemove(SArray* pArray, size_t index) { pArray->size -= 1; } -void taosArrayCopy(SArray* pDst, const SArray* pSrc) { - assert(pSrc != NULL && pDst != NULL); - - if (pDst->capacity < pSrc->size) { - void* pData = realloc(pDst->pData, pSrc->size * pSrc->elemSize); - if (pData == NULL) { // todo handle oom - - } else { - pDst->pData = pData; - pDst->capacity = pSrc->size; - } - } - - memcpy(pDst->pData, pSrc->pData, pSrc->elemSize * pSrc->size); - pDst->elemSize = pSrc->elemSize; - pDst->capacity = pSrc->size; - pDst->size = pSrc->size; +SArray* taosArrayFromList(const void* src, size_t size, size_t elemSize) { + assert(src != NULL && elemSize > 0); + SArray* pDst = taosArrayInit(size, elemSize); + + memcpy(pDst->pData, src, elemSize * size); + pDst->size = size; + + return pDst; } SArray* taosArrayDup(const SArray* pSrc) { diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 
7c5301130b58f3ad00bcc6321ca69f079ad9eb99..9bf69b46b5c30ffe87f2bf3039b0aeef384dc572 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -185,7 +185,9 @@ static void *taosThreadToOpenNewFile(void *param) { int32_t fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); if (fd < 0) { - uError("open new log file fail! fd:%d reason:%s", fd, strerror(errno)); + tsLogObj.openInProgress = 0; + tsLogObj.lines = tsLogObj.maxLines - 1000; + uError("open new log file fail! fd:%d reason:%s, reuse lastlog", fd, strerror(errno)); return NULL; } diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index 6066d58416e8a5ad905e3b0706f03adc013fa445..0c96ed2a2f3dfb7f03268c9f8fbb1b0afa2397b9 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) diff --git a/src/vnode/CMakeLists.txt b/src/vnode/CMakeLists.txt index 09c4213a024bfdaf397df39c5e164b6836951a41..3fefbea05ba763dfa856dd52c195d36ce70ccd91 100644 --- a/src/vnode/CMakeLists.txt +++ b/src/vnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) diff --git a/src/wal/CMakeLists.txt b/src/wal/CMakeLists.txt index 6f35cb9ba7e1fc111bd1c2a5ddd797abf334eac6..a89024dab5060b1f18174f769e0d70c00ad00faf 100644 --- a/src/wal/CMakeLists.txt +++ b/src/wal/CMakeLists.txt @@ -1,6 +1,8 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) +ADD_DEFINITIONS(-DWAL_CHECKSUM_WHOLE) + INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index ea1eaa4feefd01ca9db764e1b27377233b6dfd2b..aeb49830299eb0dcddfbd39a7a838fdc5d45b081 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -111,6 +111,28 @@ void walRemoveAllOldFiles(void *handle) { pthread_mutex_unlock(&pWal->mutex); } +#if defined(WAL_CHECKSUM_WHOLE) + +static void walUpdateChecksum(SWalHead *pHead) { + pHead->sver = 1; + pHead->cksum = 0; + pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len); +} + +static int walValidateChecksum(SWalHead *pHead) { + if (pHead->sver == 0) { // for compatible with wal before sver 1 + return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); + } else if (pHead->sver == 1) { + uint32_t cksum = pHead->cksum; + pHead->cksum = 0; + return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); + } + + return 0; +} + +#endif + int32_t walWrite(void *handle, SWalHead *pHead) { if (handle == NULL) return -1; @@ -123,7 +145,13 @@ int32_t walWrite(void *handle, SWalHead *pHead) { if (pHead->version <= pWal->version) return 0; pHead->signature = WAL_SIGNATURE; +#if defined(WAL_CHECKSUM_WHOLE) + walUpdateChecksum(pHead); +#else + pHead->sver = 0; taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead)); +#endif + int32_t contLen = pHead->len + sizeof(SWalHead); pthread_mutex_lock(&pWal->mutex); @@ -246,16 +274,40 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, continue; } +#if defined(WAL_CHECKSUM_WHOLE) + if (pHead->sver == 0 && walValidateChecksum(pHead)) { + wInfo("vgId:%d, wal head cksum check passed, offset:%" PRId64, pWal->vgId, pos); + *offset = pos; + return 
TSDB_CODE_SUCCESS; + } + + if (pHead->sver == 1) { + if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { + wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); + return TSDB_CODE_WAL_FILE_CORRUPTED; + } + + if (walValidateChecksum(pHead)) { + wInfo("vgId:%d, wal whole cksum check passed, offset:%" PRId64, pWal->vgId, pos); + *offset = pos; + return TSDB_CODE_SUCCESS; + } + } + +#else if (taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { wInfo("vgId:%d, wal head cksum check passed, offset:%" PRId64, pWal->vgId, pos); *offset = pos; return TSDB_CODE_SUCCESS; } + +#endif } return TSDB_CODE_WAL_FILE_CORRUPTED; } + static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { int32_t size = WAL_MAX_SIZE; void * buffer = tmalloc(size); @@ -293,6 +345,51 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch break; } +#if defined(WAL_CHECKSUM_WHOLE) + if (pHead->sver == 0 && !walValidateChecksum(pHead)) { + wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); + break; + } + } + + if (pHead->len < 0 || pHead->len > size - sizeof(SWalHead)) { + wError("vgId:%d, file:%s, wal head len out of range, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); + break; + } + } + + ret = (int32_t)tfRead(tfd, pHead->cont, pHead->len); + if (ret < 0) { + wError("vgId:%d, file:%s, failed to read wal body since %s", pWal->vgId, name, strerror(errno)); + code = TAOS_SYSTEM_ERROR(errno); + break; + } + + if (ret < pHead->len) { + wError("vgId:%d, file:%s, failed to read wal body, ret:%d len:%d", pWal->vgId, name, ret, pHead->len); + offset += sizeof(SWalHead); + continue; + } + + if (pHead->sver == 1 && !walValidateChecksum(pHead)) { + wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); + break; + } + } + +#else if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); @@ -326,6 +423,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } +#endif offset = offset + sizeof(SWalHead) + pHead->len; wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt index aec0602ac00c6b943b4dd6c20c219d17223ee896..f20a57899e049115ded0012c0092bf643af76187 100644 --- a/src/wal/test/CMakeLists.txt +++ b/src/wal/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 57fc8b1953d155c6ab45c8adc9e4146d00fb0b39..4e7e9a87ea6810c362bd676cd9152f61bc08e29d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -3,7 +3,7 @@ 
# generate release version: # mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release .. -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) SET(CMAKE_C_STANDARD 11) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index f5b0cf1478266cc58624c5e6c89cccfb2063b4f6..aa1815fc634e423f69169415a666d4181fdeb1fc 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -109,6 +109,13 @@ pipeline { java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 ''' } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/ + cd ${JENKINS_HOME}/workspace/nodejs + node nodejsChecker.js host=localhost + ''' + } catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt index a12e36ab6b9b40992c0231bd36567ab0df0d9d7c..36ed3efe191c9d949d6234bd61ffbbe28c3a33d2 100644 --- a/tests/comparisonTest/tdengine/CMakeLists.txt +++ b/tests/comparisonTest/tdengine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml index a6cbe4615e0ec132f789a1edcc63fdde6fb0ad9c..278212e75d67130d6d6d318348c928684e849481 100644 --- a/tests/examples/JDBC/taosdemo/pom.xml +++ b/tests/examples/JDBC/taosdemo/pom.xml @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.19 diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java index 4dc49fd37b1b5092b6799ec67bebd680f9642379..69e2606a79041c270fca7d7af1abb33c666d1fad 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java @@ -4,6 +4,7 @@ import com.taosdata.taosdemo.components.DataSourceFactory; import com.taosdata.taosdemo.components.JdbcTaosdemoConfig; import com.taosdata.taosdemo.domain.SuperTableMeta; import com.taosdata.taosdemo.service.DatabaseService; +import com.taosdata.taosdemo.service.QueryService; import com.taosdata.taosdemo.service.SubTableService; import com.taosdata.taosdemo.service.SuperTableService; import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator; @@ -34,6 +35,7 @@ public class TaosDemoApplication { final DatabaseService databaseService = new DatabaseService(dataSource); final SuperTableService superTableService = new SuperTableService(dataSource); final SubTableService subTableService = new SubTableService(dataSource); + final QueryService queryService = new QueryService(dataSource); // 创建数据库 long start = System.currentTimeMillis(); Map databaseParam = new HashMap<>(); @@ -90,6 +92,11 @@ public class TaosDemoApplication { int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config); end = System.currentTimeMillis(); logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms"); + /**********************************************************************************/ + // 查询 + + + 
/**********************************************************************************/ // 删除表 if (config.dropTable) { diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java index c96d6f8bed68e9bb67d959ddb1d7531b4cbadeb3..a7d08e96ea373c4773e872bcaf9b3a7b98d5bf9a 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/DataSourceFactory.java @@ -23,7 +23,6 @@ public class DataSourceFactory { properties.load(is); HikariConfig config = new HikariConfig(); - if (properties.containsKey("jdbc.driver")) { // String driverName = properties.getProperty("jdbc.driver"); // System.out.println(">>> load driver : " + driverName); diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java new file mode 100644 index 0000000000000000000000000000000000000000..efabff6afe904516ad9682cd7197412dc02765ef --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java @@ -0,0 +1,104 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.jdbc.utils.SqlSyntaxValidator; + +import javax.sql.DataSource; +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class QueryService { + + private final DataSource dataSource; + + public QueryService(DataSource dataSource) { + this.dataSource = dataSource; + } + + /* only select or show SQL Statement is valid for executeQuery */ + public Boolean[] areValidQueries(String[] sqls) { + Boolean[] ret = new Boolean[sqls.length]; + for (int i = 0; i < sqls.length; i++) { + ret[i] = true; + if (!SqlSyntaxValidator.isValidForExecuteQuery(sqls[i])) { + ret[i] = false; + continue; + } + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + stmt.executeQuery(sqls[i]); + } catch (SQLException e) { + ret[i] = false; + continue; + } + } + return ret; + } + + public String[] generateSuperTableQueries(String dbName) { + List sqls = new ArrayList<>(); + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + stmt.execute("use " + dbName); + ResultSet rs = stmt.executeQuery("show stables"); + while (rs.next()) { + String name = rs.getString("name"); + sqls.add("select count(*) from " + dbName + "." + name); + sqls.add("select first(*) from " + dbName + "." + name); + sqls.add("select last(*) from " + dbName + "." + name); + sqls.add("select last_row(*) from " + dbName + "." 
+ name); + } + } catch (SQLException e) { + e.printStackTrace(); + } + String[] sqlArr = new String[sqls.size()]; + return sqls.toArray(sqlArr); + } + + public void querySuperTable(String[] sqls, int interval, int threadCount, long queryTimes) { + List threads = IntStream.range(0, threadCount).mapToObj(i -> new Thread(() -> { + // do query + try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) { + long count = queryTimes; + if (count == 0) + count = Long.MAX_VALUE; + while (count > 0) { + for (String sql : sqls) { + long start = System.currentTimeMillis(); + ResultSet rs = stmt.executeQuery(sql); + printResultSet(rs); + long end = System.currentTimeMillis(); + long timecost = end - start; + if (interval - timecost > 0) { + TimeUnit.MILLISECONDS.sleep(interval - timecost); + } + } + count--; + } + + } catch (SQLException | InterruptedException e) { + e.printStackTrace(); + } + + })).collect(Collectors.toList()); + threads.stream().forEach(Thread::start); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + + private void printResultSet(ResultSet rs) throws SQLException { + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); + } + System.out.println(); + } + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java index b4ad2d17e58a3f7c04665707f0cd3e7327d7c16c..2504fdb0b4cd48ec263d94ec377e1bb8902ea9a7 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java @@ -2,7 +2,6 @@ package com.taosdata.taosdemo.service; import com.taosdata.taosdemo.dao.TableMapper; import com.taosdata.taosdemo.domain.TableMeta; -import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.List; diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/query.json b/tests/examples/JDBC/taosdemo/src/main/resources/query.json index 53d0b319212196257aa3e84be1221bd6e2bd0d8d..cc6900d77c3941e6af3274efdfe782c42a557990 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/query.json +++ b/tests/examples/JDBC/taosdemo/src/main/resources/query.json @@ -7,10 +7,10 @@ "password": "taosdata", "databases": "db01", "super_table_query": - {"rate":1, "concurrent":1, + {"rate":1, "concurrent":1,"time":10000, "sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}] }, - "sub_table_query": + "sub_table_query": {"stblname": "stb01", "rate":1, "threads":1, "sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}] } diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java new file mode 100644 index 0000000000000000000000000000000000000000..f2ad25710c1a82136d6316ed69e379bc3925897d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java @@ -0,0 +1,41 @@ +package com.taosdata.taosdemo.service; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import 
org.junit.BeforeClass; +import org.junit.Test; + +public class QueryServiceTest { + private static QueryService queryService; + + @Test + public void areValidQueries() { + + } + + @Test + public void generateSuperTableQueries() { + String[] sqls = queryService.generateSuperTableQueries("restful_test"); + for (String sql : sqls) { + System.out.println(sql); + } + } + + @Test + public void querySuperTable() { + String[] sqls = queryService.generateSuperTableQueries("restful_test"); + queryService.querySuperTable(sqls, 1000, 10, 10); + } + + @BeforeClass + public static void beforeClass() throws ClassNotFoundException { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8"); + config.setUsername("root"); + config.setPassword("taosdata"); + HikariDataSource dataSource = new HikariDataSource(config); + queryService = new QueryService(dataSource); + } + +} \ No newline at end of file diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 45ec54680301fd2c941d22d8d67c867b8740c37b..0b12c3d3eabaf9a17ba0859c73e794f2e973dc3b 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -62,7 +62,10 @@ int main(int argc, char *argv[]) { } // init TAOS - taos_init(); + if (taos_init()) { + exit(1); + } + TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); diff --git a/tests/examples/c/prepare.c b/tests/examples/c/prepare.c index 7a70b744ee8561459318aa456160a54b8c6270a8..bd650ed64b838d92a03bb5e023c2ca91ac5e2c2e 100644 --- a/tests/examples/c/prepare.c +++ b/tests/examples/c/prepare.c @@ -23,7 +23,10 @@ int main(int argc, char *argv[]) } // init TAOS - taos_init(); + if (taos_init()) { + printf("failed to init taos\n"); + exit(1); + } taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { diff --git a/tests/examples/c/stream.c b/tests/examples/c/stream.c index 060f5b84ff276579019d3278552e424b2a4198e9..e3053d1969b169767904d595c6ed5615e9d46ce5 100644 --- a/tests/examples/c/stream.c +++ b/tests/examples/c/stream.c @@ -55,7 +55,10 @@ int main(int argc, char *argv[]) } // init TAOS - taos_init(); + if (taos_init()) { + printf("failed to init taos\n"); + exit(1); + } strcpy(db_name, argv[2]); strcpy(tbl_name, argv[3]); diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c index cdd8ddaf7f6c4d2e5088ef36cc00ad77a0c8ebc9..5a402976241133dcef219cb64be7b3492e464aac 100644 --- a/tests/examples/c/subscribe.c +++ b/tests/examples/c/subscribe.c @@ -217,7 +217,10 @@ int main(int argc, char *argv[]) { } // init TAOS - taos_init(); + if (taos_init()) { + printf("failed to init taos\n"); + exit(1); + } TAOS* taos = taos_connect(host, user, passwd, "", 0); if (taos == NULL) { diff --git a/tests/examples/python/taosdemo/README.md b/tests/examples/python/taosdemo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d48fffe8ff44fb68a1a147a2c97ca057fb360092 --- /dev/null +++ b/tests/examples/python/taosdemo/README.md @@ -0,0 +1,38 @@ +install build environment +=== +/usr/bin/python3 -m pip install -r requirements.txt + +run python version taosdemo +=== +Usage: ./taosdemo.py [OPTION...] + +Author: Shuduo Sang + + -H, --help Show usage. + + -N, --native flag, Use native interface if set. Default is using RESTful interface. + -h, --host host, The host to connect to TDengine. 
Default is localhost. + -p, --port port, The TCP/IP port number to use for the connection. Default is 0. + -u, --user user, The user name to use when connecting to the server. Default is 'root'. + -P, --password password, The password to use when connecting to the server. Default is 'taosdata'. + -l, --colsPerRec num_of_columns_per_record, The number of columns per record. Default is 3. + -d, --dbname database, Destination database. Default is 'test'. + -a, --replica replica, Set the replica parameters of the database, Default 1, min: 1, max: 5. + -m, --tbname
table_prefix, Table prefix name. Default is 't'. + -M, --stable flag, Use super table. Default is no + -s, --stbname stable_prefix, STable prefix name. Default is 'st' + -Q, --query query, Execute query command. set 'DEFAULT' means select * from each table + -T, --threads num_of_threads, The number of threads. Default is 1. + -C, --processes num_of_processes, The number of threads. Default is 1. + -r, --batch num_of_records_per_req, The number of records per request. Default is 1000. + -t, --numOfTb num_of_tables, The number of tables. Default is 1. + -n, --numOfRec num_of_records_per_table, The number of records per table. Default is 1. + -c, --config config_directory, Configuration directory. Default is '/etc/taos/'. + -x, --inserOnly flag, Insert only flag. + -O, --outOfOrder out of order data insert, 0: In order, 1: Out of order. Default is in order. + -R, --rateOOOO rate, Out of order data's rate--if order=1 Default 10, min: 0, max: 50. + -D, --deleteMethod Delete data methods 0: don't delete, 1: delete by table, 2: delete by stable, 3: delete by database. + -v, --verbose Print verbose output + -g, --debug Print debug output + -y, --skipPrompt Skip read key for continous test, default is not skip + diff --git a/tests/examples/python/taosdemo/requirements.txt b/tests/examples/python/taosdemo/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..977e8e3726a446e85635764fe9243a3c5416ea0f --- /dev/null +++ b/tests/examples/python/taosdemo/requirements.txt @@ -0,0 +1,28 @@ +## +######## example-requirements.txt ####### +## +####### Requirements without Version Specifiers ###### +requests +multipledispatch +#beautifulsoup4 +## +####### Requirements with Version Specifiers ###### +## See https://www.python.org/dev/peps/pep-0440/#version-specifiers +#docopt == 0.6.1 # Version Matching. Must be version 0.6.1 +#keyring >= 4.1.1 # Minimum version 4.1.1 +#coverage != 3.5 # Version Exclusion. Anything except version 3.5 +#Mopidy-Dirble ~= 1.1 # Compatible release. Same as >= 1.1, == 1.* +## +####### Refer to other requirements files ###### +#-r other-requirements.txt +## +## +####### A particular file ###### +#./downloads/numpy-1.9.2-cp34-none-win32.whl +#http://wxpython.org/Phoenix/snapshot-builds/wxPython_Phoenix-3.0.3.dev1820+49a8884-cp34-none-win_amd64.whl +## +####### Additional Requirements without Version Specifiers ###### +## Same as 1st section, just here to show that you can put things in any order. +#rejected +#green +## diff --git a/tests/examples/python/taosdemo/taosdemo.py b/tests/examples/python/taosdemo/taosdemo.py new file mode 100755 index 0000000000000000000000000000000000000000..d55023bdbf119544a788aa6246c9d63dbf024872 --- /dev/null +++ b/tests/examples/python/taosdemo/taosdemo.py @@ -0,0 +1,797 @@ +#!/usr/bin/python3 +# * Copyright (c) 2019 TAOS Data, Inc. +# * +# * This program is free software: you can use, redistribute, and/or modify +# * it under the terms of the GNU Affero General Public License, version 3 +# * or later ("AGPL"), as published by the Free Software Foundation. +# * +# * This program is distributed in the hope that it will be useful, but WITHOUT +# * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# * FITNESS FOR A PARTICULAR PURPOSE. +# * +# * You should have received a copy of the GNU Affero General Public License +# * along with this program. If not, see . 
+ +# -*- coding: utf-8 -*- + +import sys +import getopt +import requests +import json +import random +import time +import datetime +from multiprocessing import Manager, Pool, Lock +from multipledispatch import dispatch +from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED + + +@dispatch(str, str) +def v_print(msg: str, arg: str): + if verbose: + print(msg % arg) + + +@dispatch(str, str, str) +def v_print(msg: str, arg1: str, arg2: str): + if verbose: + print(msg % (arg1, arg2)) + + +@dispatch(str, str, str, str) +def v_print(msg: str, arg1: str, arg2: str, arg3: str): + if verbose: + print(msg % (arg1, arg2, arg3)) + + +@dispatch(str, str, str, str, str) +def v_print(msg: str, arg1: str, arg2: str, arg3: str, arg4: str): + if verbose: + print(msg % (arg1, arg2, arg3, arg4)) + + +@dispatch(str, int) +def v_print(msg: str, arg: int): + if verbose: + print(msg % int(arg)) + + +@dispatch(str, int, str) +def v_print(msg: str, arg1: int, arg2: str): + if verbose: + print(msg % (int(arg1), str(arg2))) + + +@dispatch(str, str, int) +def v_print(msg: str, arg1: str, arg2: int): + if verbose: + print(msg % (arg1, int(arg2))) + + +@dispatch(str, int, int) +def v_print(msg: str, arg1: int, arg2: int): + if verbose: + print(msg % (int(arg1), int(arg2))) + + +@dispatch(str, int, int, str) +def v_print(msg: str, arg1: int, arg2: int, arg3: str): + if verbose: + print(msg % (int(arg1), int(arg2), str(arg3))) + + +@dispatch(str, int, int, int) +def v_print(msg: str, arg1: int, arg2: int, arg3: int): + if verbose: + print(msg % (int(arg1), int(arg2), int(arg3))) + + +@dispatch(str, int, int, int, int) +def v_print(msg: str, arg1: int, arg2: int, arg3: int, arg4: int): + if verbose: + print(msg % (int(arg1), int(arg2), int(arg3), int(arg4))) + + +def restful_execute(host: str, port: int, user: str, password: str, cmd: str): + url = "http://%s:%d/rest/sql" % (host, restPort) + + v_print("restful_execute - cmd: %s", cmd) + + resp = requests.post(url, cmd, auth=(user, password)) + + v_print("resp status: %d", resp.status_code) + + if debug: + v_print( + "resp text: %s", + json.dumps( + resp.json(), + sort_keys=True, + indent=2)) + else: + print("resp: %s" % json.dumps(resp.json())) + + +def query_func(process: int, thread: int, cmd: str): + v_print("%d process %d thread cmd: %s", process, thread, cmd) + + if oneMoreHost != "NotSupported" and random.randint( + 0, 1) == 1: + v_print("%s", "Send to second host") + if native: + cursor2.execute(cmd) + else: + restful_execute( + oneMoreHost, port, user, password, cmd) + else: + v_print("%s%s%s", "Send ", cmd, " to the host") + if native: + pass +# cursor.execute(cmd) + else: + restful_execute( + host, port, user, password, cmd) + + +def query_data_process(cmd: str): + # establish connection if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + if native: + try: + cursor.execute(cmd) + cols = cursor.description + data = cursor.fetchall() + + for col in data: + print(col) + except Exception as e: + conn.close() + print("Error: %s" % e.args[0]) + sys.exit(1) + + else: + restful_execute( + host, + port, + 
user, + password, + cmd) + + if native: + cursor.close() + conn.close() + + +def create_stb(): + for i in range(0, numOfStb): + if native: + cursor.execute( + "CREATE TABLE IF NOT EXISTS %s%d (ts timestamp, value float) TAGS (uuid binary(50))" % + (stbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE TABLE IF NOT EXISTS %s%d (ts timestamp, value float) TAGS (uuid binary(50))" % + (stbName, i) + ) + + +def use_database(): + + if native: + cursor.execute("USE %s" % current_db) + else: + restful_execute(host, port, user, password, "USE %s" % current_db) + + +def create_databases(): + for i in range(0, numOfDb): + v_print("will create database db%d", int(i)) + + if native: + cursor.execute( + "CREATE DATABASE IF NOT EXISTS %s%d" % (dbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE DATABASE IF NOT EXISTS %s%d" % (dbName, i)) + + +def drop_tables(): + # TODO + v_print("TODO: drop tables total %d", numOfTb) + pass + + +def drop_stable(): + # TODO + v_print("TODO: drop stables total %d", numOfStb) + pass + + +def drop_databases(): + v_print("drop databases total %d", numOfDb) + + # drop exist databases first + for i in range(0, numOfDb): + v_print("will drop database db%d", int(i)) + + if native: + cursor.execute( + "DROP DATABASE IF EXISTS %s%d" % + (dbName, i)) + else: + restful_execute( + host, + port, + user, + password, + "DROP DATABASE IF EXISTS %s%d" % + (dbName, i)) + + +def insert_func(process: int, thread: int): + v_print("%d process %d thread, insert_func ", process, thread) + + # generate uuid + uuid_int = random.randint(0, numOfTb + 1) + uuid = "%s" % uuid_int + v_print("uuid is: %s", uuid) + + # establish connection if native + if native: + v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir) + try: + conn = taos.connect( + host=host, + user=user, + password=password, + config=configDir) + v_print("conn: %s", str(conn.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + sys.exit(1) + + try: + cursor = conn.cursor() + v_print("cursor:%d %s", id(cursor), str(cursor.__class__)) + except Exception as e: + print("Error: %s" % e.args[0]) + conn.close() + sys.exit(1) + + v_print("numOfRec %d:", numOfRec) + + row = 0 + while row < numOfRec: + v_print("row: %d", row) + sqlCmd = ['INSERT INTO '] + try: + sqlCmd.append( + "%s.%s%d " % (current_db, tbName, thread)) + + if (numOfStb > 0 and autosubtable): + sqlCmd.append("USING %s.%s%d TAGS('%s') " % + (current_db, stbName, numOfStb - 1, uuid)) + + start_time = datetime.datetime( + 2021, 1, 25) + datetime.timedelta(seconds=row) + + sqlCmd.append("VALUES ") + for batchIter in range(0, batch): + sqlCmd.append("('%s', %f) " % + ( + start_time + + datetime.timedelta( + milliseconds=batchIter), + random.random())) + row = row + 1 + if row >= numOfRec: + v_print("BREAK, row: %d numOfRec:%d", row, numOfRec) + break + + except Exception as e: + print("Error: %s" % e.args[0]) + + cmd = ' '.join(sqlCmd) + + if measure: + exec_start_time = datetime.datetime.now() + + if native: + affectedRows = cursor.execute(cmd) + else: + restful_execute( + host, port, user, password, cmd) + + if measure: + exec_end_time = datetime.datetime.now() + exec_delta = exec_end_time - exec_start_time + v_print( + "consume %d microseconds", + exec_delta.microseconds) + + v_print("cmd: %s, length:%d", cmd, len(cmd)) + + if native: + cursor.close() + conn.close() + + +def create_tb_using_stb(): + # TODO: + pass + + +def create_tb(): + v_print("create_tb() numOfTb: 
%d", numOfTb) + for i in range(0, numOfDb): + if native: + cursor.execute("USE %s%d" % (dbName, i)) + else: + restful_execute( + host, port, user, password, "USE %s%d" % + (dbName, i)) + + for j in range(0, numOfTb): + if native: + cursor.execute( + "CREATE TABLE %s%d (ts timestamp, value float)" % + (tbName, j)) + else: + restful_execute( + host, + port, + user, + password, + "CREATE TABLE %s%d (ts timestamp, value float)" % + (tbName, j)) + + +def insert_data_process(lock, i: int, begin: int, end: int): + lock.acquire() + tasks = end - begin + v_print("insert_data_process:%d table from %d to %d, tasks %d", i, begin, end, tasks) + + if (threads < (end - begin)): + for j in range(begin, end, threads): + with ThreadPoolExecutor(max_workers=threads) as executor: + k = end if ((j + threads) > end) else (j + threads) + workers = [ + executor.submit( + insert_func, + i, + n) for n in range( + j, + k)] + wait(workers, return_when=ALL_COMPLETED) + else: + with ThreadPoolExecutor(max_workers=threads) as executor: + workers = [ + executor.submit( + insert_func, + i, + j) for j in range( + begin, + end)] + wait(workers, return_when=ALL_COMPLETED) + + lock.release() + + +def query_db(i): + if native: + cursor.execute("USE %s%d" % (dbName, i)) + else: + restful_execute( + host, port, user, password, "USE %s%d" % + (dbName, i)) + + for j in range(0, numOfTb): + if native: + cursor.execute( + "SELECT COUNT(*) FROM %s%d" % (tbName, j)) + else: + restful_execute( + host, port, user, password, "SELECT COUNT(*) FROM %s%d" % + (tbName, j)) + + +def printConfig(): + + print("###################################################################") + print("# Use native interface: %s" % native) + print("# Server IP: %s" % host) + if native: + print("# Server port: %s" % port) + else: + print("# Server port: %s" % restPort) + + print("# Configuration Dir: %s" % configDir) + print("# User: %s" % user) + print("# Password: %s" % password) + print("# Number of Columns per record: %s" % colsPerRecord) + print("# Number of Threads: %s" % threads) + print("# Number of Processes: %s" % processes) + print("# Number of Tables: %s" % numOfTb) + print("# Number of records per Table: %s" % numOfRec) + print("# Records/Request: %s" % batch) + print("# Database name: %s" % dbName) + print("# Replica: %s" % replica) + print("# Use STable: %s" % useStable) + print("# Table prefix: %s" % tbName) + if useStable: + print("# STable prefix: %s" % stbName) + + print("# Data order: %s" % outOfOrder) + print("# Data out of order rate: %s" % rateOOOO) + print("# Delete method: %s" % deleteMethod) + print("# Query command: %s" % queryCmd) + print("# Insert Only: %s" % insertOnly) + print("# Verbose output %s" % verbose) + print("# Test time: %s" % + datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")) + print("###################################################################") + + +if __name__ == "__main__": + + native = False + verbose = False + debug = False + measure = True + dropDbOnly = False + colsPerRecord = 3 + numOfDb = 1 + dbName = "test" + replica = 1 + batch = 1 + numOfTb = 1 + tbName = "tb" + useStable = False + numOfStb = 0 + stbName = "stb" + numOfRec = 10 + ieration = 1 + host = "127.0.0.1" + configDir = "/etc/taos" + oneMoreHost = "NotSupported" + port = 6030 + restPort = 6041 + user = "root" + defaultPass = "taosdata" + processes = 1 + threads = 1 + insertOnly = False + autosubtable = False + queryCmd = "NO" + outOfOrder = 0 + rateOOOO = 0 + deleteMethod = 0 + skipPrompt = False + + try: + opts, args = 
getopt.gnu_getopt(sys.argv[1:],
+                                       'Nh:p:u:P:d:a:m:Ms:Q:T:C:r:l:t:n:c:xO:R:D:vgyH',
+                                       [
+                                           'native', 'host=', 'port=', 'user=', 'password=', 'dbname=', 'replica=', 'tbname=',
+                                           'stable', 'stbname=', 'query=', 'threads=', 'processes=',
+                                           'batch=', 'colsPerRec=', 'numOfTb=', 'numOfRec=', 'config=',
+                                           'insertOnly', 'outOfOrder=', 'rateOOOO=', 'deleteMethod=',
+                                           'verbose', 'debug', 'skipPrompt', 'help'
+                                       ])
+    except getopt.GetoptError as err:
+        print('ERROR:', err)
+        print('Try `taosdemo.py --help` for more options.')
+        sys.exit(1)
+
+    if bool(opts) is False:
+        print('Try `taosdemo.py --help` for more options.')
+        sys.exit(1)
+
+    # use the default password unless -P overrides it below
+    password = defaultPass
+
+    for key, value in opts:
+        if key in ['-H', '--help']:
+            print('')
+            print(
+                'taosdemo.py for TDengine')
+            print('')
+            print('Author: Shuduo Sang ')
+            print('')
+
+            print('\t-H, --help                 Show usage.')
+            print('')
+
+            print('\t-N, --native               flag, Use native interface if set. Default is using RESTful interface.')
+            print('\t-h, --host                 host, The host to connect to TDengine. Default is localhost.')
+            print('\t-p, --port                 port, The TCP/IP port number to use for the connection. Default is 6030 (native) or 6041 (RESTful).')
+            print('\t-u, --user                 user, The user name to use when connecting to the server. Default is \'root\'.')
+            print('\t-P, --password             password, The password to use when connecting to the server. Default is \'taosdata\'.')
+            print('\t-l, --colsPerRec           num_of_columns_per_record, The number of columns per record. Default is 3.')
+            print(
+                '\t-d, --dbname               database, Destination database. Default is \'test\'.')
+            print('\t-a, --replica              replica, Set the replica parameters of the database, Default 1, min: 1, max: 5.')
+            print(
+                '\t-m, --tbname
table_prefix, Table prefix name. Default is \'t\'.')
+            print(
+                '\t-M, --stable               flag, Use super table. Default is no.')
+            print(
+                '\t-s, --stbname              stable_prefix, STable prefix name. Default is \'st\'')
+            print('\t-Q, --query                [NO|EACHTB|command] query, Execute query command. Set \'EACHTB\' to select * from each table.')
+            print(
+                '\t-T, --threads              num_of_threads, The number of threads. Default is 1.')
+            print(
+                '\t-C, --processes            num_of_processes, The number of processes. Default is 1.')
+            print('\t-r, --batch                num_of_records_per_req, The number of records per request. Default is 1.')
+            print(
+                '\t-t, --numOfTb              num_of_tables, The number of tables. Default is 1.')
+            print('\t-n, --numOfRec             num_of_records_per_table, The number of records per table. Default is 10.')
+            print('\t-c, --config               config_directory, Configuration directory. Default is \'/etc/taos/\'.')
+            print('\t-x, --insertOnly           flag, Insert only flag.')
+            print('\t-O, --outOfOrder           out of order data insert, 0: In order, 1: Out of order. Default is in order.')
+            print('\t-R, --rateOOOO             rate, The rate of out-of-order data when -O is 1. Default is 10, min: 0, max: 50.')
+            print('\t-D, --deleteMethod         Delete data methods 0: don\'t delete, 1: delete by table, 2: delete by stable, 3: delete by database.')
+            print('\t-v, --verbose              Print verbose output')
+            print('\t-g, --debug                Print debug output')
+            print(
+                '\t-y, --skipPrompt           Skip read key for continuous test, default is not skip')
+            print('')
+            sys.exit(0)
+
+        if key in ['-N', '--native']:
+            try:
+                import taos
+            except Exception as e:
+                print("Error: %s" % e.args[0])
+                sys.exit(1)
+            native = True
+
+        if key in ['-h', '--host']:
+            host = value
+
+        if key in ['-p', '--port']:
+            port = int(value)
+
+        if key in ['-u', '--user']:
+            user = value
+
+        if key in ['-P', '--password']:
+            password = value
+
+        if key in ['-d', '--dbname']:
+            dbName = value
+
+        if key in ['-a', '--replica']:
+            replica = int(value)
+            if replica < 1:
+                print("FATAL: number of replica must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-m', '--tbname']:
+            tbName = value
+
+        if key in ['-M', '--stable']:
+            useStable = True
+            numOfStb = 1
+
+        if key in ['-s', '--stbname']:
+            stbName = value
+
+        if key in ['-Q', '--query']:
+            queryCmd = str(value)
+
+        if key in ['-T', '--threads']:
+            threads = int(value)
+            if threads < 1:
+                print("FATAL: number of threads must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-C', '--processes']:
+            processes = int(value)
+            if processes < 1:
+                print("FATAL: number of processes must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-r', '--batch']:
+            batch = int(value)
+
+        if key in ['-l', '--colsPerRec']:
+            colsPerRecord = int(value)
+
+        if key in ['-t', '--numOfTb']:
+            numOfTb = int(value)
+            v_print("numOfTb is %d", numOfTb)
+
+        if key in ['-n', '--numOfRec']:
+            numOfRec = int(value)
+            v_print("numOfRec is %d", numOfRec)
+            if numOfRec < 1:
+                print("FATAL: number of records must be larger than 0")
+                sys.exit(1)
+
+        if key in ['-c', '--config']:
+            configDir = value
+            v_print("config dir: %s", configDir)
+
+        if key in ['-x', '--insertOnly']:
+            insertOnly = True
+            v_print("insert only: %d", insertOnly)
+
+        if key in ['-O', '--outOfOrder']:
+            outOfOrder = int(value)
+            v_print("out of order is %d", outOfOrder)
+
+        if key in ['-R', '--rateOOOO']:
+            rateOOOO = int(value)
+            v_print("the rate of out of order is %d", rateOOOO)
+
+        if key in ['-D', '--deleteMethod']:
+            deleteMethod = int(value)
+            if (deleteMethod < 0) or (deleteMethod > 3):
+                print(
+                    "input delete method is %d, valid value
is 0~3, set to default 0" %
+                    deleteMethod)
+                deleteMethod = 0
+            v_print("the delete method is %d", deleteMethod)
+
+        if key in ['-v', '--verbose']:
+            verbose = True
+
+        if key in ['-g', '--debug']:
+            debug = True
+
+        if key in ['-y', '--skipPrompt']:
+            skipPrompt = True
+
+    if verbose:
+        printConfig()
+
+    if not skipPrompt:
+        input("Press any key to continue...")
+
+    # establish connection first if native
+    if native:
+        v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir)
+        try:
+            conn = taos.connect(
+                host=host,
+                user=user,
+                password=password,
+                config=configDir)
+            v_print("conn: %s", str(conn.__class__))
+        except Exception as e:
+            print("Error: %s" % e.args[0])
+            sys.exit(1)
+
+        try:
+            cursor = conn.cursor()
+            v_print("cursor:%d %s", id(cursor), str(cursor.__class__))
+        except Exception as e:
+            print("Error: %s" % e.args[0])
+            conn.close()
+            sys.exit(1)
+
+    # drop data only if a delete method is set
+    if deleteMethod > 0:
+        if deleteMethod == 1:
+            drop_tables()
+            print("Drop tables done.")
+        elif deleteMethod == 2:
+            drop_stable()
+            print("Drop super tables done.")
+        elif deleteMethod == 3:
+            drop_databases()
+            print("Drop Database done.")
+        sys.exit(0)
+
+    # create databases
+    drop_databases()
+    create_databases()
+
+    # use last database
+    current_db = "%s%d" % (dbName, (numOfDb - 1))
+    use_database()
+
+    if measure:
+        start_time_begin = time.time()
+
+    if numOfStb > 0:
+        create_stb()
+        if not autosubtable:
+            create_tb_using_stb()
+    else:
+        create_tb()
+
+    if measure:
+        end_time = time.time()
+        print(
+            "Total time consumed {} seconds for creating tables.".format(
+                (end_time - start_time_begin)))
+
+    if native:
+        cursor.close()
+        conn.close()
+
+    # start insert data
+    if measure:
+        start_time = time.time()
+
+    manager = Manager()
+    lock = manager.Lock()
+    pool = Pool(processes)
+
+    begin = 0
+    end = 0
+
+    quotient = numOfTb // processes
+    if quotient < 1:
+        processes = numOfTb
+        quotient = 1
+
+    remainder = numOfTb % processes
+    v_print(
+        "num of tables: %d, quotient: %d, remainder: %d",
+        numOfTb,
+        quotient,
+        remainder)
+
+    for i in range(processes):
+        begin = end
+
+        if i < remainder:
+            end = begin + quotient + 1
+        else:
+            end = begin + quotient
+        pool.apply_async(insert_data_process, args=(lock, i, begin, end,))
+
+    pool.close()
+    pool.join()
+    time.sleep(1)
+
+    if measure:
+        end_time = time.time()
+        print(
+            "Total time consumed {} seconds for inserting data.".format(
+                (end_time - start_time)))
+
+    # query data
+    if queryCmd != "NO":
+        print("queryCmd: %s" % queryCmd)
+        query_data_process(queryCmd)
+
+    if measure:
+        end_time = time.time()
+        print(
+            "Total time consumed {} seconds.".format(
+                (end_time - start_time_begin)))
+
+    print("done")
diff --git a/tests/examples/rust b/tests/examples/rust
new file mode 160000
index 0000000000000000000000000000000000000000..1c8924dc668e6aa848214c2fc54e3ace3f5bf8df
--- /dev/null
+++ b/tests/examples/rust
@@ -0,0 +1 @@
+Subproject commit 1c8924dc668e6aa848214c2fc54e3ace3f5bf8df
diff --git a/tests/perftest-scripts/coverage_test.sh b/tests/perftest-scripts/coverage_test.sh
index 5085ec89d00def1a2147f24ccbeb488406541775..1be2053f24f28dd825c040912783675b1eab94f9 100755
--- a/tests/perftest-scripts/coverage_test.sh
+++ b/tests/perftest-scripts/coverage_test.sh
@@ -56,9 +56,9 @@ function runGeneralCaseOneByOne {
 			case=`echo $line | grep sim$ |awk '{print $NF}'`
 			if [ -n "$case" ]; then
-				./test.sh -f $case > /dev/null 2>&1 && \
-					echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
-					echo
-e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT + date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && ./test.sh -f $case > /dev/null 2>&1 && \ + ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT ) \ + || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT fi fi done < $1 diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 8498094181857095fa52123b364119801dc93d56..e43a45ed5e87ec81fc86e1af4733c0e859af69f0 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -39,6 +39,8 @@ function buildTDengine { cd $WORK_DIR/TDengine git remote update > /dev/null + git reset --hard HEAD + git checkout develop REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` LOCAL_COMMIT=`git rev-parse --short @` @@ -54,22 +56,23 @@ function buildTDengine { cd debug rm -rf * cmake .. > /dev/null - make > /dev/null - make install + make && make install > /dev/null fi } function runQueryPerfTest { [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 & - echoInfo "Run Performance Test" + echoInfo "Wait TDengine to start" + sleep 120 + echoInfo "Run Performance Test" cd $WORK_DIR/TDengine/tests/pytest python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT - yes | taosdemo -c /etc/taosperf/ -d taosdemo_insert_test -t 1000 -n 1000 > taosdemoperf.txt + yes | taosdemo -c /etc/taosperf/ -d taosdemo_insert_test -x > taosdemoperf.txt CREATETABLETIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'` INSERTRECORDSTIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'` @@ -104,6 +107,7 @@ function sendReport { stopTaosd buildTDengine runQueryPerfTest +stopTaosd echoInfo "Send Report" sendReport diff --git a/tests/pytest/alter/alter_debugFlag.py b/tests/pytest/alter/alter_debugFlag.py new file mode 100644 index 0000000000000000000000000000000000000000..38d972b58252a704420af0dda7c091c22c894ecd --- /dev/null +++ b/tests/pytest/alter/alter_debugFlag.py @@ -0,0 +1,54 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import random +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + flagList=["debugflag", "cdebugflag", "tmrDebugFlag", "uDebugFlag", "rpcDebugFlag"] + + for flag in flagList: + tdSql.execute("alter local %s 131" % flag) + tdSql.execute("alter local %s 135" % flag) + tdSql.execute("alter local %s 143" % flag) + randomFlag = random.randint(100, 250) + if randomFlag != 131 and randomFlag != 135 and randomFlag != 143: + tdSql.error("alter local %s %d" % (flag, randomFlag)) + + tdSql.query("show dnodes") + dnodeId = tdSql.getData(0, 0) + + for flag in flagList: + tdSql.execute("alter dnode %d %s 131" % (dnodeId, flag)) + tdSql.execute("alter dnode %d %s 135" % (dnodeId, flag)) + tdSql.execute("alter dnode %d %s 143" % (dnodeId, flag)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py index 48b0154361803c9f614e64b1f66e416613e83131..828c348b14324eb98da4902073e2925205a8bdb1 100644 --- a/tests/pytest/alter/alter_table.py +++ b/tests/pytest/alter/alter_table.py @@ -126,7 +126,6 @@ class TDTestCase: for i in range(2, size): tdSql.checkData(0, i, self.rowNum * (size - i)) - tdSql.error("alter local debugflag 143") tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)") tdSql.execute("create table t0 using st tags(null)") diff --git a/tests/pytest/cluster/clusterEnvSetup/Dockerfile b/tests/pytest/cluster/clusterEnvSetup/Dockerfile index b699e8a23ff0e1ff329ffaf4b97aa8f34350eaf7..c9c4d79be981e45609e040bf5835e275fc446260 100644 --- a/tests/pytest/cluster/clusterEnvSetup/Dockerfile +++ b/tests/pytest/cluster/clusterEnvSetup/Dockerfile @@ -2,15 +2,20 @@ FROM ubuntu:latest AS builder ARG PACKAGE=TDengine-server-1.6.5.10-Linux-x64.tar.gz ARG EXTRACTDIR=TDengine-enterprise-server +ARG TARBITRATORPKG=TDengine-tarbitrator-1.6.5.10-Linux-x64.tar.gz +ARG EXTRACTDIR2=TDengine-enterprise-arbitrator ARG CONTENT=taos.tar.gz WORKDIR /root - + COPY ${PACKAGE} . +COPY ${TARBITRATORPKG} . 
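+# The two tarballs are unpacked below: the server package supplies taosd/taos
+# and the client library, while the arbitrator package supplies the tarbitrator
+# binary, staged into /root/bin for the runtime stage to copy out.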
 RUN tar -zxf ${PACKAGE}
+RUN tar -zxf ${TARBITRATORPKG}
 RUN mv ${EXTRACTDIR}/driver ./lib
 RUN tar -zxf ${EXTRACTDIR}/${CONTENT}
+RUN mv ${EXTRACTDIR2}/bin/* /root/bin
 
 FROM ubuntu:latest
 
@@ -19,8 +24,10 @@ WORKDIR /root
 RUN apt-get update
 RUN apt-get install -y vim tmux net-tools
 RUN echo 'alias ll="ls -l --color=auto"' >> /root/.bashrc
-
+RUN ulimit -c unlimited
+
 COPY --from=builder /root/bin/taosd /usr/bin
+COPY --from=builder /root/bin/tarbitrator /usr/bin
 COPY --from=builder /root/bin/taos /usr/bin
 COPY --from=builder /root/cfg/taos.cfg /etc/taos/
 COPY --from=builder /root/lib/libtaos.so.* /usr/lib/libtaos.so.1
@@ -29,8 +36,8 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
 ENV LC_CTYPE=en_US.UTF-8
 ENV LANG=en_US.UTF-8
 
-EXPOSE 6030-6041/tcp 6060/tcp 6030-6039/udp
+EXPOSE 6030-6042/tcp 6060/tcp 6030-6039/udp
 
 # VOLUME [ "/var/lib/taos", "/var/log/taos", "/etc/taos" ]
 
-CMD [ "bash" ]
+CMD [ "bash" ]
\ No newline at end of file
diff --git a/tests/pytest/cluster/clusterEnvSetup/basic.py b/tests/pytest/cluster/clusterEnvSetup/basic.py
index 10ba91ab06704f9373b7c7a4c1d2cab72287d0dd..d9b8e9ce4a7bc144839334332268ac0f09f78f0d 100644
--- a/tests/pytest/cluster/clusterEnvSetup/basic.py
+++ b/tests/pytest/cluster/clusterEnvSetup/basic.py
@@ -12,15 +12,89 @@
 # -*- coding: utf-8 -*-
 
 import os
+import taos
 import random
+import argparse
 
-class ClusterTestcase:
+class BuildDockerCluster:
+
+    def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion):
+        self.hostName = hostName
+        self.user = user
+        self.password = password
+        self.configDir = configDir
+        self.numOfNodes = numOfNodes
+        self.clusterVersion = clusterVersion
+
+    def getConnection(self):
+        self.conn = taos.connect(
+            host = self.hostName,
+            user = self.user,
+            password = self.password,
+            config = self.configDir)
+
+    def createDnodes(self):
+        self.cursor = self.conn.cursor()
+        for i in range(2, self.numOfNodes + 1):
+            self.cursor.execute("create dnode tdnode%d" % i)
+
+    def startArbitrator(self):
+        print("start arbitrator")
+        os.system("docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator")
-
     def run(self):
-        os.system("./buildClusterEnv.sh -n 3 -v 2.0.14.1")
-        os.system("yes|taosdemo -h 172.27.0.7 -n 100 -t 100 -x")
-        os.system("python3 ../../concurrent_inquiry.py -H 172.27.0.7 -T 4 -t 4 -l 10")
+        if self.numOfNodes < 2 or self.numOfNodes > 5:
+            print("the number of nodes must be between 2 and 5")
+            exit(0)
+        os.system("./buildClusterEnv.sh -n %d -v %s" % (self.numOfNodes, self.clusterVersion))
+        self.getConnection()
+        self.createDnodes()
+        self.startArbitrator()
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    '-H',
+    '--host',
+    action='store',
+    default='tdnode1',
+    type=str,
+    help='host name to be connected (default: tdnode1)')
+parser.add_argument(
+    '-u',
+    '--user',
+    action='store',
+    default='root',
+    type=str,
+    help='user (default: root)')
+parser.add_argument(
+    '-p',
+    '--password',
+    action='store',
+    default='taosdata',
+    type=str,
+    help='password (default: taosdata)')
+parser.add_argument(
+    '-c',
+    '--config-dir',
+    action='store',
+    default='/etc/taos',
+    type=str,
+    help='configuration directory (default: /etc/taos)')
+parser.add_argument(
+    '-n',
+    '--num-of-nodes',
+    action='store',
+    default=2,
+    type=int,
+    help='number of nodes in the cluster (default: 2, min: 2, max: 5)')
+parser.add_argument(
+    '-v',
+    '--version',
+    action='store',
+    default='2.0.14.1',
+    type=str,
+    help='the version of the cluster to be built (default: 2.0.14.1)')
-clusterTest =
ClusterTestcase()
-clusterTest.run()
\ No newline at end of file
+args = parser.parse_args()
+cluster = BuildDockerCluster(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version)
+cluster.run()
\ No newline at end of file
diff --git a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh
index 973d000a0ad582d3aba90fc1f7ff8f7d88812473..968cdd1c1c81b9f6dba68bc2cca542038ada8606 100755
--- a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh
+++ b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh
@@ -27,18 +27,28 @@ do
 esac
 done
 
+function addTaoscfg {
+  for i in {1..5}
+  do
+    touch /data/node$i/cfg/taos.cfg
+    echo 'firstEp          tdnode1:6030' > /data/node$i/cfg/taos.cfg
+    echo 'fqdn             tdnode'$i >> /data/node$i/cfg/taos.cfg
+    echo 'arbitrator       tdnode1:6042' >> /data/node$i/cfg/taos.cfg
+  done
+}
 
 function createDIR {
-  for i in {1.. $2}
+  for i in {1..5}
   do
     mkdir -p /data/node$i/data
     mkdir -p /data/node$i/log
     mkdir -p /data/node$i/cfg
+    mkdir -p /data/node$i/core
   done
 }
 
 function cleanEnv {
-  for i in {1..3}
+  for i in {1..5}
   do
     echo /data/node$i/data/*
     rm -rf /data/node$i/data/*
@@ -54,49 +64,60 @@ function prepareBuild {
     rm -rf $CURR_DIR/../../../../release/*
   fi
 
-  cd $CURR_DIR/../../../../packaging
-  ./release.sh -v edge -n $VERSION >> /dev/null
+  if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
+    cd $CURR_DIR/../../../../packaging
+    echo "generating TDengine packages"
+    ./release.sh -v edge -n $VERSION >> /dev/null
 
-  if [ ! -f $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
-    echo "no TDengine install package found"
-    exit 1
-  fi
+    if [ ! -e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
+      echo "no TDengine install package found"
+      exit 1
+    fi
+
+    if [ !
-e $CURR_DIR/../../../../release/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + echo "no arbitrator install package found" + exit 1 + fi - cd $CURR_DIR/../../../../release - mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + cd $CURR_DIR/../../../../release + mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + fi rm -rf $DOCKER_DIR/*.yml cd $CURR_DIR - cp docker-compose.yml $DOCKER_DIR + cp *.yml $DOCKER_DIR cp Dockerfile $DOCKER_DIR - - if [ $NUM_OF_NODES -eq 4 ]; then - cp ../node4.yml $DOCKER_DIR - fi - - if [ $NUM_OF_NODES -eq 5 ]; then - cp ../node5.yml $DOCKER_DIR - fi } function clusterUp { + echo "docker compose start" - cd $DOCKER_DIR + cd $DOCKER_DIR + + if [ $NUM_OF_NODES -eq 2 ]; then + echo "create 2 dnodes" + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose up -d + fi if [ $NUM_OF_NODES -eq 3 ]; then - PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION VERSION=$VERSION docker-compose up -d + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml up -d fi if [ $NUM_OF_NODES -eq 4 ]; then - PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node4.yml up -d + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml up -d fi if [ $NUM_OF_NODES -eq 5 ]; then - PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node4.yml -f node5.yml up -d + PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml -f node5.yml up -d fi + + echo "docker compose finish" } -cleanEnv +createDIR +cleanEnv +addTaoscfg prepareBuild clusterUp \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml b/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml index c45a09582b5cd46a20ce052f3f535def67c46232..cb35abd9a1497c92dee10e1e6fb95027fb21710c 100644 --- a/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml +++ b/tests/pytest/cluster/clusterEnvSetup/docker-compose.yml @@ -1,14 +1,16 @@ version: '3.7' services: - td2.0-node1: - build: + td2.0-node1: + build: context: . 
args: - PACKAGE=${PACKAGE} + - TARBITRATORPKG=${TARBITRATORPKG} - EXTRACTDIR=${DIR} + - EXTRACTDIR2=${DIR2} image: 'tdengine:${VERSION}' - container_name: 'td2.0-node1' + container_name: 'tdnode1' cap_add: - ALL stdin_open: true @@ -18,7 +20,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode2:172.27.0.8" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -32,14 +42,18 @@ services: - type: bind source: /data/node1/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node1/core + target: /coredump - type: bind source: /data target: /root - hostname: node1 + hostname: tdnode1 networks: taos_update_net: ipv4_address: 172.27.0.7 - command: taosd + command: taosd td2.0-node2: build: @@ -48,7 +62,7 @@ services: - PACKAGE=${PACKAGE} - EXTRACTDIR=${DIR} image: 'tdengine:${VERSION}' - container_name: 'td2.0-node2' + container_name: 'tdnode2' cap_add: - ALL stdin_open: true @@ -58,7 +72,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode1:172.27.0.7" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -72,52 +94,19 @@ services: - type: bind source: /data/node2/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node2/core + target: /coredump - type: bind source: /data target: /root + hostname: tdnode2 networks: taos_update_net: ipv4_address: 172.27.0.8 command: taosd - td2.0-node3: - build: - context: . 
- args: - - PACKAGE=${PACKAGE} - - EXTRACTDIR=${DIR} - image: 'tdengine:${VERSION}' - container_name: 'td2.0-node3' - cap_add: - - ALL - stdin_open: true - tty: true - environment: - TZ: "Asia/Shanghai" - command: > - sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && - echo $TZ > /etc/timezone && - exec my-main-application" - volumes: - # bind data directory - - type: bind - source: /data/node3/data - target: /var/lib/taos - # bind log directory - - type: bind - source: /data/node3/log - target: /var/log/taos - # bind configuration - - type: bind - source: /data/node3/cfg - target: /etc/taos - - type: bind - source: /data - target: /root - networks: - taos_update_net: - ipv4_address: 172.27.0.9 - command: taosd networks: taos_update_net: diff --git a/tests/pytest/cluster/clusterEnvSetup/insert.json b/tests/pytest/cluster/clusterEnvSetup/insert.json new file mode 100644 index 0000000000000000000000000000000000000000..56a64b7b8561877cb26b4ef2336ab8b98f26c02c --- /dev/null +++ b/tests/pytest/cluster/clusterEnvSetup/insert.json @@ -0,0 +1,55 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 1, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 2, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 100000, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 1, + "rows_per_tbl": 100, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/cluster/clusterEnvSetup/node3.yml b/tests/pytest/cluster/clusterEnvSetup/node3.yml new file mode 100644 index 0000000000000000000000000000000000000000..4f4f3a6f991f0ffff51265fee4c5a3b8941b5d85 --- /dev/null +++ b/tests/pytest/cluster/clusterEnvSetup/node3.yml @@ -0,0 +1,54 @@ +version: '3.7' + +services: + td2.0-node3: + build: + context: . 
+ args: + - PACKAGE=${PACKAGE} + - EXTRACTDIR=${DIR} + image: 'tdengine:${VERSION}' + container_name: 'tdnode3' + cap_add: + - ALL + stdin_open: true + tty: true + environment: + TZ: "Asia/Shanghai" + command: > + sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && + echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && + exec my-main-application" + extra_hosts: + - "tdnode1:172.27.0.7" + - "tdnode2:172.27.0.8" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" + volumes: + # bind data directory + - type: bind + source: /data/node3/data + target: /var/lib/taos + # bind log directory + - type: bind + source: /data/node3/log + target: /var/log/taos + # bind configuration + - type: bind + source: /data/node3/cfg + target: /etc/taos + # bind core dump path + - type: bind + source: /data/node3/core + target: /coredump + - type: bind + source: /data + target: /root + hostname: tdnode3 + networks: + taos_update_net: + ipv4_address: 172.27.0.9 + command: taosd \ No newline at end of file diff --git a/tests/pytest/cluster/clusterEnvSetup/node4.yml b/tests/pytest/cluster/clusterEnvSetup/node4.yml index 542dc4cac1626ebc4387e6138067a5d91d64434d..c82a174cb883b14c885de7c5e8f19d98263b22b7 100644 --- a/tests/pytest/cluster/clusterEnvSetup/node4.yml +++ b/tests/pytest/cluster/clusterEnvSetup/node4.yml @@ -7,8 +7,8 @@ services: args: - PACKAGE=${PACKAGE} - EXTRACTDIR=${DIR} - image: 'tdengine:2.0.13.1' - container_name: 'td2.0-node4' + image: 'tdengine:${VERSION}' + container_name: 'tdnode4' cap_add: - ALL stdin_open: true @@ -18,7 +18,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode2:172.27.0.8" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -32,9 +40,14 @@ services: - type: bind source: /data/node4/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node4/core + target: /coredump - type: bind source: /data - target: /root + target: /root + hostname: tdnode4 networks: taos_update_net: ipv4_address: 172.27.0.10 diff --git a/tests/pytest/cluster/clusterEnvSetup/node5.yml b/tests/pytest/cluster/clusterEnvSetup/node5.yml index 832cc65e0888e5fc51feae7f13a7cd24b813878b..2e37e47512430ac99244f6b2f0e2d309a2145edc 100644 --- a/tests/pytest/cluster/clusterEnvSetup/node5.yml +++ b/tests/pytest/cluster/clusterEnvSetup/node5.yml @@ -7,8 +7,8 @@ services: args: - PACKAGE=${PACKAGE} - EXTRACTDIR=${DIR} - image: 'tdengine:2.0.13.1' - container_name: 'td2.0-node5' + image: 'tdengine:${VERSION}' + container_name: 'tdnode5' cap_add: - ALL stdin_open: true @@ -18,7 +18,15 @@ services: command: > sh -c "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && + mkdir /coredump && + echo 'kernel.core_pattern=/coredump/core_%e_%p' >> /etc/sysctl.conf && + sysctl -p && exec my-main-application" + extra_hosts: + - "tdnode2:172.27.0.8" + - "tdnode3:172.27.0.9" + - "tdnode4:172.27.0.10" + - "tdnode5:172.27.0.11" volumes: # bind data directory - type: bind @@ -32,9 +40,14 @@ services: - type: bind source: /data/node5/cfg target: /etc/taos + # bind core dump path + - type: bind + source: /data/node5/core + target: /coredump - type: bind source: /data - target: /root + target: /root + hostname: tdnode5 
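+      # NOTE: the static address assigned below must stay in sync with the
+      # "tdnode5" entries listed under extra_hosts in the other node files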
     networks:
       taos_update_net:
         ipv4_address: 172.27.0.11
diff --git a/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py b/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..457dd4ee5aa5919951adbcea834d34cd367d3080
--- /dev/null
+++ b/tests/pytest/cluster/clusterEnvSetup/taosdemoWrapper.py
@@ -0,0 +1,142 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import os
+import random
+import argparse
+
+class taosdemoWrapper:
+
+    def __init__(self, host, metadata, database, tables, threads, configDir, replica,
+            columnType, columnsPerTable, rowsPerTable, disorderRatio, disorderRange, charTypeLen):
+        self.host = host
+        self.metadata = metadata
+        self.database = database
+        self.tables = tables
+        self.threads = threads
+        self.configDir = configDir
+        self.replica = replica
+        self.columnType = columnType
+        self.columnsPerTable = columnsPerTable
+        self.rowsPerTable = rowsPerTable
+        self.disorderRatio = disorderRatio
+        self.disorderRange = disorderRange
+        self.charTypeLen = charTypeLen
+
+    def run(self):
+        if self.metadata is None:
+            os.system("taosdemo -h %s -d %s -t %d -T %d -c %s -a %d -b %s -l %d -n %d -O %d -R %d -w %d -x -y"
+                % (self.host, self.database, self.tables, self.threads, self.configDir, self.replica, self.columnType,
+                self.columnsPerTable, self.rowsPerTable, self.disorderRatio, self.disorderRange, self.charTypeLen))
+        else:
+            os.system("taosdemo -f %s" % self.metadata)
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    '-H',
+    '--host-name',
+    action='store',
+    default='tdnode1',
+    type=str,
+    help='host name to be connected (default: tdnode1)')
+parser.add_argument(
+    '-f',
+    '--metadata',
+    action='store',
+    default=None,
+    type=str,
+    help='The metadata file describing the run; when -f is given all other options are ignored (default: None)')
+parser.add_argument(
+    '-d',
+    '--db-name',
+    action='store',
+    default='test',
+    type=str,
+    help='Database name to be created (default: test)')
+parser.add_argument(
+    '-t',
+    '--num-of-tables',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of tables (default: 10)')
+parser.add_argument(
+    '-T',
+    '--num-of-threads',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of rest threads (default: 10)')
+parser.add_argument(
+    '-c',
+    '--config-dir',
+    action='store',
+    default='/etc/taos/',
+    type=str,
+    help='Configuration directory.
(Default is /etc/taos/)')
+parser.add_argument(
+    '-a',
+    '--replica',
+    action='store',
+    default=1,
+    type=int,
+    help='Set the replica parameters of the database (default: 1, min: 1, max: 3)')
+parser.add_argument(
+    '-b',
+    '--column-type',
+    action='store',
+    default='int',
+    type=str,
+    help='The data type of columns (default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP)')
+parser.add_argument(
+    '-l',
+    '--num-of-cols',
+    action='store',
+    default=10,
+    type=int,
+    help='The number of columns per record (default: 10)')
+parser.add_argument(
+    '-n',
+    '--num-of-rows',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of rows per table (default: 1000)')
+parser.add_argument(
+    '-O',
+    '--disorder-ratio',
+    action='store',
+    default=0,
+    type=int,
+    help='Disorder ratio (0: in order, > 0: disorder ratio, default: 0)')
+parser.add_argument(
+    '-R',
+    '--disorder-range',
+    action='store',
+    default=0,
+    type=int,
+    help='Out-of-order data range in ms (default: 0)')
+parser.add_argument(
+    '-w',
+    '--char-type-length',
+    action='store',
+    default=16,
+    type=int,
+    help='Length of binary/nchar columns (default: 16)')
+
+args = parser.parse_args()
+taosdemo = taosdemoWrapper(args.host_name, args.metadata, args.db_name, args.num_of_tables,
+    args.num_of_threads, args.config_dir, args.replica, args.column_type, args.num_of_cols,
+    args.num_of_rows, args.disorder_ratio, args.disorder_range, args.char_type_length)
+taosdemo.run()
diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py
index e832c9a74e1c8b6c42a882a59931bff6d481f445..d1f180373bbe0585bd6e01b224e64359e0449e77 100644
--- a/tests/pytest/concurrent_inquiry.py
+++ b/tests/pytest/concurrent_inquiry.py
@@ -40,7 +40,7 @@ class ConcurrentInquiry:
     # stableNum = 2,subtableNum = 1000,insertRows = 100):
     def __init__(self,ts,host,user,password,dbname,
                  stb_prefix,subtb_prefix,n_Therads,r_Therads,probabilities,loop,
-                 stableNum ,subtableNum ,insertRows ,mix_table):
+                 stableNum ,subtableNum ,insertRows ,mix_table, replay):
         self.n_numOfTherads = n_Therads
         self.r_numOfTherads = r_Therads
         self.ts=ts
@@ -65,6 +65,7 @@ class ConcurrentInquiry:
         self.mix_table = mix_table
         self.max_ts = datetime.datetime.now()
         self.min_ts = datetime.datetime.now() - datetime.timedelta(days=5)
+        self.replay = replay
     def SetThreadsNum(self,num):
         self.numOfTherads=num
@@ -412,7 +413,7 @@ class ConcurrentInquiry:
         )
         cl = conn.cursor()
         cl.execute("use %s;" % self.dbname)
-
+        fo = open('bak_sql_n_%d'%threadID,'w+')
         print("Thread %d: starting" % threadID)
         loop = self.loop
         while loop:
@@ -423,6 +424,7 @@
             else:
                 sql=self.gen_query_join()
             print("sql is ",sql)
+            fo.write(sql+'\n')
             start = time.time()
             cl.execute(sql)
             cl.fetchall()
@@ -438,13 +440,49 @@
                     exit(-1)
             loop -= 1
             if loop == 0: break
-
+        fo.close()
         cl.close()
         conn.close()
         print("Thread %d: finishing" % threadID)
+
+    def query_thread_nr(self,threadID):  # replay queries via the native Python interface
+        host = self.host
+        user = self.user
+        password = self.password
+        conn = taos.connect(
+            host,
+            user,
+            password,
+            )
+        cl = conn.cursor()
+        cl.execute("use %s;" % self.dbname)
+        replay_sql = []
+        with open('bak_sql_n_%d'%threadID,'r') as f:
+            replay_sql = f.readlines()
+        print("Replay Thread %d: starting" % threadID)
+        for sql in replay_sql:
+            try:
+                print("sql is ",sql)
+                start = time.time()
+                cl.execute(sql)
+                cl.fetchall()
+                end = time.time()
+                print("time cost :",end-start)
+            except Exception as e:
+                print('-'*40)
+                print(
+                    "Failure thread%d, sql: %s
\nexception: %s" % + (threadID, str(sql),str(e))) + err_uec='Unable to establish connection' + if err_uec in str(e) and loop >0: + exit(-1) + cl.close() + conn.close() + print("Replay Thread %d: finishing" % threadID) def query_thread_r(self,threadID): #使用rest接口查询 print("Thread %d: starting" % threadID) + fo = open('bak_sql_r_%d'%threadID,'w+') loop = self.loop while loop: try: @@ -453,6 +491,7 @@ class ConcurrentInquiry: else: sql=self.gen_query_join() print("sql is ",sql) + fo.write(sql+'\n') start = time.time() self.rest_query(sql) end = time.time() @@ -467,20 +506,53 @@ class ConcurrentInquiry: exit(-1) loop -= 1 if loop == 0: break - - print("Thread %d: finishing" % threadID) + fo.close() + print("Thread %d: finishing" % threadID) + + def query_thread_rr(self,threadID): #使用rest接口重放 + print("Replay Thread %d: starting" % threadID) + replay_sql = [] + with open('bak_sql_r_%d'%threadID,'r') as f: + replay_sql = f.readlines() + + for sql in replay_sql: + try: + print("sql is ",sql) + start = time.time() + self.rest_query(sql) + end = time.time() + print("time cost :",end-start) + except Exception as e: + print('-'*40) + print( + "Failure thread%d, sql: %s \nexception: %s" % + (threadID, str(sql),str(e))) + err_uec='Unable to establish connection' + if err_uec in str(e) and loop >0: + exit(-1) + print("Replay Thread %d: finishing" % threadID) def run(self): print(self.n_numOfTherads,self.r_numOfTherads) threads = [] - for i in range(self.n_numOfTherads): - thread = threading.Thread(target=self.query_thread_n, args=(i,)) - threads.append(thread) - thread.start() - for i in range(self.r_numOfTherads): - thread = threading.Thread(target=self.query_thread_r, args=(i,)) - threads.append(thread) - thread.start() + if self.replay: #whether replay + for i in range(self.n_numOfTherads): + thread = threading.Thread(target=self.query_thread_nr, args=(i,)) + threads.append(thread) + thread.start() + for i in range(self.r_numOfTherads): + thread = threading.Thread(target=self.query_thread_rr, args=(i,)) + threads.append(thread) + thread.start() + else: + for i in range(self.n_numOfTherads): + thread = threading.Thread(target=self.query_thread_n, args=(i,)) + threads.append(thread) + thread.start() + for i in range(self.r_numOfTherads): + thread = threading.Thread(target=self.query_thread_r, args=(i,)) + threads.append(thread) + thread.start() parser = argparse.ArgumentParser() parser.add_argument( @@ -595,13 +667,20 @@ parser.add_argument( default=0, type=int, help='0:stable & substable ,1:subtable ,2:stable (default: 0)') +parser.add_argument( + '-R', + '--replay', + action='store', + default=0, + type=int, + help='0:not replay ,1:replay (default: 0)') args = parser.parse_args() q = ConcurrentInquiry( args.ts,args.host_name,args.user,args.password,args.db_name, args.stb_name_prefix,args.subtb_name_prefix,args.number_of_native_threads,args.number_of_rest_threads, args.probabilities,args.loop_per_thread,args.number_of_stables,args.number_of_tables ,args.number_of_records, - args.mix_stable_subtable ) + args.mix_stable_subtable, args.replay ) if args.create_table: q.gen_data() diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh new file mode 100755 index 0000000000000000000000000000000000000000..f426fbbcec3070789209eb787dba61d95571f0e5 --- /dev/null +++ b/tests/pytest/concurrent_inquiry.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# This is the script for us to try to cause the TDengine server or client to crash +# +# PREPARATION +# +# 1. 
Build and compile the TDengine source code that comes with this script, in the same directory tree
+# 2. Please follow the directions in our README.md, and build TDengine in the build/ directory
+# 3. Adjust the configuration file if needed under build/test/cfg/taos.cfg
+# 4. Run the TDengine server instance: cd build; ./build/bin/taosd -c test/cfg
+# 5. Make sure you have a working Python3 environment: run /usr/bin/python3 --version, and you should get 3.6 or above
+# 6. Make sure you have the proper Python packages: # sudo apt install python3-setuptools python3-pip python3-distutils
+#
+# RUNNING THIS SCRIPT
+#
+# This script assumes the source code directory is intact, and that the binaries have been built in the
+# build/ directory; as such, we will load the Python libraries in the directory tree, and also load
+# the TDengine client shared library (so) file, in the build/ directory, as evidenced in the env
+# variables below.
+#
+# Running the script is simple, no parameter is needed (for now, but will change in the future).
+#
+# Happy Crashing...
+
+
+# Due to the heavy path name assumptions/usage, let us require that the user be in the current directory
+EXEC_DIR=`dirname "$0"`
+if [[ $EXEC_DIR != "." ]]
+then
+    echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
+    exit -1
+fi
+
+CURR_DIR=`pwd`
+IN_TDINTERNAL="community"
+if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
+    TAOS_DIR=$CURR_DIR/../../..
+    TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
+    LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6,7|rev`/lib
+else
+    TAOS_DIR=$CURR_DIR/../..
+    TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
+    LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
+fi
+
+# Now getting ready to execute Python
+# The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk
+PYTHON_EXEC=python3.8
+
+# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
+export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd)
+
+# Then let us set up the library path so that our compiled SO file can be loaded by Python
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
+
+# Now we are all set; let's see if we can find a crash. Note we pass all params
+CONCURRENT_INQUIRY=concurrent_inquiry.py
+if [[ $1 == '--valgrind' ]]; then
+    shift
+    export PYTHONMALLOC=malloc
+    VALGRIND_OUT=valgrind.out
+    VALGRIND_ERR=valgrind.err
+    # How to generate valgrind suppression file: https://stackoverflow.com/questions/17159578/generating-suppressions-for-memory-leaks
+    # valgrind --leak-check=full --gen-suppressions=all --log-fd=9 python3.8 ./concurrent_inquiry.py $@ 9>>memcheck.log
+    echo Executing under VALGRIND, with STDOUT/ERR going to $VALGRIND_OUT and $VALGRIND_ERR, please watch them from a different terminal.
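+    # The suppression file below silences known client-library leaks; if new
+    # leaks appear, entries can be regenerated with --gen-suppressions=all as
+    # described in the link above.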
+    valgrind \
+        --leak-check=yes \
+        --suppressions=crash_gen/valgrind_taos.supp \
+        $PYTHON_EXEC \
+        $CONCURRENT_INQUIRY $@ > $VALGRIND_OUT 2> $VALGRIND_ERR
+elif [[ $1 == '--helgrind' ]]; then
+    shift
+    HELGRIND_OUT=helgrind.out
+    HELGRIND_ERR=helgrind.err
+    valgrind \
+        --tool=helgrind \
+        $PYTHON_EXEC \
+        $CONCURRENT_INQUIRY $@ > $HELGRIND_OUT 2> $HELGRIND_ERR
+else
+    $PYTHON_EXEC $CONCURRENT_INQUIRY $@
+fi
+
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index c6b857b097671b8faf650f4659c2b2990ef67d97..309c0df9108f340bf96d73529ccf8bb49c1c9692 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -354,10 +354,11 @@ class ThreadCoordinator:
             # end, and maybe signal them to stop
             if isinstance(err, CrashGenError): # our own transition failure
                 Logging.info("State transition error")
+                # TODO: saw an error here once, let's print out stack info for err?
                 traceback.print_stack()
                 transitionFailed = True
                 self._te = None # Not running any more
-                self._execStats.registerFailure("State transition error")
+                self._execStats.registerFailure("State transition error: {}".format(err))
             else:
                 raise
         # return transitionFailed # Why did we have this??!!
@@ -882,8 +883,12 @@ class StateMechine:
         self._stateWeights = [1, 2, 10, 40]
 
     def init(self, dbc: DbConn): # late initialization, don't save the dbConn
-        self._curState = self._findCurrentState(dbc) # starting state
-        Logging.debug("Found Starting State: {}".format(self._curState))
+        try:
+            self._curState = self._findCurrentState(dbc) # starting state
+        except taos.error.ProgrammingError as err:
+            Logging.error("Failed to initialize state machine, cannot find current state: {}".format(err))
+            traceback.print_stack()
+            raise # re-throw
 
     # TODO: seems no longer used, remove?
     def getCurrentState(self):
@@ -951,6 +956,8 @@ class StateMechine:
     # We transition the system to a new state by examining the current state itself
     def transition(self, tasks, dbc: DbConn):
+        global gSvcMgr
+
         if (len(tasks) == 0): # before 1st step, or otherwise empty
             Logging.debug("[STT] Starting State: {}".format(self._curState))
             return # do nothing
@@ -1276,6 +1283,7 @@ class Task():
                 0x510, # vnode not in ready state
                 0x14,  # db not ready, errno changed
                 0x600, # Invalid table ID, why?
+ 0x218, # Table does not exist 1000 # REST catch-all error ]: return True # These are the ALWAYS-ACCEPTABLE ones @@ -2369,7 +2377,7 @@ class MainExec: '-n', '--dynamic-db-table-names', action='store_true', - help='Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false)') + help='Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)') parser.add_argument( '-o', '--num-dnodes', diff --git a/tests/pytest/crash_gen/db.py b/tests/pytest/crash_gen/db.py index dc072d7abce68debd69cb162a4d784fcc5b68c4e..e38692dbe1e5c33ffe162015e3e60630fd51fa38 100644 --- a/tests/pytest/crash_gen/db.py +++ b/tests/pytest/crash_gen/db.py @@ -15,6 +15,7 @@ from util.log import * from .misc import Logging, CrashGenError, Helper, Dice import os import datetime +import traceback # from .service_manager import TdeInstance class DbConn: @@ -349,6 +350,7 @@ class DbConnNative(DbConn): def execute(self, sql): if (not self.isOpen): + traceback.print_stack() raise CrashGenError( "Cannot exec SQL unless db connection is open", CrashGenError.DB_CONNECTION_NOT_OPEN) Logging.debug("[SQL] Executing SQL: {}".format(sql)) @@ -361,6 +363,7 @@ class DbConnNative(DbConn): def query(self, sql): # return rows affected if (not self.isOpen): + traceback.print_stack() raise CrashGenError( "Cannot query database until connection is open, restarting?", CrashGenError.DB_CONNECTION_NOT_OPEN) Logging.debug("[SQL] Executing SQL: {}".format(sql)) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index aee8f7502c354628d9cb0ca9df55ab4a7ad17c6f..6b710732caf6368f680cb0cd784add47426db1c6 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -66,7 +66,7 @@ python3 ./test.py -f tag_lite/int.py python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py - +python3 ./test.py -f tag_lite/alter_tag.py #python3 ./test.py -f dbmgmt/database-name-boundary.py python3 ./test.py -f import_merge/importBlock1HO.py @@ -193,6 +193,7 @@ python3 ./test.py -f stream/table_n.py #alter table python3 ./test.py -f alter/alter_table_crash.py python3 ./test.py -f alter/alter_table.py +python3 ./test.py -f alter/alter_debugFlag.py # client python3 ./test.py -f client/client.py diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh index 528316700d184171641f7f686a3c12102b6c1feb..502c859dad85a8fcceee7e0efaaa46ab9bded02a 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -16,7 +16,7 @@ TOP_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep -v community|head -n1` nohup $TAOSD_DIR >/dev/null & cd - -./crash_gen.sh --valgrind -p -t 10 -s 350 -b 4 +./crash_gen.sh --valgrind -p -t 10 -s 500 -b 4 pidof taosd|xargs kill -9 grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log @@ -36,11 +36,13 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 3 -a "$defiMemError" -lt 1013 ]; then - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" + if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + cat valgrind.err + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! 
## ${NC}" exit 8 elif [ "$defiMemError" -gt 1013 ];then #add for azure + cat valgrind.err echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 diff --git a/tests/pytest/insert/basic_unsigned.py b/tests/pytest/insert/basic_unsigned.py new file mode 100644 index 0000000000000000000000000000000000000000..993e58ce877284f73c136bea864c59f5166916d9 --- /dev/null +++ b/tests/pytest/insert/basic_unsigned.py @@ -0,0 +1,56 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + ret = tdSql.execute( + 'create table tb (ts timestamp, speed tinyint unsigned)') + + insertRows = 10 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(0, insertRows): + ret = tdSql.execute( + 'insert into tb values (now + %dm, %d)' % + (i, i)) + + tdLog.info("insert earlier data") + tdSql.execute('insert into tb values (now - 5m , 10)') + tdSql.execute('insert into tb values (now - 6m , 10)') + tdSql.execute('insert into tb values (now - 7m , NULL)') + tdSql.execute('insert into tb values (now - 8m , 254)') + + tdSql.error('insert into tb values (now - 9m, -1)') + tdSql.error('insert into tb values (now - 9m, 255)') + + tdSql.query("select * from tb") + tdSql.checkRows(insertRows + 4) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/pytest_1.sh b/tests/pytest/pytest_1.sh index 6905f0c61eb077cc45e4c9a6f40a7650931fbf16..e6638cbb171ab3d8dc38c13f5767ff2f09c522bb 100755 --- a/tests/pytest/pytest_1.sh +++ b/tests/pytest/pytest_1.sh @@ -16,10 +16,10 @@ python3 ./test.py -f insert/nchar.py python3 ./test.py -f insert/nchar-unicode.py python3 ./test.py -f insert/multi.py python3 ./test.py -f insert/randomNullCommit.py -#python3 insert/retentionpolicy.py +python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py -#python3 ./test.py -f insert/before_1970.py +python3 ./test.py -f insert/before_1970.py python3 bug2265.py #table diff --git a/tests/pytest/pytest_2.sh b/tests/pytest/pytest_2.sh index 4ec517a0bf1c5eff8ad670cf28ab63d5ce818460..d152ed85fb9e138a6b9d62574bcc8a5119973448 100755 --- a/tests/pytest/pytest_2.sh +++ b/tests/pytest/pytest_2.sh @@ -7,15 +7,10 @@ python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py python3 ./test.py -f update/merge_commit_data.py -python3 ./test.py -f update/merge_commit_data-0.py + python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py python3 ./test.py -f update/merge_commit_last-0.py python3 ./test.py -f 
diff --git a/tests/pytest/pytest_1.sh b/tests/pytest/pytest_1.sh
index 6905f0c61eb077cc45e4c9a6f40a7650931fbf16..e6638cbb171ab3d8dc38c13f5767ff2f09c522bb 100755
--- a/tests/pytest/pytest_1.sh
+++ b/tests/pytest/pytest_1.sh
@@ -16,10 +16,10 @@ python3 ./test.py -f insert/nchar.py
 python3 ./test.py -f insert/nchar-unicode.py
 python3 ./test.py -f insert/multi.py
 python3 ./test.py -f insert/randomNullCommit.py
-#python3 insert/retentionpolicy.py
+python3 insert/retentionpolicy.py
 python3 ./test.py -f insert/alterTableAndInsert.py
 python3 ./test.py -f insert/insertIntoTwoTables.py
-#python3 ./test.py -f insert/before_1970.py
+python3 ./test.py -f insert/before_1970.py
 python3 bug2265.py
 
 #table
diff --git a/tests/pytest/pytest_2.sh b/tests/pytest/pytest_2.sh
index 4ec517a0bf1c5eff8ad670cf28ab63d5ce818460..d152ed85fb9e138a6b9d62574bcc8a5119973448 100755
--- a/tests/pytest/pytest_2.sh
+++ b/tests/pytest/pytest_2.sh
@@ -7,15 +7,10 @@ python3 ./test.py -f update/append_commit_data.py
 python3 ./test.py -f update/append_commit_last-0.py
 python3 ./test.py -f update/append_commit_last.py
 python3 ./test.py -f update/merge_commit_data.py
-python3 ./test.py -f update/merge_commit_data-0.py
+
 python3 ./test.py -f update/merge_commit_data2.py
 python3 ./test.py -f update/merge_commit_data2_update0.py
 python3 ./test.py -f update/merge_commit_last-0.py
 python3 ./test.py -f update/merge_commit_last.py
 python3 ./test.py -f update/bug_td2279.py
-# wal
-python3 ./test.py -f wal/addOldWalTest.py
-
-# function
-python3 ./test.py -f functions/all_null_value.py
\ No newline at end of file
diff --git a/tests/pytest/pytest_3.sh b/tests/pytest/pytest_3.sh
index 8b2ac721e3e41430840dd3c70c2ad193e133c4f4..b1e2539fa935bcb9ce69482325e1e4c6df0503f2 100755
--- a/tests/pytest/pytest_3.sh
+++ b/tests/pytest/pytest_3.sh
@@ -70,26 +70,7 @@ python3 testCompress.py
 python3 testNoCompress.py
 python3 testMinTablesPerVnode.py
 
-# functions
-python3 ./test.py -f functions/function_avg.py -r 1
-python3 ./test.py -f functions/function_bottom.py -r 1
-python3 ./test.py -f functions/function_count.py -r 1
-python3 ./test.py -f functions/function_diff.py -r 1
-python3 ./test.py -f functions/function_first.py -r 1
-python3 ./test.py -f functions/function_last.py -r 1
-python3 ./test.py -f functions/function_last_row.py -r 1
-python3 ./test.py -f functions/function_leastsquares.py -r 1
-python3 ./test.py -f functions/function_max.py -r 1
-python3 ./test.py -f functions/function_min.py -r 1
-python3 ./test.py -f functions/function_operations.py -r 1
-python3 ./test.py -f functions/function_percentile.py -r 1
-python3 ./test.py -f functions/function_spread.py -r 1
-python3 ./test.py -f functions/function_stddev.py -r 1
-python3 ./test.py -f functions/function_sum.py -r 1
-python3 ./test.py -f functions/function_top.py -r 1
-python3 ./test.py -f functions/function_twa.py -r 1
-python3 ./test.py -f functions/function_twa_test2.py
-python3 ./test.py -f functions/function_stddev_td2555.py
+
 python3 queryCount.py
 python3 ./test.py -f query/queryGroupbyWithInterval.py
 python3 client/twoClients.py
diff --git a/tests/pytest/pytest_4.sh b/tests/pytest/pytest_4.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a68e6a1fefeb09af5e0c7f934de14f0781ae540c
--- /dev/null
+++ b/tests/pytest/pytest_4.sh
@@ -0,0 +1,33 @@
+python3 ./test.py -f update/merge_commit_data-0.py
+# wal
+python3 ./test.py -f wal/addOldWalTest.py
+
+# function
+python3 ./test.py -f functions/all_null_value.py
+# functions
+python3 ./test.py -f functions/function_avg.py -r 1
+python3 ./test.py -f functions/function_bottom.py -r 1
+python3 ./test.py -f functions/function_count.py -r 1
+python3 ./test.py -f functions/function_diff.py -r 1
+python3 ./test.py -f functions/function_first.py -r 1
+python3 ./test.py -f functions/function_last.py -r 1
+python3 ./test.py -f functions/function_last_row.py -r 1
+python3 ./test.py -f functions/function_leastsquares.py -r 1
+python3 ./test.py -f functions/function_max.py -r 1
+python3 ./test.py -f functions/function_min.py -r 1
+python3 ./test.py -f functions/function_operations.py -r 1
+python3 ./test.py -f functions/function_percentile.py -r 1
+python3 ./test.py -f functions/function_spread.py -r 1
+python3 ./test.py -f functions/function_stddev.py -r 1
+python3 ./test.py -f functions/function_sum.py -r 1
+python3 ./test.py -f functions/function_top.py -r 1
+python3 ./test.py -f functions/function_twa.py -r 1
+python3 ./test.py -f functions/function_twa_test2.py
+python3 ./test.py -f functions/function_stddev_td2555.py
+python3 ./test.py -f insert/metadataUpdate.py
+python3 ./test.py -f tools/taosdemoTest2.py
+python3 ./test.py -f query/last_cache.py
+python3 ./test.py -f query/last_row_cache.py
+python3 ./test.py -f account/account_create.py
+python3 ./test.py -f alter/alter_table.py
+python3 ./test.py -f query/queryGroupbySort.py
\ No newline at end of file
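pytest_1.sh through the new pytest_4.sh are flat lists of python3 ./test.py -f <case> commands; the shuffle here moves the functions/ cases and a few slow ones into the fourth shard so the parallel CI stages finish in comparable time. A hypothetical driver for such a shard file (the failure policy is illustrative, not what the CI does):

    import shlex
    import subprocess

    def run_shard(path):
        """Run each non-comment line of a pytest_N.sh-style list; return the failure count."""
        failed = 0
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue          # blank lines and disabled cases
                if subprocess.run(shlex.split(line)).returncode != 0:
                    failed += 1
        return failed

    # raise SystemExit(run_shard("pytest_4.sh"))  # exit status = number of failures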
diff --git a/tests/pytest/tag_lite/alter_tag.py b/tests/pytest/tag_lite/alter_tag.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e5abb6c134840ecb4ab52c7d3a6ab623885e12b
--- /dev/null
+++ b/tests/pytest/tag_lite/alter_tag.py
@@ -0,0 +1,68 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+        self.ts = 1538548685000
+
+    def run(self):
+        tdSql.prepare()
+
+        print("==============step1")
+        tdSql.execute(
+            "CREATE TABLE IF NOT EXISTS ampere (ts TIMESTAMP(8),ampere DOUBLE(8)) TAGS (device_name BINARY(50),build_id BINARY(50),project_id BINARY(50),alias BINARY(50))")
+        tdSql.execute("insert into d1001 using ampere tags('test', '2', '2', '2') VALUES (now, 123)")
+        tdSql.execute("ALTER TABLE ampere ADD TAG variable_id BINARY(50)")
+
+        print("==============step2")
+
+        tdSql.execute("insert into d1002 using ampere tags('test', '2', '2', '2', 'test') VALUES (now, 124)")
+
+        tdSql.query("select * from ampere")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 6, None)
+        tdSql.checkData(1, 6, 'test')
+
+        # Test case for: https://jira.taosdata.com:18080/browse/TD-2423
+        tdSql.execute("create table stb(ts timestamp, col1 int, col2 nchar(20)) tags(tg1 int, tg2 binary(20), tg3 nchar(25))")
+        tdSql.execute("insert into tb1 using stb(tg1, tg3) tags(1, 'test1') values(now, 1, 'test1')")
+        tdSql.query("select *, tg1, tg2, tg3 from tb1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 3, 1)
+        tdSql.checkData(0, 4, None)
+        tdSql.checkData(0, 5, 'test1')
+
+        tdSql.execute("create table tb2 using stb(tg3, tg2) tags('test3', 'test2')")
+        tdSql.query("select tg1, tg2, tg3 from tb2")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, None)
+        tdSql.checkData(0, 1, 'test2')
+        tdSql.checkData(0, 2, 'test3')
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
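alter_tag.py checks two tag behaviors: a tag added later via ALTER TABLE ... ADD TAG reads back as NULL on subtables created before the change, and a subtable may bind only a named subset of tags (using stb(tg1, tg3) tags(...)), with unnamed tags defaulting to NULL. A hedged model of that binding as a plain dict (the helper is illustrative):

    def bind_tags(schema, names, values):
        """Map named tag values onto a super-table tag schema; unnamed tags stay NULL."""
        if len(names) != len(values):
            raise ValueError("tag name/value count mismatch")
        bound = dict.fromkeys(schema, None)   # every tag starts out as NULL
        for name, value in zip(names, values):
            if name not in bound:
                raise ValueError("unknown tag: " + name)
            bound[name] = value
        return bound

    # mirrors: insert into tb1 using stb(tg1, tg3) tags(1, 'test1') ...
    assert bind_tags(["tg1", "tg2", "tg3"], ["tg1", "tg3"], [1, "test1"]) == \
        {"tg1": 1, "tg2": None, "tg3": "test1"}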
diff --git a/tests/pytest/tools/lowaTest.py b/tests/pytest/tools/lowaTest.py
index 2b65dcf3eff1ed9ed7275fd774807cfa0318ec81..ad8b5925bd99b9c5918421eb277cea6e5ed100a7 100644
--- a/tests/pytest/tools/lowaTest.py
+++ b/tests/pytest/tools/lowaTest.py
@@ -51,7 +51,7 @@ class TDTestCase:
         else:
             tdLog.info("taosd found in %s" % buildPath)
         binPath = buildPath+ "/build/bin/"
-        os.system("yes | %staosdemox -f tools/insert.json" % binPath)
+        os.system("yes | %staosdemo -f tools/insert.json" % binPath)
 
         tdSql.execute("use db01")
         tdSql.query("select count(*) from stb01")
diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py
index 2a4a552c8fb87f9c5a3169afcad71793462b924f..1cb2f71d8fc98e703795a839eb3441cf6e044d5d 100644
--- a/tests/pytest/tools/taosdemoTest.py
+++ b/tests/pytest/tools/taosdemoTest.py
@@ -23,9 +23,10 @@ class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
-
+
         self.numberOfTables = 10000
         self.numberOfRecords = 100
+
     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
 
@@ -38,9 +39,10 @@ class TDTestCase:
             if ("taosd" in files):
                 rootRealPath = os.path.dirname(os.path.realpath(root))
                 if ("packaging" not in rootRealPath):
-                    buildPath = root[:len(root)-len("/build/bin")]
+                    buildPath = root[:len(root) - len("/build/bin")]
                     break
         return buildPath
+
     def run(self):
         tdSql.prepare()
         buildPath = self.getBuildPath()
@@ -48,18 +50,21 @@ class TDTestCase:
             tdLog.exit("taosd not found!")
         else:
             tdLog.info("taosd found in %s" % buildPath)
-        binPath = buildPath+ "/build/bin/"
-        os.system("yes | %staosdemo -t %d -n %d -x" % (binPath,self.numberOfTables, self.numberOfRecords))
+        binPath = buildPath + "/build/bin/"
+        os.system("%staosdemo -y -M -t %d -n %d -x" %
+                  (binPath, self.numberOfTables, self.numberOfRecords))
 
         tdSql.execute("use test")
         tdSql.query("select count(*) from meters")
         tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords)
-        tdSql.query("select sum(f1) from test.meters interval(1h) sliding(30m)")
+        tdSql.query(
+            "select sum(col1) from test.meters interval(1h) sliding(30m)")
         tdSql.checkRows(2)
 
-        tdSql.query("select apercentile(f1, 1) from test.meters interval(10s)")
-        tdSql.checkRows(11)
+        tdSql.query(
+            "select apercentile(col1, 1) from test.meters interval(10s)")
+        tdSql.checkRows(1)
 
         tdSql.error("select loc, count(loc) from test.meters")
diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTest2.py
index 1e492aa8fca30fa6a9081ed023162828c01ca3db..75a79d0585e766718151ca9b0e3e195c03732e16 100644
--- a/tests/pytest/tools/taosdemoTest2.py
+++ b/tests/pytest/tools/taosdemoTest2.py
@@ -31,11 +31,12 @@ class TDTestCase:
 
     def insertDataAndAlterTable(self, threadID):
         if(threadID == 0):
-            os.system("yes | taosdemo -t %d -n %d -x" % (self.numberOfTables, self.numberOfRecords))
+            os.system("taosdemo -M -y -t %d -n %d -x" %
+                      (self.numberOfTables, self.numberOfRecords))
         if(threadID == 1):
             time.sleep(2)
             print("use test")
-            tdSql.execute("use test")
+            tdSql.execute("use test")
             # check if all the tables have heen created
             while True:
                 tdSql.query("show tables")
@@ -52,19 +53,19 @@ class TDTestCase:
                 print("number of records: %d" % rows)
                 if(rows > 0):
                     break
-                time.sleep(1)
-            print("alter table test.meters add column f4 int")
-            tdSql.execute("alter table test.meters add column f4 int")
-            print("insert into test.t0 values (now, 1, 2, 3, 4)")
-            tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4)")
+                time.sleep(1)
+            print("alter table test.meters add column col10 int")
+            tdSql.execute("alter table test.meters add column col10 int")
+            print("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
+            tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)")
 
-    def run(self):
+    def run(self):
         tdSql.prepare()
         t1 = threading.Thread(target=self.insertDataAndAlterTable, args=(0, ))
         t2 = threading.Thread(target=self.insertDataAndAlterTable, args=(1, ))
 
-        t1.start()
+        t1.start()
         t2.start()
         t1.join()
         t2.join()
@@ -78,4 +79,4 @@ class TDTestCase:
 
 tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
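The taosdemo tests stop piping `yes |` into the tool and use its non-interactive flags instead: -y answers prompts, and -M appears to select the schema whose columns are named col1..colN, which is why the queries switch from f1 to col1. A sketch of the same invocation through subprocess rather than os.system (the binary path is illustrative):

    import subprocess

    def run_taosdemo(bin_dir, tables=10000, records=100):
        """Non-interactive taosdemo run; check=True surfaces a non-zero exit as an exception."""
        cmd = [bin_dir + "taosdemo", "-y", "-M",
               "-t", str(tables), "-n", str(records), "-x"]
        subprocess.run(cmd, check=True)

    # run_taosdemo("/usr/local/taos/bin/")   # hypothetical install location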
diff --git a/tests/script/general/parser/alter.sim b/tests/script/general/parser/alter.sim
index 56a677cc73f35d7656daef6fd3062bbc453007ef..e604d2122e56b4b2d87ca6cd87597abcac02672c 100644
--- a/tests/script/general/parser/alter.sim
+++ b/tests/script/general/parser/alter.sim
@@ -133,7 +133,7 @@ sleep 100
 #  return -1
 #endi
 #sql alter table tb1 drop column c3
-#sleep 2000
+#sleep 500
 #sql insert into tb1 values (now, 2, 'taos')
 #sleep 30000
 #sql select * from strm
@@ -144,7 +144,7 @@ sleep 100
 #  return -1
 #endi
 #sql alter table tb1 add column c3 int
-#sleep 2000
+#sleep 500
 #sql insert into tb1 values (now, 3, 'taos', 3);
 #sleep 100
 #sql select * from strm
diff --git a/tests/script/general/parser/auto_create_tb.sim b/tests/script/general/parser/auto_create_tb.sim
index e19eb0c667d5e667e3a044c9045c0584ec41d4e2..926eb7547694860810e4018d814637c788787e54 100644
--- a/tests/script/general/parser/auto_create_tb.sim
+++ b/tests/script/general/parser/auto_create_tb.sim
@@ -208,7 +208,7 @@ endi
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/between_and.sim b/tests/script/general/parser/between_and.sim
new file mode 100644
index 0000000000000000000000000000000000000000..2e031c4917cf396044a399493ef0be2849baa830
--- /dev/null
+++ b/tests/script/general/parser/between_and.sim
@@ -0,0 +1,165 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+$db = testdb
+
+sql create database $db
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10))
+
+sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1");
+sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2");
+sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3");
+sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4");
+
+sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1")
+sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2")
+sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3")
+sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4")
+sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4")
+
+sql select tbname,id1 from st2;
+
+if $rows != 4 then
+  return -1
+endi
+
+
+sql select * from st2;
+
+if $rows != 8 then
+  return -1
+endi
+
+sql select * from st2 where ts between now-50s and now+450s
+
+if $rows != 5 then
+  return -1
+endi
+
+sql select tbname,id1 from st2 where id1 between 2 and 3;
+
+if $rows != 2 then
+  return -1
+endi
+
+if $data00 != tb2 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data10 != tb3 then
+  return -1
+endi
+if $data11 != 3 then
+  return -1
+endi
+
+sql select tbname,id2 from st2 where id2 between 2.0 and 3.0;
+
+if $rows != 2 then
+  return -1
+endi
+
+if $data00 != tb2 then
+  return -1
+endi
+if $data01 != 2.00000 then
+  return -1
+endi
+if $data10 != tb3 then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+
+
+sql select tbname,id4 from st2 where id4 between 2.0 and 3.0;
+
+if $rows != 2 then
+  return -1
+endi
+
+if $data00 != tb2 then
+  return -1
+endi
+if $data01 != 2.000000000 then
+  return -1
+endi
+if $data10 != tb3 then
+  return -1
+endi
+if $data11 != 3.000000000 then
+  return -1
+endi
+
+
+sql select tbname,id5 from st2 where id5 between 2.0 and 3.0;
+
+if $rows != 2 then
+  return -1
+endi
+
+if $data00 != tb2 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data10 != tb3 then
+  return -1
+endi
+if $data11 != 3 then
+  return -1
+endi
+
+sql select tbname,id6 from st2 where id6 between 2.0 and 3.0;
+
+if $rows != 2 then
+  return -1
+endi
+
+if $data00 != tb2 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data10 != tb3 then
+  return -1
+endi
+if $data11 != 3 then
+  return -1
+endi
+
+sql select * from st2 where f1 between 2 and 3 and f2 between 2.0 and 3.0 and f3 between 2.0 and 3.0 and f4 between 2.0 and 3.0 and f5 between 2.0 and 3.0 and f6 between 2.0 and 3.0;
+
+if $rows != 2 then
+  return -1
+endi
+
+if $data01 != 2 then
+  return -1
+endi
+if $data11 != 3 then
+  return -1
+endi
+
+sql_error select * from st2 where f7 between 2.0 and 3.0;
+sql_error select * from st2 where f8 between 2.0 and 3.0;
+sql_error select * from st2 where f9 between 2.0 and 3.0;
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
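between_and.sim establishes that BETWEEN ... AND is inclusive on both ends and applies to timestamps, numeric columns, and numeric tags, while bool, binary, and nchar operands are rejected with sql_error. A small Python model of the predicate under those rules:

    def between(value, low, high):
        """Inclusive BETWEEN; numeric/timestamp operands only, mirroring the sql_error cases."""
        if isinstance(value, (bool, str)):
            raise TypeError("BETWEEN not supported on bool/binary/nchar")
        return low <= value <= high

    f1 = [1, 2, 3, 4, 4, 4, 4, 4]                           # the f1 values inserted above
    assert [v for v in f1 if between(v, 2, 3)] == [2, 3]    # both bounds included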
diff --git a/tests/script/general/parser/col_arithmetic_operation.sim b/tests/script/general/parser/col_arithmetic_operation.sim
index efea3bfd59a4763109690c9ddacd1c0f84191732..3911c2dca6da3f13dd908b4a0e1cc9f6f7279cfa 100644
--- a/tests/script/general/parser/col_arithmetic_operation.sim
+++ b/tests/script/general/parser/col_arithmetic_operation.sim
@@ -105,7 +105,7 @@ run general/parser/col_arithmetic_query.sim
 #======================================= all in files query =======================================
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/columnValue_unsign.sim b/tests/script/general/parser/columnValue_unsign.sim
index 6e9a37fdb60f3ddbb847a092c4a7745316c68b01..8e44ccb5facf074691f3aeeb7c60099ab6ef691f 100644
--- a/tests/script/general/parser/columnValue_unsign.sim
+++ b/tests/script/general/parser/columnValue_unsign.sim
@@ -83,7 +83,7 @@ if $data03 != NULL then
 endi
 
 sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql insert into mt_unsigned_1 values(now, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+sql insert into mt_unsigned_1 values(now+1s, 1, 2, 3, 4, 5, 6, 7, 8, 9);
 
 sql_error insert into mt_unsigned_1 values(now, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 sql_error insert into mt_unsigned_1 values(now, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
diff --git a/tests/script/general/parser/commit.sim b/tests/script/general/parser/commit.sim
index 533fbf48f068d166ff0d7de440117694c4d48a97..4085ef620d26ac7c869d6f2a298022ac2fe19564 100644
--- a/tests/script/general/parser/commit.sim
+++ b/tests/script/general/parser/commit.sim
@@ -82,7 +82,7 @@ endw
 
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 sleep 100
 print ================== server restart completed
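The columnValue_unsign.sim fix moves the second insert to now+1s because two rows written to the same table with an identical timestamp collide on the primary key: the later write updates or is dropped rather than appended, so back-to-back `now` inserts make row counts flaky. A tiny last-write-wins model of that collision (one plausible reading of the default behavior):

    table = {}   # rows keyed by timestamp, the primary key

    def insert(ts_ms, row):
        table[ts_ms] = row          # an identical ts overwrites instead of appending

    insert(1600000000000, (None,) * 9)
    insert(1600000000000, (1, 2, 3, 4, 5, 6, 7, 8, 9))   # collides: still one row
    insert(1600000001000, (1, 2, 3, 4, 5, 6, 7, 8, 9))   # ts + 1s: now two rows
    assert len(table) == 2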
diff --git a/tests/script/general/parser/create_tb_with_tag_name.sim b/tests/script/general/parser/create_tb_with_tag_name.sim
new file mode 100644
index 0000000000000000000000000000000000000000..bbd5fc11e1eb662053860bca4f0d4210c1d1fbbc
--- /dev/null
+++ b/tests/script/general/parser/create_tb_with_tag_name.sim
@@ -0,0 +1,162 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+$db = testdb
+
+sql create database $db
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int) tags (id int, t1 int, t2 nchar(4), t3 double)
+
+
+sql insert into tb1 using st2 (id, t1) tags(1,2) values (now, 1)
+
+sql select id,t1,t2,t3 from tb1
+
+if $rows != 1 then
+  return -1
+endi
+
+if $data00 != 1 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != NULL then
+  return -1
+endi
+if $data03 != NULL then
+  return -1
+endi
+
+sql create table tb2 using st2 (t2,t3) tags ("12",22.0)
+
+sql select id,t1,t2,t3 from tb2;
+
+if $rows != 1 then
+  return -1
+endi
+
+if $data00 != NULL then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data02 != 12 then
+  return -1
+endi
+if $data03 != 22.000000000 then
+  return -1
+endi
+
+
+sql create table tb3 using st2 tags (1,2,"3",33.0);
+
+sql select id,t1,t2,t3 from tb3;
+
+
+if $rows != 1 then
+  return -1
+endi
+
+if $data00 != 1 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 3 then
+  return -1
+endi
+if $data03 != 33.000000000 then
+  return -1
+endi
+
+sql insert into tb4 using st2 tags(1,2,"33",44.0) values (now, 1);
+
+sql select id,t1,t2,t3 from tb4;
+
+if $rows != 1 then
+  return -1
+endi
+
+if $data00 != 1 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 33 then
+  return -1
+endi
+if $data03 != 44.000000000 then
+  return -1
+endi
+
+sql_error create table tb5 using st2() tags (3,3,"3",33.0);
+
+sql_error create table tb6 using st2 (id,t1) tags (3,3,"3",33.0);
+
+sql_error create table tb7 using st2 (id,t1) tags (3);
+
+sql_error create table tb8 using st2 (ide) tags (3);
+
+sql_error create table tb9 using st2 (id);
+
+sql_error create table tb10 using st2 (id t1) tags (1,1);
+
+sql_error create table tb10 using st2 (id,,t1) tags (1,1,1);
+
+sql_error create table tb11 using st2 (id,t1,) tags (1,1,1);
+
+sql create table tb12 using st2 (t1,id) tags (2,1);
+
+sql select id,t1,t2,t3 from tb12;
+if $rows != 1 then
+  return -1
+endi
+
+if $data00 != 1 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != NULL then
+  return -1
+endi
+if $data03 != NULL then
+  return -1
+endi
+
+sql create table tb13 using st2 ("t1",'id') tags (2,1);
+
+sql select id,t1,t2,t3 from tb13;
+
+if $rows != 1 then
+  return -1
+endi
+
+if $data00 != 1 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != NULL then
+  return -1
+endi
+if $data03 != NULL then
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
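create_tb_with_tag_name.sim fixes the rules for named-tag subtable creation: tags may be listed in any order and even quoted, unnamed tags default to NULL, while an empty name list, a name/value count mismatch, an unknown tag, and malformed lists all fail. A hypothetical validator capturing those sql_error cases:

    def validate_tag_list(schema, names, values):
        """Raise for the tag-list shapes the script expects to fail."""
        if not names:
            raise ValueError("empty tag name list")        # st2() tags (...)
        if len(names) != len(values):
            raise ValueError("tag count mismatch")         # (id,t1) tags (3)
        for name in names:
            if name not in schema:
                raise ValueError("unknown tag: " + name)   # (ide) tags (3)
        return dict(zip(names, values))

    schema = ["id", "t1", "t2", "t3"]
    # order is free: (t1,id) tags (2,1) binds id=1, t1=2
    assert validate_tag_list(schema, ["t1", "id"], [2, 1]) == {"t1": 2, "id": 1}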
diff --git a/tests/script/general/parser/first_last.sim b/tests/script/general/parser/first_last.sim
index 9c1f0774badf5edf44cc616f085eb06c8a47c120..aeff740a5f790533e26fda99205d3cffc876deb1 100644
--- a/tests/script/general/parser/first_last.sim
+++ b/tests/script/general/parser/first_last.sim
@@ -77,7 +77,7 @@ run general/parser/first_last_query.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index ca020c40631c5ecb77ef0d808fa8251e6014865b..af16bfd4f18a40d9964b6f4f7d4f6ea0840937a2 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -763,3 +763,20 @@ endi
 if $data01 != 1.414213562 then
   return -1
 endi
+
+sql create stable st1 (ts timestamp, f1 int, f2 int) tags (id int);
+sql create table tb1 using st1 tags(1);
+
+sql insert into tb1 values (now, 1, 1);
+
+sql select stddev(f1) from st1 group by f1;
+
+if $rows != 1 then
+  return -1
+endi
+
+
+if $data00 != 0.000000000 then
+  return -1
+endi
+
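The new function.sim block asserts that stddev over a one-row group is exactly 0.000000000 rather than NULL or an error, consistent with a population (divide-by-n) standard deviation, for which a single sample has zero spread. A quick check of that reading:

    import statistics

    group = [1]                                   # the single f1 value in tb1
    assert statistics.pstdev(group) == 0.0        # population stddev of one sample
    print("%.9f" % statistics.pstdev(group))      # 0.000000000, matching $data00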
diff --git a/tests/script/general/parser/import.sim b/tests/script/general/parser/import.sim
index 83751dc6166275d7d61ad9d63b41b0817ecd8a77..d626f4fa74eb3f53a9f9118800494a05320678a7 100644
--- a/tests/script/general/parser/import.sim
+++ b/tests/script/general/parser/import.sim
@@ -25,15 +25,15 @@ sql use $db
 sql create table tb (ts timestamp, c1 int, c2 timestamp)
 sql insert into tb values ('2019-05-05 11:30:00.000', 1, now)
 sql insert into tb values ('2019-05-05 12:00:00.000', 1, now)
-sleep 2000
+sleep 500
 sql import into tb values ('2019-05-05 11:00:00.000', -1, now)
-sleep 2000
+sleep 500
 sql import into tb values ('2019-05-05 11:59:00.000', -1, now)
-sleep 2000
+sleep 500
 sql import into tb values ('2019-05-04 08:00:00.000', -1, now)
-sleep 2000
+sleep 500
 sql import into tb values ('2019-05-04 07:59:00.000', -1, now)
-sleep 2000
+sleep 500
 
 sql select * from tb
 if $rows != 6 then
@@ -60,7 +60,7 @@ endi
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/import_commit1.sim b/tests/script/general/parser/import_commit1.sim
index 27be5560c5812d3856929d0da7b53a23fda6210c..f330fe4cd94b1cba82ea985b9b4841245753e099 100644
--- a/tests/script/general/parser/import_commit1.sim
+++ b/tests/script/general/parser/import_commit1.sim
@@ -40,7 +40,7 @@ while $x < $rowNum
 endw
 print ====== tables created
 
-sleep 2000
+sleep 500
 
 $ts = $ts0 + $delta
 $ts = $ts + 1
diff --git a/tests/script/general/parser/import_commit2.sim b/tests/script/general/parser/import_commit2.sim
index 72ee2b3844f09af2dc3c15ba6426c9fd51ad832f..47b30acb49111da69c1c0b8b1927fd7f13f0c31c 100644
--- a/tests/script/general/parser/import_commit2.sim
+++ b/tests/script/general/parser/import_commit2.sim
@@ -39,7 +39,7 @@ while $x < $rowNum
 endw
 print ====== tables created
 
-sleep 2000
+sleep 500
 
 $ts = $ts0 + $delta
 $ts = $ts + 1
diff --git a/tests/script/general/parser/import_commit3.sim b/tests/script/general/parser/import_commit3.sim
index a9f021b20c42c03535fda4df28fbe1eb3c246a11..1e041375de3da7fc2a5c17ae40caa1251b5d9d1b 100644
--- a/tests/script/general/parser/import_commit3.sim
+++ b/tests/script/general/parser/import_commit3.sim
@@ -39,7 +39,7 @@ while $x < $rowNum
 endw
 print ====== tables created
 
-sleep 2000
+sleep 500
 
 $ts = $ts + 1
 sql insert into $tb values ( $ts , -1, -1, -1, -1, -1)
@@ -47,7 +47,7 @@ $ts = $ts0 + $delta
 $ts = $ts + 1
 sql import into $tb values ( $ts , -2, -2, -2, -2, -2)
-sleep 2000
+sleep 500
 
 sql show databases
diff --git a/tests/script/general/parser/import_file.sim b/tests/script/general/parser/import_file.sim
index 6b4dd07c7981ca141765b41cca3d005874a6583f..e50fc92e28ee498989f8b71d1bc5dd50faa7baa3 100644
--- a/tests/script/general/parser/import_file.sim
+++ b/tests/script/general/parser/import_file.sim
@@ -3,9 +3,9 @@ system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
-sleep 2000
+sleep 500
 sql connect
-sleep 2000
+sleep 500
 
 sql drop database if exists indb
diff --git a/tests/script/general/parser/insert_multiTbl.sim b/tests/script/general/parser/insert_multiTbl.sim
index 39223d84e394454b028b40a8dfa3f5457e0e9428..e9ee4fcf98666ddf81816c367927c8e528de3f42 100644
--- a/tests/script/general/parser/insert_multiTbl.sim
+++ b/tests/script/general/parser/insert_multiTbl.sim
@@ -4,7 +4,7 @@ system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c walLevel -v 0
 system sh/exec.sh -n dnode1 -s start
-sleep 2000
+sleep 500
 sql connect
 sleep 100
 print ======================== dnode1 start
diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim
index 36a643c424b9db0bf81f9ab0c7f5d4f093e2183e..13b6a08024206b99b410ac06a913d14078c11bfc 100644
--- a/tests/script/general/parser/interp.sim
+++ b/tests/script/general/parser/interp.sim
@@ -59,7 +59,7 @@ run general/parser/interp_test.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim
index 682a6cd5dfe42fe0d0411416b76faba15ba88f2c..d1eadfb67a43c027b54c66967d8818454cc81d81 100644
--- a/tests/script/general/parser/lastrow.sim
+++ b/tests/script/general/parser/lastrow.sim
@@ -62,7 +62,7 @@ run general/parser/lastrow_query.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim
index 22d52c4257f07481e1dcc02119989d277d9e018d..17636dfb74d117187db66f5b66918fdd4ba9500b 100644
--- a/tests/script/general/parser/limit.sim
+++ b/tests/script/general/parser/limit.sim
@@ -66,7 +66,7 @@ run general/parser/limit_stb.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/limit1.sim b/tests/script/general/parser/limit1.sim
index 0597723490f1c6af3ef6518dfa3b0d8aa005d3c4..2c40f0af2bfd3b38a43cefa4501d992dcbcfa661 100644
--- a/tests/script/general/parser/limit1.sim
+++ b/tests/script/general/parser/limit1.sim
@@ -61,7 +61,7 @@ run general/parser/limit1_stb.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
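Almost every parser script in this patch shrinks the post-restart `sleep 2000` to `sleep 500`, a fixed-delay guess that trades safety margin for wall-clock time. A readiness poll would remove the guesswork entirely; a hypothetical sketch (the probe command is illustrative, not something test.sh provides):

    import subprocess
    import time

    def wait_until_ready(probe_cmd, timeout=10.0, interval=0.2):
        """Poll a cheap probe until it exits 0, instead of sleeping a fixed interval."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if subprocess.run(probe_cmd, capture_output=True).returncode == 0:
                return True
            time.sleep(interval)
        return False

    # e.g. wait_until_ready(["taos", "-s", "show databases;"])  # hypothetical probe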
diff --git a/tests/script/general/parser/limit1_tblocks100.sim b/tests/script/general/parser/limit1_tblocks100.sim
index 43519d2df4ddee99ea5de3a797d5333b10f48f4a..45ead58ba067be400f60c39a15cc58adb2b75a05 100644
--- a/tests/script/general/parser/limit1_tblocks100.sim
+++ b/tests/script/general/parser/limit1_tblocks100.sim
@@ -61,7 +61,7 @@ run general/parser/limit1_stb.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/limit2.sim b/tests/script/general/parser/limit2.sim
index ddc5c10362a0743381e3f935a9d078c20be31858..0e7e13b6de0ea5eb40fd0609f9591b839157f5d6 100644
--- a/tests/script/general/parser/limit2.sim
+++ b/tests/script/general/parser/limit2.sim
@@ -69,7 +69,7 @@ print ====== tables created
 
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/limit2_query.sim b/tests/script/general/parser/limit2_query.sim
index 1e8077d26e03a9fb50425a74bbbe0ca027fb9c66..9fe287960d22642bbea0139246d3f90537fef628 100644
--- a/tests/script/general/parser/limit2_query.sim
+++ b/tests/script/general/parser/limit2_query.sim
@@ -143,6 +143,97 @@ if $data11 != -1 then
   return -1
 endi
 
+sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 8200
+if $rows != 8200 then
+  return -1
+endi
+
+sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 8190;
+if $rows != 10 then
+  return -1
+endi
+
+if $data00 != @18-10-15 19:30:00.000@ then
+  return -1
+endi
+
+if $data01 != 5 then
+  return -1
+endi
+
+if $data10 != @18-10-15 19:35:00.000@ then
+  return -1
+endi
+
+if $data11 != -1000 then
+  return -1
+endi
+
+if $data20 != @18-10-15 19:40:00.000@ then
+  return -1
+endi
+
+if $data21 != 6 then
+  return -1
+endi
+
+if $data30 != @18-10-15 19:45:00.000@ then
+  return -1
+endi
+
+if $data31 != -1000 then
+  return -1
+endi
+
+sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10 offset 10001;
+if $rows != 10 then
+  return -1
+endi
+
+if $data00 != @18-10-22 02:25:00.000@ then
+  return -1
+endi
+
+if $data01 != -1000 then
+  return -1
+endi
+
+if $data10 != @18-10-22 02:30:00.000@ then
+  return -1
+endi
+
+if $data11 != 1 then
+  return -1
+endi
+
+if $data20 != @18-10-22 02:35:00.000@ then
+  return -1
+endi
+
+if $data21 != -1000 then
+  return -1
+endi
+
+if $data30 != @18-10-22 02:40:00.000@ then
+  return -1
+endi
+
+if $data31 != 2 then
+  return -1
+endi
+
+sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 10000 offset 10001;
+print ====> needs to validate the last row result
+if $rows != 9998 then
+  return -1
+endi
+
+
+sql select max(c1) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1000, -2) limit 100 offset 20001;
+if $rows != 0 then
+  return -1
+endi
+
 # tb + interval + fill(linear) + limit offset
 $limit = $rowNum
 $offset = $limit / 2
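The new limit2_query.sim cases page through a fill(value)ed five-minute interval result with limit/offset, and the expected row counts are plain window arithmetic: both range endpoints are aligned to 5m, so the scan produces (1543145400000 - 1537146000000) / 300000 + 1 = 19999 windows; offset 10001 leaves 9998 rows and offset 20001 leaves none. Checking that arithmetic:

    start = 1537146000000          # ms, a multiple of 300000
    end   = 1543145400000          # ms, a multiple of 300000
    win   = 5 * 60 * 1000          # interval(5m)

    windows = (end - start) // win + 1      # inclusive of both endpoint windows
    assert windows == 19999
    assert windows - 10001 == 9998          # limit 10000 offset 10001
    assert max(windows - 20001, 0) == 0     # offset past the end -> empty result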
diff --git a/tests/script/general/parser/mixed_blocks.sim b/tests/script/general/parser/mixed_blocks.sim
index 79bf65d1475a8f73fb31df35bf5e8e5f31476fca..8208963858721c88262af385fe4d9336012d63e8 100644
--- a/tests/script/general/parser/mixed_blocks.sim
+++ b/tests/script/general/parser/mixed_blocks.sim
@@ -59,7 +59,7 @@ sql show databases
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
@@ -154,7 +154,7 @@ sql insert into t2 values('2020-1-1 1:5:1', 99);
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql select ts from m1 where ts='2020-1-1 1:5:1'
diff --git a/tests/script/general/parser/projection_limit_offset.sim b/tests/script/general/parser/projection_limit_offset.sim
index e8a4c75a12f7ffcbe38cf2bd342dbace5e12cf02..a92493b7f4a9dfa180836360c7c39b37a4991c83 100644
--- a/tests/script/general/parser/projection_limit_offset.sim
+++ b/tests/script/general/parser/projection_limit_offset.sim
@@ -334,6 +334,9 @@ sql insert into tm0 values(10000, 1) (20000, 2)(30000, 3) (40000, NULL) (50000,
 
 #=============================tbase-1205
 sql select count(*) from tm1 where ts= now -1d interval(1h) fill(NULL);
+if $rows != 0 then
+  return -1
+endi
 
 print ===================>TD-1834
 sql select * from tm0 where ts>11000 and ts< 20000 order by ts asc
@@ -409,7 +412,7 @@ sql_error select k, sum(k)+1 from tm0;
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/selectResNum.sim b/tests/script/general/parser/selectResNum.sim
index 8f18a41d417ce8928dd2a991526641a17fc62d77..20b502447836409ca06c6acaebd7bd55921f7404 100644
--- a/tests/script/general/parser/selectResNum.sim
+++ b/tests/script/general/parser/selectResNum.sim
@@ -118,7 +118,7 @@ endw
 print ====== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ====== server restart completed
 sleep 100
diff --git a/tests/script/general/parser/select_from_cache_disk.sim b/tests/script/general/parser/select_from_cache_disk.sim
index 36a749cc3c4aa46bc2d52dd2352905f68a213c87..3d2cc0b70060ed9124205a00e3ea9922f2ed203b 100644
--- a/tests/script/general/parser/select_from_cache_disk.sim
+++ b/tests/script/general/parser/select_from_cache_disk.sim
@@ -35,7 +35,7 @@ sql insert into $tb values ('2018-09-17 09:00:00.030', 3)
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/set_tag_vals.sim b/tests/script/general/parser/set_tag_vals.sim
index 0b8ffd946f2bb052743faf4a732a06e4a68c5aec..92380ace8419c7756975fa4ce19482b845f804bf 100644
--- a/tests/script/general/parser/set_tag_vals.sim
+++ b/tests/script/general/parser/set_tag_vals.sim
@@ -61,7 +61,7 @@ while $i < $tbNum
 endw
 print ====== tables created
 
-sleep 2000
+sleep 500
 
 sql show tables
 if $rows != $tbNum then
diff --git a/tests/script/general/parser/single_row_in_tb.sim b/tests/script/general/parser/single_row_in_tb.sim
index 651f44a3a4dac3437e3c95bd3d8b42b632c58abf..bc9362904163c7abf9d32f517a31e503801fcab4 100644
--- a/tests/script/general/parser/single_row_in_tb.sim
+++ b/tests/script/general/parser/single_row_in_tb.sim
@@ -32,7 +32,7 @@ run general/parser/single_row_in_tb_query.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/slimit.sim b/tests/script/general/parser/slimit.sim
index 426104c1680c777214c5514ecf8dc251540f75b6..bfb97b52618b7167434680c41d16f23786312fea 100644
--- a/tests/script/general/parser/slimit.sim
+++ b/tests/script/general/parser/slimit.sim
@@ -97,7 +97,7 @@ run general/parser/slimit_query.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/slimit1.sim b/tests/script/general/parser/slimit1.sim
index 85cbe51aad166973367c094ac531fe7e58776468..901da4cab226bfaba705f7a0976731f017b8d941 100644
--- a/tests/script/general/parser/slimit1.sim
+++ b/tests/script/general/parser/slimit1.sim
@@ -56,7 +56,7 @@ run general/parser/slimit1_query.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/slimit_alter_tags.sim b/tests/script/general/parser/slimit_alter_tags.sim
index 1073e0a3cc254dad66436896216411b026354040..ad557e891b0cb0b715da1c494c98e4a16213db8e 100644
--- a/tests/script/general/parser/slimit_alter_tags.sim
+++ b/tests/script/general/parser/slimit_alter_tags.sim
@@ -93,7 +93,7 @@ if $data02 != tb0 then
   return -1
 endi
 
-sleep 2000
+sleep 500
 sql reset query cache
 sql select count(*), first(ts) from stb group by tg_added order by tg_added asc slimit 5 soffset 3
 if $rows != 5 then
@@ -171,7 +171,7 @@ endi
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/tbnameIn.sim b/tests/script/general/parser/tbnameIn.sim
index 65ed1ed65d50e96bee1b9185f1ecc5f2e18f376e..2ee5f38ab1b48a485be06376da08612bee9b98e8 100644
--- a/tests/script/general/parser/tbnameIn.sim
+++ b/tests/script/general/parser/tbnameIn.sim
@@ -67,7 +67,7 @@ run general/parser/tbnameIn_query.sim
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 
diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim
index 1dfdf9aac7732005bb9bf09bc456bd8284b20bf3..255389a2df12e470890ca74e9b3872a28bc119c3 100644
--- a/tests/script/general/parser/testSuite.sim
+++ b/tests/script/general/parser/testSuite.sim
@@ -1,84 +1,84 @@
-#run general/parser/alter.sim
-#sleep 100
-#run general/parser/alter1.sim
-#sleep 100
-#run general/parser/alter_stable.sim
-#sleep 100
-#run general/parser/auto_create_tb.sim
-#sleep 100
-#run general/parser/auto_create_tb_drop_tb.sim
-#sleep 100
-#run general/parser/col_arithmetic_operation.sim
-#sleep 100
-#run general/parser/columnValue.sim
-#sleep 100
-#run general/parser/commit.sim
-#sleep 100
-#run general/parser/create_db.sim
-#sleep 100
-#run general/parser/create_mt.sim
-#sleep 100
-#run general/parser/create_tb.sim
-#sleep 100
-#run general/parser/dbtbnameValidate.sim
-#sleep 100
-#run general/parser/fill.sim
-#sleep 100
-#run general/parser/fill_stb.sim
-#sleep 100
-##run general/parser/fill_us.sim #
-#sleep 100
-#run general/parser/first_last.sim
-#sleep 100
-#run general/parser/import_commit1.sim
-#sleep 100
-#run general/parser/import_commit2.sim
-#sleep 100
-#run general/parser/import_commit3.sim
-#sleep 100
-##run general/parser/import_file.sim
-#sleep 100
-#run general/parser/insert_tb.sim
-#sleep 100
-#run general/parser/tags_dynamically_specifiy.sim
-#sleep 100
-#run general/parser/interp.sim
-#sleep 100
-#run general/parser/lastrow.sim
-#sleep 100
-#run general/parser/limit.sim
-#sleep 100
-#run general/parser/limit1.sim
-#sleep 100
-#run general/parser/limit1_tblocks100.sim
-#sleep 100
-#run general/parser/limit2.sim
-#sleep 100
-#run general/parser/mixed_blocks.sim
-#sleep 100
-#run general/parser/nchar.sim
-#sleep 100
-#run general/parser/null_char.sim
-#sleep 100
-#run general/parser/selectResNum.sim
-#sleep 100
-#run general/parser/select_across_vnodes.sim
-#sleep 100
-#run general/parser/select_from_cache_disk.sim
-#sleep 100
-#run general/parser/set_tag_vals.sim
-#sleep 100
-#run general/parser/single_row_in_tb.sim
-#sleep 100
-#run general/parser/slimit.sim
-#sleep 100
-#run general/parser/slimit1.sim
-#sleep 100
-#run general/parser/slimit_alter_tags.sim
-#sleep 100
-#run general/parser/tbnameIn.sim
-#sleep 100
-#run general/parser/slimit_alter_tags.sim # persistent failed
+run general/parser/alter.sim
+sleep 100
+run general/parser/alter1.sim
+sleep 100
+run general/parser/alter_stable.sim
+sleep 100
+run general/parser/auto_create_tb.sim
+sleep 100
+run general/parser/auto_create_tb_drop_tb.sim
+sleep 100
+run general/parser/col_arithmetic_operation.sim
+sleep 100
+run general/parser/columnValue.sim
+sleep 100
+run general/parser/commit.sim
+sleep 100
+run general/parser/create_db.sim
+sleep 100
+run general/parser/create_mt.sim
+sleep 100
+run general/parser/create_tb.sim
+sleep 100
+run general/parser/dbtbnameValidate.sim
+sleep 100
+run general/parser/fill.sim
+sleep 100
+run general/parser/fill_stb.sim
+sleep 100
+#run general/parser/fill_us.sim #
+sleep 100
+run general/parser/first_last.sim
+sleep 100
+run general/parser/import_commit1.sim
+sleep 100
+run general/parser/import_commit2.sim
+sleep 100
+run general/parser/import_commit3.sim
+sleep 100
+#run general/parser/import_file.sim
+sleep 100
+run general/parser/insert_tb.sim
+sleep 100
+run general/parser/tags_dynamically_specifiy.sim
+sleep 100
+run general/parser/interp.sim
+sleep 100
+run general/parser/lastrow.sim
+sleep 100
+run general/parser/limit.sim
+sleep 100
+run general/parser/limit1.sim
+sleep 100
+run general/parser/limit1_tblocks100.sim
+sleep 100
+run general/parser/limit2.sim
+sleep 100
+run general/parser/mixed_blocks.sim
+sleep 100
+run general/parser/nchar.sim
+sleep 100
+run general/parser/null_char.sim
+sleep 100
+run general/parser/selectResNum.sim
+sleep 100
+run general/parser/select_across_vnodes.sim
+sleep 100
+run general/parser/select_from_cache_disk.sim
+sleep 100
+run general/parser/set_tag_vals.sim
+sleep 100
+run general/parser/single_row_in_tb.sim
+sleep 100
+run general/parser/slimit.sim
+sleep 100
+run general/parser/slimit1.sim
+sleep 100
+run general/parser/slimit_alter_tags.sim
+sleep 100
+run general/parser/tbnameIn.sim
+sleep 100
+run general/parser/slimit_alter_tags.sim # persistent failed
 sleep 100
 run general/parser/join.sim
 sleep 100
diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim
index 57378331e80489e9c3d3eaaaf321730e7e132f55..f5c78d07a1d362ffdd5ebe5c989d88cc35a33e72 100644
--- a/tests/script/general/parser/topbot.sim
+++ b/tests/script/general/parser/topbot.sim
@@ -128,7 +128,7 @@ sql insert into test values(29999, 1)(70000, 2)(80000, 3)
 print ================== restart server to commit data into disk
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 print ================== server restart completed
 sql connect
diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim
index 1f148cbb2cb9a4b9ee4b0eaeffc8dad6e6d576d0..5f9e0ec2083de0ee4acdd3f727189a229e6eaa38 100644
--- a/tests/script/general/parser/where.sim
+++ b/tests/script/general/parser/where.sim
@@ -324,7 +324,7 @@ while $i < 1
 endw
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 2000
+sleep 500
 system sh/exec.sh -n dnode1 -s start
 
diff --git a/tests/script/jenkins/basic_2.txt b/tests/script/jenkins/basic_2.txt
index 20e574711a8b2989ab017611b49aa2c1f4b52027..5a2a6f4062e9f35f9ef77222383ecfc800ea7574 100644
--- a/tests/script/jenkins/basic_2.txt
+++ b/tests/script/jenkins/basic_2.txt
@@ -72,4 +72,3 @@ cd ../../../debug; make
 ./test.sh -f unique/cluster/cache.sim
 ./test.sh -f unique/cluster/vgroup100.sim
-./test.sh -f unique/column/replica3.sim
\ No newline at end of file
diff --git a/tests/script/jenkins/basic_4.txt b/tests/script/jenkins/basic_4.txt
index 895281f218717af7dac3b8e1f890ce3e03b81609..5a7d23df719f737d0c5a3c85acfd3875b87872ab 100644
--- a/tests/script/jenkins/basic_4.txt
+++ b/tests/script/jenkins/basic_4.txt
@@ -1,14 +1,3 @@
-./test.sh -f unique/dnode/alternativeRole.sim
-./test.sh -f unique/dnode/balance1.sim
-./test.sh -f unique/dnode/balance2.sim
-./test.sh -f unique/dnode/balance3.sim
-./test.sh -f unique/dnode/balancex.sim
-./test.sh -f unique/dnode/offline1.sim
-./test.sh -f unique/dnode/offline2.sim
-./test.sh -f unique/dnode/reason.sim
-./test.sh -f unique/dnode/remove1.sim
-./test.sh -f unique/dnode/remove2.sim
-./test.sh -f unique/dnode/vnode_clean.sim
 
 ./test.sh -f unique/http/admin.sim
 ./test.sh -f unique/http/opentsdb.sim
@@ -46,4 +35,12 @@
 ./test.sh -f general/stable/refcount.sim
 ./test.sh -f general/stable/show.sim
 ./test.sh -f general/stable/values.sim
-./test.sh -f general/stable/vnode3.sim
\ No newline at end of file
+./test.sh -f general/stable/vnode3.sim
+
+./test.sh -f unique/column/replica3.sim
+./test.sh -f issue/TD-2713.sim
+./test.sh -f general/parser/select_distinct_tag.sim
+./test.sh -f unique/mnode/mgmt30.sim
+./test.sh -f issue/TD-2677.sim
+./test.sh -f issue/TD-2680.sim
+./test.sh -f unique/dnode/lossdata.sim
\ No newline at end of file
diff --git a/tests/script/jenkins/basic_5.txt b/tests/script/jenkins/basic_5.txt
index 66c2ce36b2c60458471fe9c8c56c752c9cb641d4..f89be9499e7a672a3c72646614552a43d1537463 100644
--- a/tests/script/jenkins/basic_5.txt
+++ b/tests/script/jenkins/basic_5.txt
@@ -1,3 +1,11 @@
+./test.sh -f unique/dnode/alternativeRole.sim
+./test.sh -f unique/dnode/balance1.sim
+./test.sh -f unique/dnode/balance2.sim
+./test.sh -f unique/dnode/balance3.sim
+./test.sh -f unique/dnode/balancex.sim
+./test.sh -f unique/dnode/offline1.sim
+./test.sh -f unique/dnode/offline2.sim
+
 ./test.sh -f general/stream/metrics_del.sim
 ./test.sh -f general/stream/metrics_replica1_vnoden.sim
 ./test.sh -f general/stream/restart_stream.sim
diff --git a/tests/script/jenkins/basic_6.txt b/tests/script/jenkins/basic_6.txt
index 893346e6ca70529aeb9f37e17f6bf4edfb125183..9156360a9f548ba17d9b96d297e839e6b74aaa55 100644
--- a/tests/script/jenkins/basic_6.txt
+++ b/tests/script/jenkins/basic_6.txt
@@ -1,3 +1,8 @@
+./test.sh -f unique/dnode/reason.sim
+./test.sh -f unique/dnode/remove1.sim
+./test.sh -f unique/dnode/remove2.sim
+./test.sh -f unique/dnode/vnode_clean.sim
+
 ./test.sh -f unique/db/commit.sim
 ./test.sh -f unique/db/delete.sim
 ./test.sh -f unique/db/delete_part.sim
diff --git a/tests/test-all.sh b/tests/test-all.sh
index 2374750be461e7f19e0e154a3ed4461506e6a35a..f03e3f88c39a7289f7b0a4b9a06da8d2ab47cd4a 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -21,8 +21,8 @@ function runSimCaseOneByOne {
     if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then
       case=`echo $line | grep sim$ |awk '{print $NF}'`
       IN_TDINTERNAL="community"
-      start_time=`date +%s`
-      IN_TDINTERNAL="community"
+      start_time=`date +%s`
+      date +%F\ %T | tee -a out.log
       if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
         echo -n $case
         ./test.sh -f $case > /dev/null 2>&1 && \
@@ -53,6 +53,7 @@ function runSimCaseOneByOnefq {
 
       start_time=`date +%s`
      IN_TDINTERNAL="community"
+      date +%F\ %T | tee -a out.log
       if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
         echo -n $case
         ./test.sh -f $case > /dev/null 2>&1 && \
@@ -94,6 +95,7 @@ function runPyCaseOneByOne {
         case=`echo $line|awk '{print $NF}'`
       fi
       start_time=`date +%s`
+      date +%F\ %T | tee -a pytest-out.log
       echo -n $case
       $line > /dev/null 2>&1 && \
         echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
@@ -122,6 +124,7 @@ function runPyCaseOneByOnefq {
         case=`echo $line|awk '{print $NF}'`
       fi
       start_time=`date +%s`
+      date +%F\ %T | tee -a pytest-out.log
       echo -n $case
       $line > /dev/null 2>&1 && \
         echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
@@ -261,6 +264,9 @@ if [ "$2" != "sim" ]; then
   elif [ "$1" == "p3" ]; then
     echo "### run Python_3 test ###"
     runPyCaseOneByOnefq pytest_3.sh
+  elif [ "$1" == "p4" ]; then
+    echo "### run Python_4 test ###"
+    runPyCaseOneByOnefq pytest_4.sh
   elif [ "$1" == "b2" ] || [ "$1" == "b3" ]; then
     exit $(($totalFailed + $totalPyFailed))
   elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 33e1528b70b5525da23723dc9bf6309870d172ee..2eb8ee1614b286f3827705865cf073a7eded0c88 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.5)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 PROJECT(TDengine)
 
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c
index 3a409ecbf900bfe05c423963020d7d3a37cf4771..40937e70532897fa6776965b861a8d678532d8aa 100644
--- a/tests/tsim/src/simSystem.c
+++ b/tests/tsim/src/simSystem.c
@@ -81,7 +81,9 @@ char *simParseHostName(char *varName) {
 }
 
 bool simSystemInit() {
-  taos_init();
+  if (taos_init()) {
+    return false;
+  }
   taosGetFqdn(simHostName);
   simInitsimCmdList();
   memset(simScriptList, 0, sizeof(SScript *) * MAX_MAIN_SCRIPT_NUM);
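The simSystem.c change makes simSystemInit() propagate a taos_init() failure instead of ignoring it, so the harness aborts up front rather than crashing later on an uninitialized client. The same fail-fast shape in Python, as a generic sketch (the initializer here is a stand-in, not the taos API):

    def system_init(init_fn):
        """Fail fast: report an initializer failure instead of limping on."""
        try:
            rc = init_fn()
        except Exception:
            return False
        return rc == 0      # C convention: zero is success, non-zero is failure

    assert system_init(lambda: 0) is True
    assert system_init(lambda: -1) is False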