diff --git a/README.md b/README.md index edca04afd486687ea8653e955ae50da457f77ab9..db6a4e304bd3f3cdd4252e82a9ccbcabba2e9a54 100644 --- a/README.md +++ b/README.md @@ -234,7 +234,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list [Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list sudo apt-get update -apt-get policy tdengine +apt-cache policy tdengine sudo apt-get install tdengine ``` diff --git a/cmake/define.inc b/cmake/define.inc index 92044b8c2dd3710c5a1808abcecd7d2358230e7a..828ba345dc8291ee4d3be64dd055280906e92805 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -53,6 +53,14 @@ IF (TD_PRO) ADD_DEFINITIONS(-D_TD_PRO_) ENDIF () +IF (TD_KH) + ADD_DEFINITIONS(-D_TD_KH_) +ENDIF () + +IF (TD_JH) + ADD_DEFINITIONS(-D_TD_JH_) +ENDIF () + IF (TD_MEM_CHECK) ADD_DEFINITIONS(-DTAOS_MEM_CHECK) ENDIF () @@ -152,6 +160,17 @@ IF (TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF () +IF (${BUILD_LUA} MATCHES "false") + SET(TD_BUILD_LUA FALSE) +ENDIF () + +IF (TD_BUILD_LUA) + MESSAGE("Enable lua") + ADD_DEFINITIONS(-DLUA_EMBEDDED) +ELSE () + MESSAGE("Disable lua") +ENDIF () + IF ("${AVRO_SUPPORT}" MATCHES "true") SET(TD_AVRO_SUPPORT TRUE) ELSEIF ("${AVRO_SUPPORT}" MATCHES "false") diff --git a/cmake/input.inc b/cmake/input.inc index 0812711a5824ce0b328374fcdd04fc5f229ad01c..eef58fae29bc8675f6f55b22be856e0779a22396 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -52,6 +52,12 @@ ELSEIF (${DBNAME} MATCHES "tq") ELSEIF (${DBNAME} MATCHES "pro") SET(TD_PRO TRUE) MESSAGE(STATUS "pro is true") +ELSEIF (${DBNAME} MATCHES "kh") + SET(TD_KH TRUE) + MESSAGE(STATUS "kh is true") +ELSEIF (${DBNAME} MATCHES "jh") + SET(TD_JH TRUE) + MESSAGE(STATUS "jh is true") ENDIF () IF (${DLLTYPE} MATCHES "go") @@ -92,6 +98,8 @@ ENDIF () SET(TD_BUILD_HTTP FALSE) +SET(TD_BUILD_LUA TRUE) + SET(TD_AVRO_SUPPORT FALSE) SET(TD_MEMORY_SANITIZER FALSE) diff --git a/cmake/install.inc b/cmake/install.inc index c90aa3f9511e416106309e603853028e7096f082..e78bba8d8d293f8c9c76e00f22b74efedf9591b3 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -5,8 +5,14 @@ IF (TD_LINUX) ELSEIF (TD_WINDOWS) IF (TD_POWER) SET(CMAKE_INSTALL_PREFIX C:/PowerDB) + ELSEIF (TD_TQ) + SET(CMAKE_INSTALL_PREFIX C:/TQueue) ELSEIF (TD_PRO) SET(CMAKE_INSTALL_PREFIX C:/ProDB) + ELSEIF (TD_KH) + SET(CMAKE_INSTALL_PREFIX C:/KingHistorian) + ELSEIF (TD_JH) + SET(CMAKE_INSTALL_PREFIX C:/jh_iot) ELSE () SET(CMAKE_INSTALL_PREFIX C:/TDengine) ENDIF () @@ -25,8 +31,14 @@ ELSEIF (TD_WINDOWS) IF (TD_POWER) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .) + ELSEIF (TD_TQ) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/tq.exe DESTINATION .) ELSEIF (TD_PRO) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/prodbc.exe DESTINATION .) + ELSEIF (TD_KH) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/khclient.exe DESTINATION .) + ELSEIF (TD_JH) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .) ELSE () INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .) 
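As a quick illustration of the new build switches introduced in cmake/input.inc and cmake/define.inc above, a configure step might combine them as follows. This is a minimal sketch, not a documented invocation: the out-of-source `build` directory and the plain `make` step are assumptions, while `DBNAME` and `BUILD_LUA` are the cache variables actually handled in this patch.

```bash
# Minimal sketch (assumed build layout): select the "kh" edition and skip the bundled Lua.
# DBNAME is matched in cmake/input.inc; BUILD_LUA=false is matched in cmake/define.inc.
mkdir -p build && cd build
cmake .. -DDBNAME=kh -DBUILD_LUA=false
make
```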
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 38f36c4ed6678675cecfa9c0da1a3d065b58da86..45eaf6495d0f20c512d175c880af9bc1ed8f0ba6 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -15,7 +15,10 @@ ADD_SUBDIRECTORY(cJson) ADD_SUBDIRECTORY(wepoll) ADD_SUBDIRECTORY(MsvcLibX) ADD_SUBDIRECTORY(rmonotonic) -ADD_SUBDIRECTORY(lua) + +IF (TD_BUILD_LUA) + ADD_SUBDIRECTORY(lua) +ENDIF () IF (TD_LINUX AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index 79388a2edb9404a0f7b31b9182eb5ce2cb0d52be..232d2633f1013c7fc21b862023cd4f6552a0524c 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -357,7 +357,7 @@ KILL STREAM ; TDengine启动后,会自动创建一个监测数据库SYS,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在SYS库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。 -这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 +这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项monitor将其关闭或打开。 ## 文件目录结构 diff --git a/documentation/webdocs/markdowndocs/administrator.md b/documentation/webdocs/markdowndocs/administrator.md index 58b9865be7c044d321e8b43bb6908ce8f1c967e9..05fd408a33f587654c78fc6a62da858b2e15896f 100644 --- a/documentation/webdocs/markdowndocs/administrator.md +++ b/documentation/webdocs/markdowndocs/administrator.md @@ -36,7 +36,7 @@ This section lists only the most important configuration parameters. Please chec - maxUsers: maximum number of users allowed - maxDbs: maximum number of databases allowed - maxTables: maximum number of tables allowed -- enableMonitor: turn on/off system monitoring, 0: off, 1: on +- monitor: turn on/off system monitoring, 0: off, 1: on - logDir: log directory, default is /var/log/taos - numOfLogLines: maximum number of lines in the log file - debugFlag: log level, 131: only error and warnings, 135: all diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md index 3f91dbb35130a2ff78e5ef23219b79433af33ce3..5838d63665f11e5b77c5c83d181424de811f0926 100644 --- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md @@ -4,7 +4,7 @@ 自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosdemo 用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosdemo 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosdemo 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。 -运行 taosdemo 很简单,通过下载 TDengine 安装包(https://www.taosdata.com/cn/all-downloads/)或者自行下载 TDengine 代码(https://github.com/taosdata/TDengine)编译都可以在安装目录或者编译结果目录中找到并运行。 +运行 taosdemo 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。 接下来本文为大家讲解 taosdemo 的使用介绍及注意事项。 @@ -439,7 +439,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维 附录 - 完整 taosdemo 参数介绍 -- -taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用json格式的配置文件。 +taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 一、命令行参数 -f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 @@ -505,10 +505,10 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 --help: 打印命令参数列表。 -二、json格式的配置文件中所有参数说明 +二、JSON 格式的配置文件中所有参数说明 taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 -1、插入功能测试的json配置文件 +1、插入功能测试的 JSON 配置文件 { "filetype": "insert", @@ 
-695,7 +695,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta "count":该类型的连续列个数,可选项,缺省是1。 }] -2、查询功能测试的json配置文件 +2、查询功能测试的 JSON 配置文件 { "filetype": "query", @@ -784,7 +784,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。 -3、订阅功能测试的json配置文件 +3、订阅功能测试的 JSON 配置文件 { "filetype":"subscribe", diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index adbba4603b94c689cab2e0aaaedf0e232ae3d1f4..5f887df914ef06d40f3b3b8bf6ef8dfe18fe6ecf 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -178,7 +178,7 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); ## taosdemo 详细功能列表 taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。 -taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性能测试?](https://www.taosdata.com/cn/documentation/getting-started/taosdemo )。 +taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性能测试](https://www.taosdata.com/cn/documentation/getting-started/taosdemo )。 ## 客户端和报警模块 @@ -190,7 +190,7 @@ taosdemo 详细使用方法请参照 [如何使用taosdemo对TDengine进行性 ### TDengine 服务器支持的平台列表 -| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | +| | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | | X64 | ● | ● | | ○ | ● | ● | ● | | 龙芯 MIPS64 | | | ● | | | | | diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index a82aecd97c832f9b7f276ec27832097e46845dfc..4a77e09ecafb6b5e444dc8d54383ce96bd751ba7 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -34,7 +34,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 **无模式写入行协议** -
TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 Json 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。 +
TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 JSON 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。 对于InfluxDB、OpenTSDB的标准写入协议请参考各自的文档。下面首先以 InfluxDB 的行协议为基础,介绍 TDengine 扩展的协议内容,允许用户采用更加精细的方式控制(超级表)模式。 @@ -99,8 +99,8 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 | **序号** | **值** | **说明** | | ---- | ------------------- | ------------ | | 1 | SML_LINE_PROTOCOL | InfluxDB行协议(Line Protocol) | -| 2 | SML_TELNET_PROTOCOL | OpenTSDB文本行协议 | -| 3 | SML_JSON_PROTOCOL | Json协议格式 | +| 2 | SML_TELNET_PROTOCOL | OpenTSDB 文本行协议 | +| 3 | SML_JSON_PROTOCOL | JSON 协议格式 |
在 SML_LINE_PROTOCOL 解析模式下,需要用户指定输入的时间戳的时间分辨率。可用的时间分辨率如下表所示:
@@ -145,7 +145,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
如果是无模式写入过程中的数据本身错误,应用会得到 TSDB_CODE_TSC_LINE_SYNTAX_ERROR 错误信息,该错误信息表明错误发生在写入文本中。其他的错误码与原系统一致,可以通过 taos_errstr 获取具体的错误原因。 **后续升级计划** -
当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 Taos Adapter 采用 REST 的方式直接写入无模式数据。 +
当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 taosAdapter 采用 REST 的方式直接写入无模式数据。 ## Prometheus 直接写入 @@ -241,10 +241,10 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf 直接写入(通过 taosadapter) +## Telegraf 直接写入(通过 taosAdapter) 安装 Telegraf 请参考[官方文档](https://portal.influxdata.com/downloads/)。 -TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。 配置方法,在 /etc/telegraf/telegraf.conf 增加如下文字,其中 database name 请填写希望在 TDengine 保存 Telegraf 数据的数据库名,TDengine server/cluster host、username和 password 填写 TDengine 实际值: ``` @@ -264,14 +264,14 @@ sudo systemctl start telegraf ``` 即可在 TDengine 中查询 metrics 数据库中 Telegraf 写入的数据。 -taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 +taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## collectd 直接写入(通过 taosadapter) +## collectd 直接写入(通过 taosAdapter) 安装 collectd,请参考[官方文档](https://collectd.org/download.shtml)。 -TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。 -在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值: +在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` LoadPlugin network @@ -282,15 +282,15 @@ LoadPlugin network ``` sudo systemctl start collectd ``` -taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 +taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## StatsD 直接写入(通过 taosadapter) +## StatsD 直接写入(通过 taosAdapter) 安装 StatsD 请参考[官方文档](https://github.com/statsd/statsd)。 -TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。 -在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值: +在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` backends 部分添加 "./backends/repeater" repeater 部分添加 { host:'', port: } @@ -305,12 +305,12 @@ port: 8125 } ``` -taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 +taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 ## 使用 Bailongma 2.0 接入 Telegraf 数据写入 -*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosadapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。 +*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosAdapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。 [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 4ef1c60112018cb29289314e199feda75ca7c2ba..d805809fc8e80bef6ce691c1ef8384a696dc05f3 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -114,7 +114,7 @@ taosd -C 下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是可以工作的,一般无需设置。**注意:配置文件参数修改后,需要重启*taosd*服务,或客户端应用才能生效。** -| **#** | **配置参数名称** | **内部** | **S\|C** | **单位** | **含义** | **取值范围** | **缺省值** | **备注** | +| **#** | **配置参数名称** | **内部** | **SC** | **单位** | **含义** | **取值范围** | **缺省值** | **补充说明** | | ----- | ----------------------- | -------- | -------- | -------- | 
------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | 1 | firstEP | | **SC** | | taosd启动时,主动连接的集群中首个dnode的end point | | localhost:6030 | | | 2 | secondEP | YES | **SC** | | taosd启动时,如果firstEp连接不上,尝试连接集群中第二个dnode的end point | | 无 | | @@ -559,6 +559,50 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 +### TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案 + +从 2.3.3.0 开始,监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) 了解如何使用 TDinsight 方案对 TDengine 进行监控。 + +我们提供了一个自动化脚本 `TDinsight.sh` 对TDinsight进行部署。 + +下载 `TDinsight.sh`: + +```bash +wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh +chmod +x TDinsight.sh +``` + +准备: + +1. TDengine Server 信息: + * TDengine RESTful 服务:对本地而言,可以是 http://localhost:6041 ,使用参数 `-a`。 + * TDengine 用户名和密码,使用 `-u` `-p` 参数设置。 + +2. Grafana 告警通知 + * 使用已经存在的Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。 + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E + ``` + + * 使用 TDengine 数据源插件内置的阿里云短信告警通知,使用 `-s` 启用之,并设置如下参数: + 1. 阿里云短信服务Key ID,参数 `-I` + 2. 阿里云短信服务Key Secret,参数 `-K` + 3. 阿里云短信服务签名,参数 `-S` + 4. 短信通知模板号,参数 `-C` + 5. 短信通知模板输入参数,JSON格式,参数 `-T`,如 `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` + 6. 逗号分隔的通知手机列表,参数 `-B` + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ + -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \ + -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' + ``` + +运行程序并重启 Grafana 服务,打开面板:。 + +更多使用场景和限制请参考[TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) 文档。 + ## 性能优化 diff --git a/documentation20/cn/13.faq/docs.md index eb5f20e708bb4bb592a1ab2d535fcf261457b989..9132e8dca63c47e4b22ad87ef9fd4d4a1997077a 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -185,23 +185,23 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端 | TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 | | TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 | | TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 | -| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 | -| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 | +| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 | +| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 | | TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | | | UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 | | UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 | ## 20. go 语言编写组件编译失败怎样解决?
-新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosadapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 -使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosadapter 仓库代码后再编译。 +新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosAdapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 +使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 -目前编译方式默认自动编译 taosadapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: +目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: ```sh go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.cn,direct ``` -如果希望继续使用之前的内置 httpd,可以关闭 taosadapter 编译,使用 +如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 `cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md index 5860e70ceafafadc21c5772c96515e0925897e3a..0073cf78340a1100ec97cb70685410ced0cf5d4e 100644 --- a/documentation20/cn/14.devops/02.collectd/docs.md +++ b/documentation20/cn/14.devops/02.collectd/docs.md @@ -40,7 +40,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ``` ### 配置 collectd -在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值: +在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` LoadPlugin network @@ -51,7 +51,7 @@ sudo systemctl start collectd ``` ### 配置 StatsD -在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值: +在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值: ``` backends 部分添加 "./backends/repeater" repeater 部分添加 { host:'', port: } diff --git a/documentation20/cn/14.devops/03.immigrate/docs.md b/documentation20/cn/14.devops/03.immigrate/docs.md index b2a68e1b15acef2d574600e59d1b18d890938ac6..980dc0c0f40632de40b54aec4de719eea4d8bc59 100644 --- a/documentation20/cn/14.devops/03.immigrate/docs.md +++ b/documentation20/cn/14.devops/03.immigrate/docs.md @@ -8,7 +8,7 @@ - 数据写入和查询的性能远超 OpenTSDB; - 针对时序数据的高效压缩机制,压缩后在磁盘上的存储空间不到 1/5; -- 安装部署非常简单,单一安装包完成安装部署,除了 taosadapter 需要依赖 Go 运行环境外,不依赖其他的第三方软件,整个安装部署过程秒级搞定; +- 安装部署非常简单,单一安装包完成安装部署,除了 taosAdapter 需要依赖 Go 运行环境外,不依赖其他的第三方软件,整个安装部署过程秒级搞定; - 提供的内建函数覆盖 OpenTSDB 支持的全部查询函数,还支持更多的时序数据查询函数、标量函数及聚合函数,支持多种时间窗口聚合、连接查询、表达式运算、多种分组聚合、用户定义排序、以及用户定义函数等高级查询功能。采用类 SQL 的语法规则,更加简单易学,基本上没有学习成本。 - 支持多达 128 个标签,标签总长度可达到 16 KB; - 除 HTTP 之外,还提供 Java、Python、C、Rust、Go 等多种语言的接口,支持 JDBC 等多种企业级标准连接器协议。 @@ -40,9 +40,9 @@ - **调整数据收集器配置** -在 TDengine 2.3 版本中,后台服务 taosd 启动后一个 HTTP 的服务 taosadapter 也会自动启用*。*利用 taosadapter 能够兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/Json 写入协议,可以将 collectd 和 StatsD 收集的数据直接推送到TDengine。 +在 TDengine 2.3 版本中,后台服务 taosd 启动后一个 HTTP 的服务 taosAdapter 也会自动启用*。*利用 taosAdapter 能够兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议,可以将 collectd 和 StatsD 收集的数据直接推送到TDengine。 -如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosadapter 部署的节点 IP 地址和端口。假设 taosadapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下: +如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下: ```html LoadPlugin write_tsdb @@ -57,7 +57,7 @@ LoadPlugin write_tsdb ``` -即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosadapter, taosadapter 将调用 API 将数据写入到 taosd 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 +即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter, taosAdapter 将调用 API 将数据写入到 taosd 中,从而完成数据的写入工作。如果你使用的是 
StatsD 相应地调整配置文件信息。 - **调整看板(Dashborad)系统** @@ -106,7 +106,7 @@ sudo systemctl start grafana-server TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。 -截止到 2.3.0.x 版本,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、Json 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 +截止到 2.3.0.x 版本,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项: @@ -353,7 +353,7 @@ Select sum(val) from table_name 完整示例: ```json -//OpenTSDB查询Json +//OpenTSDB查询JSON query = { “start”:1510560000, “end”: 1515000009, diff --git a/documentation20/en/11.administrator/docs.md index a2c2486b8e96cab95fad0f90470726d508dd63f7..01b09e9db4d55da2241539d0708ef033aa8d0148 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -404,7 +404,52 @@ Force to turn off the stream computing, in which stream-id is the connection-id: After TDengine is started, it will automatically create a monitoring database log and write the server's CPU, memory, hard disk space, bandwidth, number of requests, disk read-write speed, slow query and other information into the database regularly. TDengine also records important system operations (such as logging in, creating, deleting databases, etc.) logs and various error alarm information and stores them in the log database. The system administrator can view the database directly from CLI or view the monitoring information through GUI on WEB. -The collection of these monitoring metrics is turned on by default, but you can modify option enableMonitor in the configuration file to turn it off or on. +The collection of these monitoring metrics is turned on by default, but you can modify option monitor in the configuration file to turn it off or on. + +### TDinsight - Monitor TDengine with Grafana + Data Source + +Starting from v2.3.3.0, TDengine's log database provides more metrics for resources and status monitoring. Here we introduce a zero-dependency monitoring solution - we call it TDinsight - with Grafana. You can find the documentation on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md). + +We provide an automation shell script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.sh) as a shortcut to help set up TDinsight on the Grafana server. + +First, download `TDinsight.sh` from GitHub: + +```bash +wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh +chmod +x TDinsight.sh +``` + +Some CLI options are needed to use the script: + +1. TDengine server information: + + - TDengine RESTful endpoint, like `http://localhost:6041`, will be used with option `-a`. + - TDengine user `-u` (`root` by default), and password with `-p` (`taosdata` by default). + +2. Grafana alerting notifications. There are two ways to set this up: + 1. To use existing Grafana notification channel with `uid`, option `-E`.
The `uid` could be retrieved with `curl -u admin:admin localhost:3000/api/alert-notifications |jq '.[]| .uid + "," + .name' -r`, then use it like this: + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E + ``` + + 2. Use the TDengine data source plugin's built-in [Aliyun SMS](https://www.aliyun.com/product/sms) alerting support with the `-s` flag, and input these options: + 1. Access key id with option `-I` + 2. Access key secret with option `-K` + 3. Access key sign name with option `-S` + 4. Message template code with option `-C` + 5. Message template params in JSON format with option `-T`, e.g. `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`. + 6. `,`-separated phone numbers list with option `-B` + + ```bash + sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ + -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \ + -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' + ``` + +Follow the usage of the script, then restart the grafana-server service and open the dashboard. + +Refer to the [TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) README for more scenarios and limitations of the script, and for descriptions of all the TDinsight metrics. ## File Directory Structure diff --git a/importSampleData/README.md index 56c5be0da422aadc5e05fe000ab83c312d29b6c8..c945cf52cb82723681f37efc42d3325f89011d39 100644 --- a/importSampleData/README.md +++ b/importSampleData/README.md @@ -1,36 +1,28 @@ -## 样例数据导入 +# 样例数据导入 该工具可以根据用户提供的 `json` 或 `csv` 格式样例数据文件快速导入 `TDengine`,目前仅能在 Linux 上运行。 为了体验写入和查询性能,可以对样例数据进行横向、纵向扩展。横向扩展是指将一个表(监测点)的数据克隆到多张表,纵向扩展是指将样例数据中的一段时间范围内的数据在时间轴上复制。该工具还支持历史数据导入至当前时间后持续导入,这样可以测试插入和查询并行进行的场景,以模拟真实环境。 -## 下载安装 +## 编译安装 -### 下载可执行文件 +由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2]。执行以下命令即可编译成可执行文件 `bin/taosimport`。 -由于该工具使用 go 语言开发,为了方便使用,项目中已经提供了编译好的可执行文件 `bin/taosimport`。通过 `git clone https://github.com/taosdata/TDengine.git` 命令或者直接下载 `ZIP` 文件解压进入样例导入程序目录 `cd importSampleData`,执行 `bin/taosimport`。 - -### go 源码编译 - -由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2],而且需要安装 TDengine 的 Go Connector, 具体请参考[TDengine 连接器文档][3]。安装完成之后,执行以下命令即可编译成可执行文件 `bin/taosimport`。 ```shell -go get https://github.com/taosdata/TDengine/importSampleData -cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData +go mod tidy go build -o bin/taosimport app/main.go ``` -> 注:由于目前 TDengine 的 go connector 只支持 linux 环境,所以该工具暂时只能在 linux 系统中运行。 -> 如果 go get 失败可以下载之后复制 `github.com/taosdata/TDengine/importSampleData` 文件夹到 $GOPATH 的 src 目录下再执行 `go build -o bin/taosimport app/main.go`。 - ## 使用 ### 快速体验 执行命令 `bin/taosimport` 会根据默认配置执行以下操作: + 1. 创建数据库 - 自动创建名称为 `test_yyyyMMdd` 的数据库。 - + 自动创建名称为 `test_yyyyMMdd` 的数据库,`yyyyMMdd` 是当前日期,如`20211111`。 + 2.
创建超级表 根据配置文件 `config/cfg.toml` 中指定的 `sensor_info` 场景信息创建相应的超级表。 @@ -48,21 +40,25 @@ go build -o bin/taosimport app/main.go taos> use test_yyyyMMdd; taos> select count(*) from s_sensor_info; ``` + * 查询各个分组的记录数 ```shell taos> select count(*) from s_sensor_info group by devgroup; ``` + * 按 1h 间隔查询各聚合指标 ```shell taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h); ``` + * 查询指定位置最新上传指标 ```shell taos> select last(*) from s_sensor_info where location = 'beijing'; ``` + > 更多查询及函数使用请参考 [数据查询][4] ### 详细使用说明 @@ -70,23 +66,23 @@ go build -o bin/taosimport app/main.go 执行命令 `bin/taosimport -h` 可以查看详细参数使用说明: * -cfg string - + 导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 `config/cfg.toml`。 - + * -cases string 需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 `[usecase]` 查看,可同时导入多个场景,中间使用逗号分隔,如:`sensor_info,camera_detection`,默认为 `sensor_info`。 - + * -hnum int 需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 `t_0` 数据,指定 hnum 为 2 时会根据原有表名创建 `t_0、t_1` 两张子表。默认为 100。 - + * -vnum int 需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制1000 次。 * -delay int - + 当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。 * -tick int @@ -102,25 +98,25 @@ go build -o bin/taosimport app/main.go 当 save 为 1 时保存统计信息的表名, 默认 statistic。 * -auto int - + 是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0。 - + * -start string 导入的记录开始时间,格式为 `"yyyy-MM-dd HH:mm:ss.SSS"`,不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空。 - + * -interval int 导入的记录时间间隔,该设置只会在指定 `auto=1` 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。 * -thread int - + 执行导入数据的线程数目,默认为 10。 * -batch int - + 执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录。 - + * -host string 导入的 TDengine 服务器 IP,默认为 127.0.0.1。 @@ -138,7 +134,7 @@ go build -o bin/taosimport app/main.go 导入的 TDengine 用户密码,默认为 taosdata。 * -dropdb int - + 导入数据之前是否删除数据库,1 是,0 否, 默认 0。 * -db string @@ -160,7 +156,7 @@ go build -o bin/taosimport app/main.go 执行上述命令后会将 sensor_info 场景的数据横向扩展2倍从指定时间 `2019-12-12 00:00:00.000` 开始且记录间隔时间为 5000 毫秒开始导入,导入至当前时间后会自动持续导入。 ### config/cfg.toml 配置文件说明 - + ``` toml # 传感器场景 [sensor_info] # 场景名称 @@ -237,8 +233,6 @@ devid,location,color,devgroup,ts,temperature,humidity 0, beijing, white, 0, 1575129601000, 22, 14.377142 ``` - - [1]: https://github.com/taosdata/TDengine [2]: https://golang.org/doc/install [3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector diff --git a/importSampleData/go.mod b/importSampleData/go.mod index fa1d978e597b3eb5b9f35e45f599d5a0f97ff267..d2e58d302b3c917922206cbfc3a7d5afef8266c9 100644 --- a/importSampleData/go.mod +++ b/importSampleData/go.mod @@ -3,6 +3,6 @@ module github.com/taosdata/TDengine/importSampleData go 1.13 require ( - github.com/pelletier/go-toml v1.9.0 // indirect - github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect + github.com/pelletier/go-toml v1.9.0 + github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 ) diff --git a/packaging/release.sh b/packaging/release.sh index c82d5704ac5c4d89837f5afe4b1f6e27419279cc..8adc332a8e4cdf6652070342ba2c718e3f4888ec 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -11,7 +11,7 @@ set -e # -V [stable | beta] # -l [full | lite] # -s [static | dynamic] -# -d [taos | power | tq ] +# -d [taos | power | tq | pro | kh | jh] # -n [2.0.0.3] # -m [2.0.0.0] @@ -22,7 +22,7 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] 
pagMode=full # [full | lite] soMode=dynamic # [static | dynamic] -dbName=taos # [taos | power | tq | pro] +dbName=taos # [taos | power | tq | pro | kh | jh] allocator=glibc # [glibc | jemalloc] verNumber="" verNumberComp="1.0.0.0" @@ -78,7 +78,7 @@ do echo " -l [full | lite] " echo " -a [glibc | jemalloc] " echo " -s [static | dynamic] " - echo " -d [taos | power | tq | pro] " + echo " -d [taos | power | tq | pro | kh | jh] " echo " -n [version number] " echo " -m [compatible version number] " exit 0 @@ -192,9 +192,217 @@ else allocator_macro="" fi +# for powerdb +if [[ "$dbName" == "power" ]]; then + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/PowerDB/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/power\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/powerdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/power_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/power config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/powerdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/powerlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/powerinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/kit/taosdump/taosdump.c + sed -i "s/TDengine/Power/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/Default is taosdata/Default is power/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/power\/power\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/PowerDB/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/PowerDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by PowerDB, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"power> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for tq +if [[ "$dbName" == "tq" ]]; then + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/TQueue/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/tq\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/tqdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/tq_history/g" 
${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/tq config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/tqdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/tqlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/tqinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/kit/taosdump/taosdump.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/Default is taosdata/Default is tqueue/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/tq\/tq\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/TQ/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by TQ, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"tq> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for prodb if [[ "$dbName" == "pro" ]]; then - sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c - sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/ProDB/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/prodbc\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/prodemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/prodb_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/ProDB/g" 
${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/prodlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/prolog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/proinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/kit/taosdump/taosdump.c + sed -i "s/Default is taosdata/Default is prodb/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/ProDB\/prodb\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2020 by Hanatech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"ProDB> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for KingHistorian +if [[ "$dbName" == "kh" ]]; then + # cmake/install.inc + sed -i "s/C:\/TDengine/C:\/KingHistorian/g" ${top_dir}/cmake/install.inc + sed -i "s/taos\.exe/khclient\.exe/g" ${top_dir}/cmake/install.inc + sed -i "s/taosdemo\.exe/khdemo\.exe/g" ${top_dir}/cmake/install.inc + # src/kit/shell/inc/shell.h + sed -i "s/taos_history/kh_history/g" ${top_dir}/src/kit/shell/inc/shell.h + # src/inc/taosdef.h + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/inc/taosdef.h + # src/util/src/tconfig.c + sed -i "s/taos config/kh config/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/util/src/tconfig.c + sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/util/src/tconfig.c + # src/kit/taosdemo/taosdemo.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + sed -i "s/support@taosdata.com/support@wellintech.com/g" ${top_dir}/src/kit/taosdemo/taosdemo.c + # src/util/src/tlog.c + sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/util/src/tlog.c + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeMain.c + sed -i "s/taosdlog/khserverlog/g" ${top_dir}/src/dnode/src/dnodeMain.c + # src/client/src/tscSystem.c + sed -i "s/taoslog/khclientlog/g" ${top_dir}/src/client/src/tscSystem.c + # src/util/src/tnote.c + sed -i "s/taosinfo/khinfo/g" ${top_dir}/src/util/src/tnote.c + # src/dnode/CMakeLists.txt + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt + # src/dnode/CMakeLists.txt + sed -i "s/Default is taosdata/Default is khroot/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/kit/taosdump/taosdump.c + sed -i "s/taos\/taos\.cfg/kinghistorian\/kinghistorian\.cfg/g" 
${top_dir}/src/kit/taosdump/taosdump.c + # src/os/src/linux/linuxEnv.c + sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/lib\/taos/lib\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c + # src/os/src/windows/wEnv.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/os/src/windows/wEnv.c + # src/kit/shell/src/shellEngine.c + sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\"taos> \"/\"kh> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c + sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c +fi + +# for jinheng +if [[ "$dbName" == "jh" ]]; then + # Following files to change: + # * src/client/src/tscSystem.c + # * src/inc/taosdef.h + # * src/kit/shell/CMakeLists.txt + # * src/kit/shell/inc/shell.h + # * src/kit/shell/src/shellEngine.c + # * src/kit/shell/src/shellWindows.c + # * src/kit/taosdemo/taosdemo.c + # * src/kit/taosdump/taosdump.c + # * src/os/src/linux/linuxEnv.c + # * src/os/src/windows/wEnv.c + # * src/util/src/tconfig.c + # * src/util/src/tlog.c + + # src/dnode/src/dnodeSystem.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeSystem.c + # src/dnode/src/dnodeMain.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeMain.c + # TODO: src/dnode/CMakeLists.txt fi echo "build ${pagMode} package ..." @@ -205,9 +413,67 @@ fi # check support cpu type if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then if [ "$verMode" != "cluster" ]; then + # community-version compile cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} ${allocator_macro} else - cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} ${allocator_macro} + # enterprise-version compile + if [[ "$dbName" == "power" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/PowerDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "tq" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/TQueue/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + 
# enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "pro" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "kh" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + if [[ "$dbName" == "jh" ]]; then + # enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/\"taosdata\"/\"jhdata\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + sed -i "s/TDengine/jh_iot/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c + # enterprise/src/plugins/admin/src/httpAdminHandle.c + #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c + # enterprise/src/plugins/grant/src/grantMain.c + #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c + # enterprise/src/plugins/module/src/moduleMain.c + #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c + fi + + cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} ${allocator_macro} fi else echo "input cpuType=${cpuType} error!!!" @@ -240,7 +506,6 @@ if [ "$osType" != "Darwin" ]; then else echo "==========dpkg command not exist, so not release deb package!!!" 
fi - ret='0' command -v rpmbuild >/dev/null 2>&1 || { ret='1'; } if [ "$ret" -eq 0 ]; then @@ -272,12 +537,21 @@ if [ "$osType" != "Darwin" ]; then ${csudo} ./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} ${csudo} ./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${csudo} ./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + elif [[ "$dbName" == "kh" ]]; then + ${csudo} ./makepkg_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} + ${csudo} ./makeclient_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} + ${csudo} ./makearbi_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + elif [[ "$dbName" == "jh" ]]; then + ${csudo} ./makepkg_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} + ${csudo} ./makeclient_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} + ${csudo} ./makearbi_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} else ${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp} ${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} fi else + # only make client for Darwin cd ${script_dir}/tools ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName} fi diff --git a/packaging/tools/install_arbi_jh.sh b/packaging/tools/install_arbi_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..2403f8fbd79abf4324577fe3dca3a8e0eac8ed01 --- /dev/null +++ b/packaging/tools/install_arbi_jh.sh @@ -0,0 +1,286 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) + +bin_link_dir="/usr/bin" +#inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/tarbitrator" + +# old bin dir +bin_dir="/usr/local/tarbitrator/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact jhict.com for support." 
+ os_type=1 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/bin + #${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/remove_arbi_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_jh.sh ${bin_link_dir}/rmtarbitrator || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install server service + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." 
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function install_service_on_systemd() { + clean_service_on_systemd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + ${csudo} systemctl enable tarbitratord +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + kill_tarbitrator + fi +} + +function update() { + # Start to update + echo -e "${GREEN}Start to update jh_iot's arbitrator ...${NC}" + # Stop the service if running + if pidof tarbitrator &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop tarbitratord || : + elif ((${service_mod}==1)); then + ${csudo} service tarbitratord stop || : + else + kill_tarbitrator + fi + sleep 1 + fi + + install_main_path + #install_header + install_bin + install_service + + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + fi + echo + echo -e "\033[44;32;1mjh_iot's arbitrator is updated successfully!${NC}" +} + +function install() { + # Start to install + echo -e "${GREEN}Start to install jh_iot's arbitrator ...${NC}" + + install_main_path + #install_header + install_bin + install_service + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e 
"${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" + fi + + echo -e "\033[44;32;1mjh_iot's arbitrator is installed successfully!${NC}" + echo +} + + +## ==============================Main program starts from here============================ +# Install server and client +if [ -x ${bin_dir}/tarbitrator ]; then + update_flag=1 + update +else + install +fi + diff --git a/packaging/tools/install_arbi_kh.sh b/packaging/tools/install_arbi_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..9a2542936d935b70b762702f0f2f6ff92b51a4f3 --- /dev/null +++ b/packaging/tools/install_arbi_kh.sh @@ -0,0 +1,286 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) + +bin_link_dir="/usr/bin" +#inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/tarbitrator" + +# old bin dir +bin_dir="/usr/local/tarbitrator/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact wellintech.com for support." 
+ os_type=1 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/bin + #${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/remove_arbi_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_kh.sh ${bin_link_dir}/rmtarbitrator || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install khserver service + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." 
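+    # best-effort stop/disable of any existing tarbitratord unit; errors are swallowed so reinstallation still proceeds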
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function install_service_on_systemd() { + clean_service_on_systemd + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + ${csudo} systemctl enable tarbitratord +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + kill_tarbitrator + fi +} + +function update() { + # Start to update + echo -e "${GREEN}Start to update KingHistorian's arbitrator ...${NC}" + # Stop the service if running + if pidof tarbitrator &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop tarbitratord || : + elif ((${service_mod}==1)); then + ${csudo} service tarbitratord stop || : + else + kill_tarbitrator + fi + sleep 1 + fi + + install_main_path + #install_header + install_bin + install_service + + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}" + fi + echo + echo -e "\033[44;32;1mKingHistorian's arbitrator is updated successfully!${NC}" +} + +function install() { + # Start to install + echo -e "${GREEN}Start to install KingHistorian's arbitrator ...${NC}" + + install_main_path + #install_header + install_bin + install_service + echo + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord 
start${NC}" + else + echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}" + fi + + echo -e "\033[44;32;1mKingHistorian's arbitrator is installed successfully!${NC}" + echo +} + + +## ==============================Main program starts from here============================ +# Install server and client +if [ -x ${bin_dir}/tarbitrator ]; then + update_flag=1 + update +else + install +fi + diff --git a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh index 883db2b7169d125309125887cb72279c92c4602a..da86566eec43438cc9f266a86f4e7afe226ca6ce 100755 --- a/packaging/tools/install_arbi_power.sh +++ b/packaging/tools/install_arbi_power.sh @@ -176,9 +176,6 @@ function install_header() { } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi @@ -279,7 +276,6 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual stop taosd kill_tarbitrator fi } @@ -306,7 +302,6 @@ function update_PowerDB() { install_jemalloc echo - #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then @@ -329,7 +324,6 @@ function install_PowerDB() { install_jemalloc echo - #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_arbi_pro.sh b/packaging/tools/install_arbi_pro.sh index 11165dbdd8bdf6afb4659250499cf1d9184c2395..98e896aa6583485ae1700d3557b0fe93a34b0a47 100755 --- a/packaging/tools/install_arbi_pro.sh +++ b/packaging/tools/install_arbi_pro.sh @@ -123,9 +123,6 @@ function install_header() { } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi @@ -221,7 +218,6 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual stop taosd kill_tarbitrator fi } @@ -247,7 +243,6 @@ function update_prodb() { install_service echo - #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then @@ -268,7 +263,6 @@ function install_prodb() { install_bin install_service echo - #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_arbi_tq.sh b/packaging/tools/install_arbi_tq.sh index bd852dd0ad2c9114f2424193adccf56b0cb40412..37426b359d8957c664fd21552a95d45f50085485 100755 --- a/packaging/tools/install_arbi_tq.sh +++ b/packaging/tools/install_arbi_tq.sh @@ -123,9 +123,6 @@ function install_header() { } function clean_service_on_sysvinit() { - #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - 
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi @@ -226,7 +223,6 @@ function install_service() { elif ((${service_mod}==1)); then install_service_on_sysvinit else - # must manual stop taosd kill_tarbitrator fi } @@ -252,7 +248,6 @@ function update_tq() { install_service echo - #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then @@ -273,7 +268,6 @@ function install_tq() { install_bin install_service echo - #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_client_jh.sh b/packaging/tools/install_client_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..cccf0a97be053796099d5b5f4a2c3db018c24955 --- /dev/null +++ b/packaging/tools/install_client_jh.sh @@ -0,0 +1,245 @@ +#!/bin/bash +# +# This file is used to install jh_taos client on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- + +osType=Linux +pagMode=full + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/jh_taos" + log_dir="/var/log/jh_taos" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/jh_taos" + log_dir="~/jh_taos/log" +fi + +log_link_dir="/usr/local/jh_taos/log" + +cfg_install_dir="/etc/jh_taos" + +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi + +#install main path +install_main_dir="/usr/local/jh_taos" + +# old bin dir +bin_dir="/usr/local/jh_taos/bin" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +function kill_client() { + pid=$(ps -ef | grep "jh_taos" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/jh_taosdump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo} ln -s 
${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || : + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : + [ -x ${install_main_dir}/bin/jh_taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taosdump ${bin_link_dir}/jh_taosdump || : + fi + [ -x ${install_main_dir}/bin/remove_client_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_jh.sh ${bin_link_dir}/rmjh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : +} + +function clean_lib() { + sudo rm -f /usr/lib/libtaos.* || : + sudo rm -rf ${lib_dir} || : +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi + + ${csudo} ldconfig +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function update() { + # Start to update + if [ ! 
-e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + + echo -e "${GREEN}Start to update jh_iot client...${NC}" + # Stop the client shell if running + if pidof jh_taos &> /dev/null; then + kill_client + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}" + + rm -rf $(tar -tf jh_taos.tar.gz) +} + +function install() { + # Start to install + if [ ! -e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + + echo -e "${GREEN}Start to install jh_iot client...${NC}" + + install_main_path + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}" + + rm -rf $(tar -tf jh_taos.tar.gz) +} + + +## ==============================Main program starts from here============================ +# Install or update the client +# if the server is already installed, don't install the client + if [ -e ${bin_dir}/jh_taosd ]; then + echo -e "\033[44;32;1mjh_iot server is already installed, so there is no need to install the client!${NC}" + exit 0 + fi + + if [ -x ${bin_dir}/jh_taos ]; then + update_flag=1 + update + else + install + fi diff --git a/packaging/tools/install_client_kh.sh b/packaging/tools/install_client_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..210fd27fb6978527d76f0c915d0293370b93cf3e --- /dev/null +++ b/packaging/tools/install_client_kh.sh @@ -0,0 +1,246 @@ +#!/bin/bash +# +# This file is used to install kinghistorian client on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- + +osType=Linux +pagMode=full + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/kinghistorian" + log_dir="/var/log/kinghistorian" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/kinghistorian" + log_dir="~/kinghistorian/log" +fi + +log_link_dir="/usr/local/kinghistorian/log" + +cfg_install_dir="/etc/kinghistorian" + +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + lib64_link_dir="/usr/lib64" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi + +#install main path +install_main_dir="/usr/local/kinghistorian" + +# old bin dir +bin_dir="/usr/local/kinghistorian/bin" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +function kill_client() { + pid=$(ps -ef | grep "khclient" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver + ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/khclient || : + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/khdump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/khclient ] && ${csudo} ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || : + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : + [ -x ${install_main_dir}/bin/khdump ] && ${csudo} ln -s ${install_main_dir}/bin/khdump ${bin_link_dir}/khdump || : + fi + [ -x ${install_main_dir}/bin/remove_client_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_kh.sh ${bin_link_dir}/rmkh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : +} + +function clean_lib() { + sudo rm -f /usr/lib/libtaos.* || : + sudo rm -rf ${lib_dir} || : +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + #${csudo} rm -rf ${v15_java_app_dir} || : + + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ -d "${lib64_link_dir}" ]; 
then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi + + ${csudo} ldconfig +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo} cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org + ${csudo} ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function update() { + # Start to update + if [ ! -e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + + echo -e "${GREEN}Start to update KingHistorian client...${NC}" + # Stop the client shell if running + if pidof khclient &> /dev/null; then + kill_client + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}" + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + +function install() { + # Start to install + if [ ! 
-e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + + echo -e "${GREEN}Start to install KingHistorian client...${NC}" + + install_main_path + install_log + install_header + install_lib + if [ "$pagMode" != "lite" ]; then + install_connector + fi + install_examples + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}" + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + + +## ==============================Main program starts from here============================ +# Install or update the client +# if the server is already installed, don't install the client + if [ -e ${bin_dir}/khserver ]; then + echo -e "\033[44;32;1mKingHistorian server is already installed, so there is no need to install the client!${NC}" + exit 0 + fi + + if [ -x ${bin_dir}/khclient ]; then + update_flag=1 + update + else + install + fi diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh index 31da0d61319045800fe3a454d071118aa3a4768e..ecd22f3de19ee639e1e081fb8330b010f6b7227c 100755 --- a/packaging/tools/install_client_power.sh +++ b/packaging/tools/install_client_power.sh @@ -193,16 +193,14 @@ function install_jemalloc() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/power.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/power.cfg ] && ${csudo} cp ${script_dir}/cfg/power.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org + ${csudo} ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg } diff --git a/packaging/tools/install_client_pro.sh b/packaging/tools/install_client_pro.sh index fff8ae31200669ee3ab918a873e33fc32ece37c8..425d39b6be4a5e5340bde225433ce9125ceeecd0 100755 --- a/packaging/tools/install_client_pro.sh +++ b/packaging/tools/install_client_pro.sh @@ -109,7 +109,6 @@ function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : - #${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* @@ -137,16 +136,14 @@ function install_header() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! 
-f ${cfg_install_dir}/prodb.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo} cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org + ${csudo} ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg } @@ -235,14 +232,14 @@ function install_prodb() { ## ==============================Main program starts from here============================ # Install or updata client and client # if server is already install, don't install client - if [ -e ${bin_dir}/prodbs ]; then - echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}" - exit 0 - fi +if [ -e ${bin_dir}/prodbs ]; then + echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}" + exit 0 +fi - if [ -x ${bin_dir}/prodbc ]; then - update_flag=1 - update_prodb - else - install_prodb - fi +if [ -x ${bin_dir}/prodbc ]; then + update_flag=1 + update_prodb +else + install_prodb +fi diff --git a/packaging/tools/install_client_tq.sh b/packaging/tools/install_client_tq.sh index 2537442ee264e9aeb4eb6b3d25a17faf60f4df9a..059e79949bb7f3296977aee011e9670c14087f5c 100755 --- a/packaging/tools/install_client_tq.sh +++ b/packaging/tools/install_client_tq.sh @@ -140,16 +140,14 @@ function install_header() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/tq.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo} cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org + ${csudo} ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg } diff --git a/packaging/tools/install_jh.sh b/packaging/tools/install_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..867c5c93b647b788f5ef48c8ea3e6172747cacab --- /dev/null +++ b/packaging/tools/install_jh.sh @@ -0,0 +1,948 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. 
The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +verMode=edge +pagMode=full + +iplist="" +serverFqdn="" +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) +# Dynamic directory +data_dir="/var/lib/jh_taos" +log_dir="/var/log/jh_taos" + +data_link_dir="/usr/local/jh_taos/data" +log_link_dir="/usr/local/jh_taos/log" + +cfg_install_dir="/etc/jh_taos" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/jh_taos" + +# old bin dir +bin_dir="/usr/local/jh_taos/bin" + +service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact jhict.com for support." + os_type=1 +fi + + +# ============================= get input parameters ================================================= + +# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] + +# set parameters by default value +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] + +while getopts "hv:e:i:" arg +do + case $arg in + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$( echo $OPTARG ) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: `basename $0` -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) 
# unknown option + echo "unknown argument" + exit 1 + ;; + esac +done + +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin +# ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver +# ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + ${csudo} rm -f ${bin_link_dir}/jh_taosd || : + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || : + [ -x ${install_main_dir}/bin/jh_taosd ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taosd ${bin_link_dir}/jh_taosd || : + [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : + [ -x ${install_main_dir}/bin/remove_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_jh.sh ${bin_link_dir}/rmjh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + + if [ -d /etc/ld.so.conf.d ]; then + ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi +} + +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! 
-z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + + [ ! 
-z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi + + local_fqdn_check + + #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" + #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" + #FQDN_PATTERN=":[0-9]{1,5}$" + + # first full-qualified domain name (FQDN) for jh_iot cluster system + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.jhict.com:6030) of an existing jh_iot cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + break + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + ${csudo} mkdir -p ${data_dir} + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function clean_service_on_sysvinit() { + if pidof jh_taosd &> /dev/null; then + ${csudo} service jh_taosd stop || : + fi + + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} chkconfig --del jh_taosd || : + fi + + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} insserv -r jh_taosd || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} update-rc.d -f jh_taosd remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/jh_taosd || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install jh_taosd service + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/jh_taosd.deb ${install_main_dir}/init.d/jh_taosd + ${csudo} cp ${script_dir}/init.d/jh_taosd.deb ${service_config_dir}/jh_taosd && ${csudo} chmod a+x ${service_config_dir}/jh_taosd + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/jh_taosd.rpm ${install_main_dir}/init.d/jh_taosd + ${csudo} cp ${script_dir}/init.d/jh_taosd.rpm ${service_config_dir}/jh_taosd && ${csudo} chmod a+x ${service_config_dir}/jh_taosd + ${csudo} cp -f 
${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add jh_taosd || : + ${csudo} chkconfig --level 2345 jh_taosd on || : + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv jh_taosd || : + ${csudo} insserv -d jh_taosd || : + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d jh_taosd defaults || : + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + jh_taosd_service_config="${service_config_dir}/jh_taosd.service" + if systemctl is-active --quiet jh_taosd; then + echo "jh_iot is running, stopping it..." + ${csudo} systemctl stop jh_taosd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable jh_taosd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${jh_taosd_service_config} + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for jh_iot is running, stopping it..." + ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${nginx_service_config} + fi +} + +function install_service_on_systemd() { + clean_service_on_systemd + + service_config="${service_config_dir}/jh_taosd.service" + ${csudo} bash -c "echo '[Unit]' >> ${service_config}" + ${csudo} bash -c "echo 'Description=jh_iot server service' >> ${service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Service]' >> ${service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/jh_taosd' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStartPre=/usr/local/jh_taos/bin/startPre.sh' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Install]' >> ${service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}" + ${csudo} systemctl enable jh_taosd + + 
tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For jh_iot Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! 
${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + # must manual stop jh_taosd + kill_process jh_taosd + fi +} + +vercomp () { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + + if [ -f ${script_dir}/driver/vercomp.txt ]; then + min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` + else + min_compatible_version=$(${script_dir}/bin/jh_taosd -V | head -1 | cut -d ' ' -f 5) + fi + + vercomp $curr_version $min_compatible_version + case $? in + 0) return 0;; + 1) return 0;; + 2) return 1;; + esac +} + +function update() { + # Start to update + if [ ! -e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + install_jemalloc + + # Check if version compatible + if ! is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + echo -e "${GREEN}Start to update jh_iot...${NC}" + # Stop the service if running + if pidof jh_taosd &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop jh_taosd || : + elif ((${service_mod}==1)); then + ${csudo} service jh_taosd stop || : + else + kill_process jh_taosd + fi + sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + + install_main_path + + install_log + install_header + install_lib +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + if [ -z $1 ]; then + install_bin + install_service + install_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for jh_iot is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for jh_iot does not work! 
Please try again!\033[0m" + fi + fi + fi + + #echo + #echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} systemctl start jh_taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} service jh_taosd start${NC}" + else + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ./jh_taosd${NC}" + fi + + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell${NC}" + fi + + echo + echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}" + else + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}" + fi + + rm -rf $(tar -tf jh_taos.tar.gz) +} + +function install() { + # Start to install + if [ ! -e jh_taos.tar.gz ]; then + echo "File jh_taos.tar.gz does not exist" + exit 1 + fi + tar -zxf jh_taos.tar.gz + + echo -e "${GREEN}Start to install jh_iot...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_jemalloc +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for jh_iot is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for jh_iot does not work! Please try again!\033[0m" + fi + fi + fi + + install_config + + # Ask if to start the service + #echo + #echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} systemctl start jh_taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} service jh_taosd start${NC}" + else + echo -e "${GREEN_DARK}To start jh_iot ${NC}: jh_taosd${NC}" + fi + + if [ ! -z "$firstEp" ]; then + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! 
-z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $serverFqdn${GREEN_DARK} to login into jh_iot server${NC}" + echo + fi + echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}" + echo + else # Only install client + install_bin + install_config + + echo + echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}" + fi + + rm -rf $(tar -tf jh_taos.tar.gz) +} + + +## ==============================Main program starts from here============================ +serverFqdn=$(hostname) +if [ "$verType" == "server" ]; then + # Install server and client + if [ -x ${bin_dir}/jh_taosd ]; then + update_flag=1 + update + else + install + fi +elif [ "$verType" == "client" ]; then + interactiveFqdn=no + # Only install client + if [ -x ${bin_dir}/jh_taos ]; then + update_flag=1 + update client + else + install client + fi +else + echo "please input correct verType" +fi diff --git a/packaging/tools/install_kh.sh b/packaging/tools/install_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..44376b065ecd06d5f65a19f1f22da5625bba55de --- /dev/null +++ b/packaging/tools/install_kh.sh @@ -0,0 +1,948 @@ +#!/bin/bash +# +# This file is used to install database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +verMode=edge +pagMode=full + +iplist="" +serverFqdn="" +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -f "$0")) +# Dynamic directory +data_dir="/var/lib/kinghistorian" +log_dir="/var/log/kinghistorian" + +data_link_dir="/usr/local/kinghistorian/data" +log_link_dir="/usr/local/kinghistorian/log" + +cfg_install_dir="/etc/kinghistorian" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/kinghistorian" + +# old bin dir +bin_dir="/usr/local/kinghistorian/bin" + +service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +update_flag=0 + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + + +# get the operating system type for using the corresponding init file +# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification +#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +if [[ -e /etc/os-release ]]; then + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||: +else + osinfo="" +fi +#echo "osinfo: ${osinfo}" +os_type=0 +if echo $osinfo | grep -qwi "ubuntu" ; then +# echo "This is ubuntu system" + os_type=1 +elif echo $osinfo | grep -qwi "debian" ; then +# echo "This is debian system" + os_type=1 +elif echo $osinfo | grep -qwi "Kylin" ; then +# echo "This is Kylin system" + os_type=1 +elif echo $osinfo | grep -qwi "centos" ; then +# echo "This is centos system" + os_type=2 +elif echo $osinfo | grep -qwi "fedora" ; then +# echo "This is fedora system" + os_type=2 +else + echo " osinfo: ${osinfo}" + echo " 
This is an officially unverified linux system," + echo " if there are any problems with the installation and operation, " + echo " please feel free to contact wellintech.com for support." + os_type=1 +fi + + +# ============================= get input parameters ================================================= + +# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...] + +# set parameters by default value +interactiveFqdn=yes # [yes | no] +verType=server # [server | client] +initType=systemd # [systemd | service | ...] + +while getopts "hv:e:i:" arg +do + case $arg in + e) + #echo "interactiveFqdn=$OPTARG" + interactiveFqdn=$( echo $OPTARG ) + ;; + v) + #echo "verType=$OPTARG" + verType=$(echo $OPTARG) + ;; + i) + #echo "initType=$OPTARG" + initType=$(echo $OPTARG) + ;; + h) + echo "Usage: `basename $0` -v [server | client] -e [yes | no]" + exit 0 + ;; + ?) #unknow option + echo "unkonw argument" + exit 1 + ;; + esac +done + +function kill_process() { + pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function install_main_path() { + #create install main dir and all sub dir + ${csudo} rm -rf ${install_main_dir} || : + ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir}/cfg + ${csudo} mkdir -p ${install_main_dir}/bin +# ${csudo} mkdir -p ${install_main_dir}/connector + ${csudo} mkdir -p ${install_main_dir}/driver +# ${csudo} mkdir -p ${install_main_dir}/examples + ${csudo} mkdir -p ${install_main_dir}/include + ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi +} + +function install_bin() { + # Remove links + ${csudo} rm -f ${bin_link_dir}/khclient || : + ${csudo} rm -f ${bin_link_dir}/khserver || : + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : + + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/khclient ] && ${csudo} ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || : + [ -x ${install_main_dir}/bin/khserver ] && ${csudo} ln -s ${install_main_dir}/bin/khserver ${bin_link_dir}/khserver || : + [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : + [ -x ${install_main_dir}/bin/remove_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_kh.sh ${bin_link_dir}/rmkh || : + [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : + [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi +} + +function install_lib() { + # Remove links + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [[ -d 
${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : + ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : + fi + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi +} + +function install_header() { + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h + ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h +} + +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + + if [ -d /etc/ld.so.conf.d ]; then + ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi + fi +} + +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! 
-z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/kinghistorian.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then + ${csudo} mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo} cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir} + ${csudo} chmod 644 ${cfg_install_dir}/* + fi + + ${csudo} cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org + ${csudo} ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg + + [ ! 
-z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + if [ "$interactiveFqdn" == "no" ]; then + return 0 + fi + + local_fqdn_check + + #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" + #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)" + #FQDN_PATTERN=":[0-9]{1,5}$" + + # first full-qualified domain name (FQDN) for KingHistorian cluster system + echo + echo -e -n "${GREEN}Enter FQDN:port (like h1.wellintech.com:6030) of an existing KingHistorian cluster node to join${NC}" + echo + echo -e -n "${GREEN}OR leave it blank to build one${NC}:" + read firstEp + while true; do + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/kinghistorian.cfg + break + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done +} + + +function install_log() { + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + + ${csudo} ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + ${csudo} mkdir -p ${data_dir} + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi +} + +function clean_service_on_sysvinit() { + if pidof khserver &> /dev/null; then + ${csudo} service khserver stop || : + fi + + if pidof tarbitrator &> /dev/null; then + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} chkconfig --del khserver || : + fi + + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} insserv -r khserver || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} update-rc.d -f khserver remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/khserver || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + sleep 1 + + # Install khserver service + + if ((${os_type}==1)); then + ${csudo} cp -f ${script_dir}/init.d/khserver.deb ${install_main_dir}/init.d/khserver + ${csudo} cp ${script_dir}/init.d/khserver.deb ${service_config_dir}/khserver && ${csudo} chmod a+x ${service_config_dir}/khserver + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + elif ((${os_type}==2)); then + ${csudo} cp -f ${script_dir}/init.d/khserver.rpm ${install_main_dir}/init.d/khserver + ${csudo} cp ${script_dir}/init.d/khserver.rpm ${service_config_dir}/khserver && ${csudo} chmod a+x 
${service_config_dir}/khserver + ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord + ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord + fi + + if ((${initd_mod}==1)); then + ${csudo} chkconfig --add khserver || : + ${csudo} chkconfig --level 2345 khserver on || : + ${csudo} chkconfig --add tarbitratord || : + ${csudo} chkconfig --level 2345 tarbitratord on || : + elif ((${initd_mod}==2)); then + ${csudo} insserv khserver || : + ${csudo} insserv -d khserver || : + ${csudo} insserv tarbitratord || : + ${csudo} insserv -d tarbitratord || : + elif ((${initd_mod}==3)); then + ${csudo} update-rc.d khserver defaults || : + ${csudo} update-rc.d tarbitratord defaults || : + fi +} + +function clean_service_on_systemd() { + khserver_service_config="${service_config_dir}/khserver.service" + if systemctl is-active --quiet khserver; then + echo "KingHistorian is running, stopping it..." + ${csudo} systemctl stop khserver &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable khserver &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${khserver_service_config} + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + if systemctl is-active --quiet tarbitratord; then + echo "tarbitrator is running, stopping it..." + ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + if systemctl is-active --quiet nginxd; then + echo "Nginx for KingHistorian is running, stopping it..." 
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${nginx_service_config} + fi +} + +function install_service_on_systemd() { + clean_service_on_systemd + + service_config="${service_config_dir}/khserver.service" + ${csudo} bash -c "echo '[Unit]' >> ${service_config}" + ${csudo} bash -c "echo 'Description=KingHistorian server service' >> ${service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Service]' >> ${service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/khserver' >> ${service_config}" + ${csudo} bash -c "echo 'ExecStartPre=/usr/local/kinghistorian/bin/startPre.sh' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${service_config}" + ${csudo} bash -c "echo >> ${service_config}" + ${csudo} bash -c "echo '[Install]' >> ${service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}" + ${csudo} systemctl enable khserver + + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + #${csudo} systemctl enable tarbitratord + + if [ 
"$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For KingHistorian Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! ${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi +} + +function install_service() { + if ((${service_mod}==0)); then + install_service_on_systemd + elif ((${service_mod}==1)); then + install_service_on_sysvinit + else + # must manual stop khserver + kill_process khserver + fi +} + +vercomp () { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + + if [ -f ${script_dir}/driver/vercomp.txt ]; then + min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` + else + min_compatible_version=$(${script_dir}/bin/khserver -V | head -1 | cut -d ' ' -f 5) + fi + + vercomp $curr_version $min_compatible_version + case $? in + 0) return 0;; + 1) return 0;; + 2) return 1;; + esac +} + +function update() { + # Start to update + if [ ! -e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + install_jemalloc + + # Check if version compatible + if ! 
is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + echo -e "${GREEN}Start to update KingHistorian...${NC}" + # Stop the service if running + if pidof khserver &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop khserver || : + elif ((${service_mod}==1)); then + ${csudo} service khserver stop || : + else + kill_process khserver + fi + sleep 1 + fi + if [ "$verMode" == "cluster" ]; then + if pidof nginx &> /dev/null; then + if ((${service_mod}==0)); then + ${csudo} systemctl stop nginxd || : + elif ((${service_mod}==1)); then + ${csudo} service nginxd stop || : + else + kill_process nginx + fi + sleep 1 + fi + fi + + install_main_path + + install_log + install_header + install_lib +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + if [ -z $1 ]; then + install_bin + install_service + install_config + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for KingHistorian is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for KingHistorian does not work! Please try again!\033[0m" + fi + fi + fi + + #echo + #echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} systemctl start khserver${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} service khserver start${NC}" + else + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ./khserver${NC}" + fi + + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell${NC}" + fi + + echo + echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}" + else + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}" + fi + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + +function install() { + # Start to install + if [ ! -e kinghistorian.tar.gz ]; then + echo "File kinghistorian.tar.gz does not exist" + exit 1 + fi + tar -zxf kinghistorian.tar.gz + + echo -e "${GREEN}Start to install KingHistorian...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_jemalloc +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi +# install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + + openresty_work=false + if [ "$verMode" == "cluster" ]; then + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for KingHistorian is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for KingHistorian does not work! 
Please try again!\033[0m" + fi + fi + fi + + install_config + + # Ask if to start the service + #echo + #echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} systemctl start khserver${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} service khserver start${NC}" + else + echo -e "${GREEN_DARK}To start KingHistorian ${NC}: khserver${NC}" + fi + + if [ ! -z "$firstEp" ]; then + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! -z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $serverFqdn${GREEN_DARK} to login into KingHistorian server${NC}" + echo + fi + echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}" + echo + else # Only install client + install_bin + install_config + + echo + echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}" + fi + + rm -rf $(tar -tf kinghistorian.tar.gz) +} + + +## ==============================Main program starts from here============================ +serverFqdn=$(hostname) +if [ "$verType" == "server" ]; then + # Install server and client + if [ -x ${bin_dir}/khserver ]; then + update_flag=1 + update + else + install + fi +elif [ "$verType" == "client" ]; then + interactiveFqdn=no + # Only install client + if [ -x ${bin_dir}/khclient ]; then + update_flag=1 + update client + else + install client + fi +else + echo "please input correct verType" +fi diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 0e0ee7ba31f4715b2c5585dd040727d604aa90b1..35037464a2fbeaca6a21ced896de4da73b40aab0 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -210,13 +210,6 @@ function install_lib() { ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - #if [ "$verMode" == "cluster" ]; then - # # Compatible with version 1.5 - # ${csudo} mkdir -p ${v15_java_app_dir} - # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar - # ${csudo} chmod 777 ${v15_java_app_dir} || : - #fi - ${csudo} ldconfig } @@ -328,7 +321,7 @@ function set_hostname() { ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/power.cfg serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -363,7 +356,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" 
${cfg_install_dir}/power.cfg serverFqdn=$localFqdn echo return @@ -385,7 +378,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg serverFqdn=$localFqdn break fi @@ -432,16 +425,14 @@ function local_fqdn_check() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/power.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/power.cfg ] && ${csudo} cp ${script_dir}/cfg/power.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org + ${csudo} ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client @@ -471,7 +462,7 @@ function install_config() { # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/power.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -607,7 +598,7 @@ function clean_service_on_systemd() { if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for PowerDB is running, stopping it..." 
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null @@ -646,7 +637,7 @@ function install_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo >> ${tarbitratord_service_config}" @@ -828,7 +819,7 @@ function update_PowerDB() { #echo #echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg" + echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}" elif ((${service_mod}==1)); then @@ -905,7 +896,7 @@ function install_PowerDB() { #echo #echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg" + echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh index e5675b858066148df07508ad2438b0f00d7ce7bf..e1b32e6058ed736b455fc3927eb82b61bbb82130 100755 --- a/packaging/tools/install_pro.sh +++ b/packaging/tools/install_pro.sh @@ -316,7 +316,7 @@ function set_hostname() { ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/prodb.cfg serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -351,7 +351,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg serverFqdn=$localFqdn echo return @@ -373,7 +373,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg serverFqdn=$localFqdn break fi @@ -420,14 +420,14 @@ function local_fqdn_check() { } function install_config() { - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! 
-f ${cfg_install_dir}/prodb.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo} cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org + ${csudo} ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client @@ -457,7 +457,7 @@ function install_config() { # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/prodb.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -805,7 +805,7 @@ function update_prodb() { #echo #echo -e "\033[44;32;1mProDB is updated successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg" + echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}" elif ((${service_mod}==1)); then @@ -882,7 +882,7 @@ function install_prodb() { #echo #echo -e "\033[44;32;1mProDB is installed successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg" + echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh index ef5fb8c05a4a98a55918ee217125bd0f0a09b955..f5f3f9ba27ddcf7eaccb5c6a03317cc40af0d673 100755 --- a/packaging/tools/install_tq.sh +++ b/packaging/tools/install_tq.sh @@ -210,13 +210,6 @@ function install_lib() { ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - #if [ "$verMode" == "cluster" ]; then - # # Compatible with version 1.5 - # ${csudo} mkdir -p ${v15_java_app_dir} - # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar - # ${csudo} chmod 777 ${v15_java_app_dir} || : - #fi - ${csudo} ldconfig } @@ -328,7 +321,7 @@ function set_hostname() { ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/tq.cfg serverFqdn=$newHostname if [[ -e /etc/hosts ]]; then @@ -363,7 +356,7 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg serverFqdn=$localFqdn echo return @@ -385,7 +378,7 @@ function set_ipAsFqdn() { read -p "Please choose an IP from local IP list:" localFqdn else # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" 
${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg serverFqdn=$localFqdn break fi @@ -432,16 +425,14 @@ function local_fqdn_check() { } function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + if [ ! -f ${cfg_install_dir}/tq.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo} cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org + ${csudo} ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client @@ -471,7 +462,7 @@ function install_config() { # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/tq.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -607,7 +598,7 @@ function clean_service_on_systemd() { if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for TQ is running, stopping it..." ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null @@ -646,7 +637,7 @@ function install_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + ${csudo} bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo >> ${tarbitratord_service_config}" @@ -828,7 +819,7 @@ function update_tq() { #echo #echo -e "\033[44;32;1mTQ is updated successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg" + echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}" elif ((${service_mod}==1)); then @@ -905,7 +896,7 @@ function install_tq() { #echo #echo -e "\033[44;32;1mTQ is installed successfully!${NC}" echo - echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg" + echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 093b2bb0a7ea8033b7509e231200b8b4ad6901be..297f3d512224d27dc135fecdf76e1db45972dcd8 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -345,9 +345,7 @@ function install_header() { } 
function install_config() { - #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} diff --git a/packaging/tools/makearbi_jh.sh b/packaging/tools/makearbi_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..5457b163599421d0a5917156efde1c8814a6f514 --- /dev/null +++ b/packaging/tools/makearbi_jh.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# +# Generate arbitrator's tar.gz setup package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/jh_iot-enterprise-arbitrator-${version}" +else + install_dir="${release_dir}/jh_iot-arbitrator-${version}" +fi + +# Directories and files. +bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_jh.sh" +install_files="${script_dir}/install_arbi_jh.sh" + +init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord +init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord + +# make directories. +mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_jh.sh || : +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" + exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makearbi_kh.sh b/packaging/tools/makearbi_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..c7fa40eb4f1fc4003e6a584bdc5c4534616754d6 --- /dev/null +++ b/packaging/tools/makearbi_kh.sh @@ -0,0 +1,74 @@ +#!/bin/bash +# +# Generate arbitrator's tar.gz setup package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. 
+build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/KingHistorian-enterprise-arbitrator-${version}" +else + install_dir="${release_dir}/KingHistorian-arbitrator-${version}" +fi + +# Directories and files. +bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_kh.sh" +install_files="${script_dir}/install_arbi_kh.sh" + +init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord +init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord + +# make directories. +mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_kh.sh || : +#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || : +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : +mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stabel or beta" + exit 1 +fi + +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar ${pkg_name}.tar.gz error !!!" + exit $exitcode +fi + +cd ${curr_dir} diff --git a/packaging/tools/makearbi_power.sh b/packaging/tools/makearbi_power.sh index fd50ecd43878de08e7bb94249da8cb64c3630e6e..f4d14809f70225406adae49f30f1dcbe4818f014 100755 --- a/packaging/tools/makearbi_power.sh +++ b/packaging/tools/makearbi_power.sh @@ -34,7 +34,6 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh" install_files="${script_dir}/install_arbi_power.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord diff --git a/packaging/tools/makearbi_pro.sh b/packaging/tools/makearbi_pro.sh index 6ce3765e44acc408ced9730c54b793338eb37b38..5b5cd5447c2630f0e6bd9ba53d50856761049bc6 100755 --- a/packaging/tools/makearbi_pro.sh +++ b/packaging/tools/makearbi_pro.sh @@ -34,7 +34,6 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh" install_files="${script_dir}/install_arbi_pro.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord diff --git a/packaging/tools/makearbi_tq.sh b/packaging/tools/makearbi_tq.sh index c10dfec255d411965a3887942e5d2aded4635979..fb5b42a062b1bc34f4104e314de2adeca462d8b9 100755 --- a/packaging/tools/makearbi_tq.sh +++ b/packaging/tools/makearbi_tq.sh @@ -34,7 +34,6 @@ fi bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_tq.sh" install_files="${script_dir}/install_arbi_tq.sh" -#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord 
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord diff --git a/packaging/tools/makeclient_jh.sh b/packaging/tools/makeclient_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..267d78b953be33c567f8175a369b13f326ee3f5a --- /dev/null +++ b/packaging/tools/makeclient_jh.sh @@ -0,0 +1,180 @@ +#!/bin/bash +# +# Generate tar.gz package for linux client in all os system +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/jh_iot-enterprise-client-${version}" +else + install_dir="${release_dir}/jh_iot-client-${version}" +fi + +# Directories and files. + +if [ "$osType" != "Darwin" ]; then + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_jh.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + +install_files="${script_dir}/install_client_jh.sh" + +# make directories. +mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg + +sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg + +mkdir -p ${install_dir}/bin +if [ "$osType" != "Darwin" ]; then + if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taos + cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos + cp ${script_dir}/remove_jh.sh ${install_dir}/bin + else + cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos + cp ${script_dir}/remove_jh.sh ${install_dir}/bin + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + fi +else + cp ${bin_files} ${install_dir}/bin +fi +chmod a+x ${install_dir}/bin/* || : + +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f 
${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + +cd ${install_dir} + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f jh_taos.tar.gz * --remove-files || : +else + tar -zcv -f jh_taos.tar.gz * || : + mv jh_taos.tar.gz .. + rm -rf ./* + mv ../jh_taos.tar.gz . +fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh + mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh + mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh +fi +chmod a+x ${install_dir}/install_client_jh.sh + +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stable or beta" + exit 1 +fi + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . +fi + +cd ${curr_dir} diff --git a/packaging/tools/makeclient_kh.sh b/packaging/tools/makeclient_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..b991e2a6fae69a72d678cf5ff8751429a9f88fc6 --- /dev/null +++ b/packaging/tools/makeclient_kh.sh @@ -0,0 +1,180 @@ +#!/bin/bash +# +# Generate tar.gz package for linux client in all os system +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +#package_name='linux' + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/KingHistorian-enterprise-client-${version}" +else + install_dir="${release_dir}/KingHistorian-client-${version}" +fi + +# Directories and files. 
+ +if [ "$osType" != "Darwin" ]; then + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_kh.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi + +install_files="${script_dir}/install_client_kh.sh" + +# make directories. +mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg + +sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg + +mkdir -p ${install_dir}/bin +if [ "$osType" != "Darwin" ]; then + if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taos + cp ${build_dir}/bin/taos ${install_dir}/bin/khclient + cp ${script_dir}/remove_kh.sh ${install_dir}/bin + else + cp ${build_dir}/bin/taos ${install_dir}/bin/khclient + cp ${script_dir}/remove_kh.sh ${install_dir}/bin + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin + fi +else + cp ${bin_files} ${install_dir}/bin +fi +chmod a+x ${install_dir}/bin/* || : + +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + +cd ${install_dir} + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f kinghistorian.tar.gz * --remove-files || : +else + tar -zcv -f kinghistorian.tar.gz * || : + mv kinghistorian.tar.gz .. + rm -rf ./* + mv ../kinghistorian.tar.gz . 
+fi + +cd ${curr_dir} +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh + mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh +fi +if [ "$pagMode" == "lite" ]; then + sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh + mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh +fi +chmod a+x ${install_dir}/install_client_kh.sh + +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +cd ${release_dir} + +if [ "$verMode" == "cluster" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +elif [ "$verMode" == "edge" ]; then + pkg_name=${install_dir}-${osType}-${cpuType} +else + echo "unknow verMode, nor cluster or edge" + exit 1 +fi + +if [ "$pagMode" == "lite" ]; then + pkg_name=${pkg_name}-Lite +fi + +if [ "$verType" == "beta" ]; then + pkg_name=${pkg_name}-${verType} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} +else + echo "unknow verType, nor stable or beta" + exit 1 +fi + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . +fi + +cd ${curr_dir} diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh index 4a0b033d30e6478f37a62f9cc896aee0903d39c9..3aa37545545d9f9237376fc8f5883e618535b10a 100755 --- a/packaging/tools/makeclient_pro.sh +++ b/packaging/tools/makeclient_pro.sh @@ -58,11 +58,11 @@ install_files="${script_dir}/install_client_pro.sh" # make directories. mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg -sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg +sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg mkdir -p ${install_dir}/bin if [ "$osType" != "Darwin" ]; then diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh index 1cc7003661a7491b1df625916dd289de32434ee9..c6922d1c2357c99dc23b14212e652d32c7ac6368 100755 --- a/packaging/tools/makeclient_tq.sh +++ b/packaging/tools/makeclient_tq.sh @@ -40,13 +40,6 @@ fi # Directories and files. if [ "$osType" != "Darwin" ]; then -# if [ "$pagMode" == "lite" ]; then -# strip ${build_dir}/bin/tqd -# strip ${build_dir}/bin/tq -# bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh" -# else -# bin_files="${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${script_dir}/remove_client_tq.sh ${script_dir}/set_core.sh" -# fi lib_files="${build_dir}/lib/libtaos.so.${version}" else bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh" @@ -65,11 +58,11 @@ install_files="${script_dir}/install_client_tq.sh" # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg -sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg +sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg +sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg +sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg mkdir -p ${install_dir}/bin if [ "$osType" != "Darwin" ]; then diff --git a/packaging/tools/makepkg_jh.sh b/packaging/tools/makepkg_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..d7b7746849578b0e2774ddf442566196102ea561 --- /dev/null +++ b/packaging/tools/makepkg_jh.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Generate tar.gz package for all os system + +set -e +#set -x + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 +cpuType=$4 +osType=$5 +verMode=$6 +verType=$7 +pagMode=$8 +versionComp=$9 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -f ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +# package_name='linux' +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/jh_iot-enterprise-server-${version}" +else + install_dir="${release_dir}/jh_iot-server-${version}" +fi + +lib_files="${build_dir}/lib/libtaos.so.${version}" +header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" +if [ "$verMode" == "cluster" ]; then + cfg_dir="${top_dir}/../enterprise/packaging/cfg" +else + cfg_dir="${top_dir}/packaging/cfg" +fi +install_files="${script_dir}/install_jh.sh" +nginx_dir="${code_dir}/../../enterprise/src/plugins/web" + +# make directories. 
+mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/bin + +# bin +if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos +else + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump + cp ${build_dir}/bin/tarbitrator ${install_dir}/bin + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/startPre.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin +fi +cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos +cp ${build_dir}/bin/taosd ${install_dir}/bin/jh_taosd +cp ${script_dir}/remove_jh.sh ${install_dir}/bin +chmod a+x ${install_dir}/bin/* || : + +# cluster +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_jh.sh >> remove_jh_temp.sh + mv remove_jh_temp.sh ${install_dir}/bin/remove_jh.sh + + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + # replace the OEM name + sed -i -e 's/www.taosdata.com/www.jhict.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/TAOS Data/Jinheng Technology/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/taosd/jh_taosd/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` + sed -i -e 's/taosd<\/th>/jh_taosd<\/th>/g' ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['jh_taosd', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'jh_taosd',/g" ${install_dir}/nginxd/admin/monitor.html + sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/*.html + sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/js/*.js + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + +sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg +sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg +sed -i "s/support@taosdata.com/jhkj@njsteel.com.cn/g" ${install_dir}/cfg/taos.cfg +sed -i "s/taos client/client/g" ${install_dir}/cfg/taos.cfg +sed -i "s/taosd/server/g" ${install_dir}/cfg/taos.cfg + +cd ${install_dir} +tar -zcv -f jh_taos.tar.gz * --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar jh_taos.tar.gz error !!!" 
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_jh.sh >> install_jh_temp.sh
+ mv install_jh_temp.sh ${install_dir}/install_jh.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/jh_taos_history/g" ${install_dir}/install_jh.sh >> install_jh_temp.sh
+ mv install_jh_temp.sh ${install_dir}/install_jh.sh
+fi
+
+sed -i "/install_connector$/d" ${install_dir}/install_jh.sh
+sed -i "/install_examples$/d" ${install_dir}/install_jh.sh
+chmod a+x ${install_dir}/install_jh.sh
+
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg_kh.sh b/packaging/tools/makepkg_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..80b7a06041c6c9dca926ecc0f87c8c8a7958c639
--- /dev/null
+++ b/packaging/tools/makepkg_kh.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all os system
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+versionComp=$9
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+# package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/KingHistorian-enterprise-server-${version}"
+else
+ install_dir="${release_dir}/KingHistorian-server-${version}"
+fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+install_files="${script_dir}/install_kh.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# make directories.
+mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg +mkdir -p ${install_dir}/bin + +# bin +if [ "$pagMode" == "lite" ]; then + strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taos +else + cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo + cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump + cp ${build_dir}/bin/tarbitrator ${install_dir}/bin + cp ${script_dir}/set_core.sh ${install_dir}/bin + cp ${script_dir}/get_client.sh ${install_dir}/bin + cp ${script_dir}/startPre.sh ${install_dir}/bin + cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin +fi +cp ${build_dir}/bin/taos ${install_dir}/bin/khclient +cp ${build_dir}/bin/taosd ${install_dir}/bin/khserver +cp ${script_dir}/remove_kh.sh ${install_dir}/bin +chmod a+x ${install_dir}/bin/* || : + +# cluster +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_kh.sh >> remove_kh_temp.sh + mv remove_kh_temp.sh ${install_dir}/bin/remove_kh.sh + + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + # replace the OEM name, add by yangzy@2021-09-22 + sed -i -e 's/www.taosdata.com/www.wellintech.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/TAOS Data/Wellintech/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g") + sed -i -e 's/taosd/khserver/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq` + sed -i -e 's/taosd<\/th>/khserver<\/th>/g' ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/data:\['taosd', 'system'\],/data:\['khserver', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html + sed -i -e "s/name: 'taosd',/name: 'khserver',/g" ${install_dir}/nginxd/admin/monitor.html + sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/*.html + sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/js/*.js + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + +sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/support@taosdata.com/support@wellintech.com/g" ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/taos client/khclient/g" ${install_dir}/cfg/kinghistorian.cfg +sed -i "s/taosd/khserver/g" ${install_dir}/cfg/kinghistorian.cfg + +cd ${install_dir} +tar -zcv -f kinghistorian.tar.gz * --remove-files || : +exitcode=$? +if [ "$exitcode" != "0" ]; then + echo "tar kinghistorian.tar.gz error !!!" 
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_kh.sh >> install_kh_temp.sh
+ mv install_kh_temp.sh ${install_dir}/install_kh.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/kh_history/g" ${install_dir}/install_kh.sh >> install_kh_temp.sh
+ mv install_kh_temp.sh ${install_dir}/install_kh.sh
+fi
+
+sed -i "/install_connector$/d" ${install_dir}/install_kh.sh
+sed -i "/install_examples$/d" ${install_dir}/install_kh.sh
+chmod a+x ${install_dir}/install_kh.sh
+
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index 65200ddd047358f92f8e3a612c08eedb60053311..47bf00dd5e96946d716f7146bfb8c942f4b5ecf1 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -31,16 +31,6 @@ else
   install_dir="${release_dir}/PowerDB-server-${version}"
 fi
 
-# Directories and files.
-#if [ "$pagMode" == "lite" ]; then
-# strip ${build_dir}/bin/taosd
-# strip ${build_dir}/bin/taos
-# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
-#else
-# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh\
-# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
-#fi
-
 lib_files="${build_dir}/lib/libtaos.so.${version}"
 header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
 if [ "$verMode" == "cluster" ]; then
@@ -51,13 +41,6 @@ fi
 install_files="${script_dir}/install_power.sh"
 nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
 
-# Init file
-#init_dir=${script_dir}/deb
-#if [ $package_type = "centos" ]; then
-# init_dir=${script_dir}/rpm
-#fi
-#init_files=${init_dir}/powerd
-# temp use rpm's powerd. TODO: later modify according to os type
 init_file_deb=${script_dir}/../deb/powerd
 init_file_rpm=${script_dir}/../rpm/powerd
 init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
@@ -66,7 +49,7 @@ init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
 
 # make directories.
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/power.cfg #mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/bin @@ -109,9 +92,9 @@ if [ "$verMode" == "cluster" ]; then sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js - sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg - sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg - sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg + sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg + sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg + sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/power.cfg if [ "$cpuType" == "aarch64" ]; then cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh index 457cb0de6f02f7000dc7437cde61bfec28c7205c..fedf49c28df54a376a629cb5d6151ff500674f74 100755 --- a/packaging/tools/makepkg_pro.sh +++ b/packaging/tools/makepkg_pro.sh @@ -44,7 +44,7 @@ nginx_dir="${code_dir}/../../enterprise/src/plugins/web" # make directories. mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg mkdir -p ${install_dir}/bin # bin @@ -94,12 +94,12 @@ if [ "$verMode" == "cluster" ]; then rm -rf ${install_dir}/nginxd/sbin/arm fi -sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg -sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg -sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/taos.cfg -sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/taos.cfg -sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/taos.cfg +sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg +sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg +sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/prodb.cfg +sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/prodb.cfg +sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/prodb.cfg cd ${install_dir} tar -zcv -f prodb.tar.gz * --remove-files || : @@ -124,50 +124,9 @@ sed -i "/install_connector$/d" ${install_dir}/install_pro.sh sed -i "/install_examples$/d" ${install_dir}/install_pro.sh chmod a+x ${install_dir}/install_pro.sh -# Copy example code -#mkdir -p ${install_dir}/examples -#examples_dir="${top_dir}/tests/examples" -#cp -r ${examples_dir}/c ${install_dir}/examples -#sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c -#sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c -# -#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then -# cp -r ${examples_dir}/JDBC ${install_dir}/examples -# cp -r ${examples_dir}/matlab ${install_dir}/examples -# mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m -# cp -r ${examples_dir}/python 
${install_dir}/examples -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py -# cp -r ${examples_dir}/R ${install_dir}/examples -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt -# cp -r ${examples_dir}/go ${install_dir}/examples -# mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go -# sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go -#fi - # Copy driver mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt -# Copy connector -#connector_dir="${code_dir}/connector" -#mkdir -p ${install_dir}/connector -#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then -# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - -# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then -# cp -r ${connector_dir}/go ${install_dir}/connector -# else -# echo "WARNING: go connector not found, please check if want to use it!" -# fi -# cp -r ${connector_dir}/python ${install_dir}/connector/ -# mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py - -# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py - -# sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py -#fi - cd ${release_dir} if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh index 07032379d7e4bab2636f3685b6edb620780a124a..c497b8201e1d58c41dff0afb5a9261146126977b 100755 --- a/packaging/tools/makepkg_tq.sh +++ b/packaging/tools/makepkg_tq.sh @@ -31,16 +31,6 @@ else install_dir="${release_dir}/TQ-server-${version}" fi -# Directories and files. -#if [ "$pagMode" == "lite" ]; then -# strip ${build_dir}/bin/taosd -# strip ${build_dir}/bin/taos -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh" -#else -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh\ -# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" -#fi - lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" if [ "$verMode" == "cluster" ]; then @@ -51,34 +41,19 @@ fi install_files="${script_dir}/install_tq.sh" nginx_dir="${code_dir}/../../enterprise/src/plugins/web" -# Init file -#init_dir=${script_dir}/deb -#if [ $package_type = "centos" ]; then -# init_dir=${script_dir}/rpm -#fi -#init_files=${init_dir}/tqd -# temp use rpm's tqd. TODO: later modify according to os type -#init_file_deb=${script_dir}/../deb/tqd -#init_file_rpm=${script_dir}/../rpm/tqd -#init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord -#init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord - # make directories. 
mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc -mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg +mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg -#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/bin if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh" cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd cp ${script_dir}/remove_tq.sh ${install_dir}/bin else -# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh ${script_dir}/set_core.sh" cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd cp ${script_dir}/remove_tq.sh ${install_dir}/bin @@ -93,11 +68,6 @@ else fi chmod a+x ${install_dir}/bin/* || : -#mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/tqd.deb -#mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/tqd.rpm -#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : -#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : - if [ "$verMode" == "cluster" ]; then sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh >> remove_tq_temp.sh mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh @@ -109,9 +79,9 @@ if [ "$verMode" == "cluster" ]; then sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js - sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg - sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg - sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg + sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg + sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg + sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg if [ "$cpuType" == "aarch64" ]; then cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ @@ -181,10 +151,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py fi -# Copy release note -# cp ${script_dir}/release_note ${install_dir} - -# exit 1 cd ${release_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 2f4b07067fd08ee3a9591f97e7291305307ff498..20ef425f56fb5d8f5d90b7a8cc4ef4a6da7a1b9c 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -468,8 +468,8 @@ function install_service_on_systemd() { function install_taosadapter_service() { if ((${service_mod}==0)); then - [ -f ${script_dir}/cfg/taosadapter.service ] &&\ - ${csudo} cp ${script_dir}/cfg/taosadapter.service \ + [ -f ${script_dir}/../cfg/taosadapter.service ] &&\ + ${csudo} cp ${script_dir}/../cfg/taosadapter.service \ ${service_config_dir}/ || : ${csudo} systemctl daemon-reload fi diff --git a/packaging/tools/remove_arbi_jh.sh b/packaging/tools/remove_arbi_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..8b690771c761ac51772dac83cafec46360a16be3 --- /dev/null +++ b/packaging/tools/remove_arbi_jh.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Script to stop the service and 
uninstall jh_iot's arbitrator + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/tarbitrator" +bin_link_dir="/usr/bin" + +service_config_dir="/etc/systemd/system" +tarbitrator_service_name="tarbitratord" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf /arbitrator.log || : +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "jh_iot's tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + echo "jh_iot's tarbitrator is running, stopping it..." + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + # must manual stop + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. 
+##clean_header +# Remove log file +clean_log + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}jh_iot's arbitrator is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_arbi_kh.sh b/packaging/tools/remove_arbi_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..ec3254b01649add57f9485c59878059e086b2669 --- /dev/null +++ b/packaging/tools/remove_arbi_kh.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Script to stop the service and uninstall KingHistorian's arbitrator + +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/tarbitrator" +bin_link_dir="/usr/bin" + +service_config_dir="/etc/systemd/system" +tarbitrator_service_name="tarbitratord" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf /arbitrator.log || : +} + +function clean_service_on_systemd() { + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "KingHistorian's tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${tarbitratord_service_config} +} + +function clean_service_on_sysvinit() { + if pidof tarbitrator &> /dev/null; then + echo "KingHistorian's tarbitrator is running, stopping it..." + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + # must manual stop + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. 
+##clean_header +# Remove log file +clean_log + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}KingHistorian's arbitrator is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_arbi_power.sh b/packaging/tools/remove_arbi_power.sh index 077b19ec7d4208c604c2042c2aa1eacab2033c5b..f34cb4ffed63ae4db33080080a24101f44f3389d 100755 --- a/packaging/tools/remove_arbi_power.sh +++ b/packaging/tools/remove_arbi_power.sh @@ -127,4 +127,4 @@ clean_log ${csudo} rm -rf ${install_main_dir} echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}" -echo \ No newline at end of file +echo diff --git a/packaging/tools/remove_arbi_tq.sh b/packaging/tools/remove_arbi_tq.sh index 3d99b6d41a74938d74383df3d8cdfc75c2ebb7c8..27c78b28601a4c57482f9fc56a76175baf668948 100755 --- a/packaging/tools/remove_arbi_tq.sh +++ b/packaging/tools/remove_arbi_tq.sh @@ -127,4 +127,4 @@ clean_log ${csudo} rm -rf ${install_main_dir} echo -e "${GREEN}TQ's arbitrator is removed successfully!${NC}" -echo \ No newline at end of file +echo diff --git a/packaging/tools/remove_client_jh.sh b/packaging/tools/remove_client_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..a3f5dfd10debb0a28211b3682becd083d49ca9c6 --- /dev/null +++ b/packaging/tools/remove_client_jh.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Script to stop the client and uninstall database, but retain the config and log files. +set -e +# set -x + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/jh_taos" + +log_link_dir="/usr/local/jh_taos/log" +cfg_link_dir="/usr/local/jh_taos/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +function kill_client() { + if [ -n "$(pidof jh_taos)" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/jh_taosdump || : + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +# Stop client. +kill_client +# Remove binary file and links +clean_bin +# Remove header file. +clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}jh_iot client is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_client_kh.sh b/packaging/tools/remove_client_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..6a44e875e3426b14400508b1bdbd7510c2ae49cb --- /dev/null +++ b/packaging/tools/remove_client_kh.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Script to stop the client and uninstall database, but retain the config and log files. 
+set -e +# set -x + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/kinghistorian" + +log_link_dir="/usr/local/kinghistorian/log" +cfg_link_dir="/usr/local/kinghistorian/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" + +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +function kill_client() { + if [ -n "$(pidof khclient)" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/khclient || : + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/khdump || : + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +# Stop client. +kill_client +# Remove binary file and links +clean_bin +# Remove header file. +clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config + +${csudo} rm -rf ${install_main_dir} + +echo -e "${GREEN}KingHistorian client is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_jh.sh b/packaging/tools/remove_jh.sh new file mode 100755 index 0000000000000000000000000000000000000000..b962b824fdeeda632be28eeeaa97199adcd6ca2c --- /dev/null +++ b/packaging/tools/remove_jh.sh @@ -0,0 +1,209 @@ +#!/bin/bash +# +# Script to stop the service and uninstall jh_taos, but retain the config, data and log files. 
+ +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/jh_taos" +data_link_dir="/usr/local/jh_taos/data" +log_link_dir="/usr/local/jh_taos/log" +cfg_link_dir="/usr/local/jh_taos/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +service_config_dir="/etc/systemd/system" +service_name="jh_taosd" +tarbitrator_service_name="tarbitratord" +nginx_service_name="nginxd" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_process() { + pid=$(ps -ef | grep "jh_taosd" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/jh_taos || : + ${csudo} rm -f ${bin_link_dir}/jh_taosd || : + ${csudo} rm -f ${bin_link_dir}/jhdemo || : + ${csudo} rm -f ${bin_link_dir}/jh_taosdump || : + ${csudo} rm -f ${bin_link_dir}/rmjh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +function clean_service_on_systemd() { + service_config="${service_config_dir}/${service_name}.service" + if systemctl is-active --quiet ${service_name}; then + echo "jh_iot's jh_taosd is running, stopping it..." + ${csudo} systemctl stop ${service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${service_config} + + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "jh_iot's tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for jh_iot is running, stopping it..." 
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi +} + +function clean_service_on_sysvinit() { + if pidof jh_taosd &> /dev/null; then + echo "jh_iot's jh_taosd is running, stopping it..." + ${csudo} service jh_taosd stop || : + fi + + if pidof tarbitrator &> /dev/null; then + echo "jh_iot's tarbitrator is running, stopping it..." + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} chkconfig --del jh_taosd || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} insserv -r jh_taosd || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/jh_taosd ]; then + ${csudo} update-rc.d -f jh_taosd remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/jh_taosd || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + kill_process + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. +clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config +# Remove data link directory +${csudo} rm -rf ${data_link_dir} || : + +${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} +if [[ -e /etc/os-release ]]; then + osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +else + osinfo="" +fi + +echo -e "${GREEN}jh_iot is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_kh.sh b/packaging/tools/remove_kh.sh new file mode 100755 index 0000000000000000000000000000000000000000..0055e043e74a3fa287cebbd325f83c7f8f98ca8a --- /dev/null +++ b/packaging/tools/remove_kh.sh @@ -0,0 +1,209 @@ +#!/bin/bash +# +# Script to stop the service and uninstall kinghistorian, but retain the config, data and log files. 
+ +set -e +#set -x + +verMode=edge + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +#install main path +install_main_dir="/usr/local/kinghistorian" +data_link_dir="/usr/local/kinghistorian/data" +log_link_dir="/usr/local/kinghistorian/log" +cfg_link_dir="/usr/local/kinghistorian/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +lib64_link_dir="/usr/lib64" +inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +service_config_dir="/etc/systemd/system" +service_name="khserver" +tarbitrator_service_name="tarbitratord" +nginx_service_name="nginxd" +csudo="" +if command -v sudo > /dev/null; then + csudo="sudo" +fi + +initd_mod=0 +service_mod=2 +if pidof systemd &> /dev/null; then + service_mod=0 +elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi +else + service_mod=2 +fi + +function kill_process() { + pid=$(ps -ef | grep "khserver" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function kill_tarbitrator() { + pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi +} + +function clean_bin() { + # Remove link + ${csudo} rm -f ${bin_link_dir}/khclient || : + ${csudo} rm -f ${bin_link_dir}/khserver || : + ${csudo} rm -f ${bin_link_dir}/khdemo || : + ${csudo} rm -f ${bin_link_dir}/khdump || : + ${csudo} rm -f ${bin_link_dir}/rmkh || : + ${csudo} rm -f ${bin_link_dir}/tarbitrator || : + ${csudo} rm -f ${bin_link_dir}/set_core || : +} + +function clean_lib() { + # Remove link + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : +} + +function clean_header() { + # Remove link + ${csudo} rm -f ${inc_link_dir}/taos.h || : + ${csudo} rm -f ${inc_link_dir}/taoserror.h || : +} + +function clean_config() { + # Remove link + ${csudo} rm -f ${cfg_link_dir}/* || : +} + +function clean_log() { + # Remove link + ${csudo} rm -rf ${log_link_dir} || : +} + +function clean_service_on_systemd() { + service_config="${service_config_dir}/${service_name}.service" + if systemctl is-active --quiet ${service_name}; then + echo "KingHistorian's khserver is running, stopping it..." + ${csudo} systemctl stop ${service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${service_config} + + tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" + if systemctl is-active --quiet ${tarbitrator_service_name}; then + echo "KingHistorian's tarbitrator is running, stopping it..." + ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null + ${csudo} rm -f ${tarbitratord_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for KingHistorian is running, stopping it..." 
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi +} + +function clean_service_on_sysvinit() { + if pidof khserver &> /dev/null; then + echo "KingHistorian's khserver is running, stopping it..." + ${csudo} service khserver stop || : + fi + + if pidof tarbitrator &> /dev/null; then + echo "KingHistorian's tarbitrator is running, stopping it..." + ${csudo} service tarbitratord stop || : + fi + + if ((${initd_mod}==1)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} chkconfig --del khserver || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} chkconfig --del tarbitratord || : + fi + elif ((${initd_mod}==2)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} insserv -r khserver || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} insserv -r tarbitratord || : + fi + elif ((${initd_mod}==3)); then + if [ -e ${service_config_dir}/khserver ]; then + ${csudo} update-rc.d -f khserver remove || : + fi + if [ -e ${service_config_dir}/tarbitratord ]; then + ${csudo} update-rc.d -f tarbitratord remove || : + fi + fi + + ${csudo} rm -f ${service_config_dir}/khserver || : + ${csudo} rm -f ${service_config_dir}/tarbitratord || : + + if $(which init &> /dev/null); then + ${csudo} init q || : + fi +} + +function clean_service() { + if ((${service_mod}==0)); then + clean_service_on_systemd + elif ((${service_mod}==1)); then + clean_service_on_sysvinit + else + kill_process + kill_tarbitrator + fi +} + +# Stop service and disable booting start. +clean_service +# Remove binary file and links +clean_bin +# Remove header file. +clean_header +# Remove lib file +clean_lib +# Remove link log directory +clean_log +# Remove link configuration file +clean_config +# Remove data link directory +${csudo} rm -rf ${data_link_dir} || : + +${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} +if [[ -e /etc/os-release ]]; then + osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +else + osinfo="" +fi + +echo -e "${GREEN}KingHistorian is removed successfully!${NC}" +echo diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh index 816869cf444d8001e0c0aae30840d2c40a9e6af4..55abed87da7f9c7ba9375e002aa0a2ffd7fad1ed 100755 --- a/packaging/tools/remove_power.sh +++ b/packaging/tools/remove_power.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Script to stop the service and uninstall TDengine, but retain the config, data and log files. +# Script to stop the service and uninstall PowerDB, but retain the config, data and log files. set -e #set -x @@ -112,7 +112,7 @@ function clean_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TDengine tarbitrator is running, stopping it..." + echo "PowerDB tarbitrator is running, stopping it..." ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null @@ -122,7 +122,7 @@ function clean_service_on_systemd() { nginx_service_config="${service_config_dir}/${nginx_service_name}.service" if [ -d ${bin_dir}/web ]; then if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for TDengine is running, stopping it..." 
+ echo "Nginx for PowerDB is running, stopping it..." ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null @@ -133,9 +133,6 @@ function clean_service_on_systemd() { } function clean_service_on_sysvinit() { - #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - if pidof powerd &> /dev/null; then echo "PowerDB powerd is running, stopping it..." ${csudo} service powerd stop || : @@ -183,7 +180,6 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual stop taosd kill_powerd kill_tarbitrator fi diff --git a/packaging/tools/remove_pro.sh b/packaging/tools/remove_pro.sh index f6dad22bc21b02a9d717d530c50bc19c5a718478..5906d60197a14a6cbb9862a9cddb278faafa1d7a 100755 --- a/packaging/tools/remove_pro.sh +++ b/packaging/tools/remove_pro.sh @@ -177,7 +177,6 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual stop taosd kill_prodbs kill_tarbitrator fi diff --git a/packaging/tools/remove_tq.sh b/packaging/tools/remove_tq.sh index 211eed4dff09ab5da00d5c475cd93148b5ce1b24..14e7dd024d8b7b6c6b39567effb5ad6e8a20f8d5 100755 --- a/packaging/tools/remove_tq.sh +++ b/packaging/tools/remove_tq.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Script to stop the service and uninstall TDengine, but retain the config, data and log files. +# Script to stop the service and uninstall TQ, but retain the config, data and log files. set -e #set -x @@ -112,7 +112,7 @@ function clean_service_on_systemd() { tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" if systemctl is-active --quiet ${tarbitrator_service_name}; then - echo "TDengine tarbitrator is running, stopping it..." + echo "TQ tarbitrator is running, stopping it..." ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null @@ -122,7 +122,7 @@ function clean_service_on_systemd() { nginx_service_config="${service_config_dir}/${nginx_service_name}.service" if [ -d ${bin_dir}/web ]; then if systemctl is-active --quiet ${nginx_service_name}; then - echo "Nginx for TDengine is running, stopping it..." + echo "Nginx for TQ is running, stopping it..." 
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null @@ -183,7 +183,6 @@ function clean_service() { elif ((${service_mod}==1)); then clean_service_on_sysvinit else - # must manual stop taosd kill_tqd kill_tarbitrator fi @@ -212,16 +211,5 @@ else osinfo="" fi -#if echo $osinfo | grep -qwi "ubuntu" ; then -## echo "this is ubuntu system" -# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || : -#elif echo $osinfo | grep -qwi "debian" ; then -## echo "this is debian system" -# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || : -#elif echo $osinfo | grep -qwi "centos" ; then -## echo "this is centos system" -# ${csudo} rpm -e --noscripts tdengine || : -#fi - echo -e "${GREEN}TQ is removed successfully!${NC}" echo diff --git a/src/client/inc/tscGlobalmerge.h b/src/client/inc/tscGlobalmerge.h index 875bb5e178d1d0f50b78b4b6c0cf6ae29b884a1a..6b3bf03316c64143ddb1aaab1d7b0a48a427381e 100644 --- a/src/client/inc/tscGlobalmerge.h +++ b/src/client/inc/tscGlobalmerge.h @@ -62,7 +62,7 @@ typedef struct SRetrieveSupport { uint32_t numOfRetry; // record the number of retry times } SRetrieveSupport; -int32_t tscCreateGlobalMergerEnv(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id); +int32_t tscCreateGlobalMergerEnv(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t* nBufferSize, int64_t id); void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes); diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index a6edac0659c9a8f1670b05bd0d53d2ad6ca84e7c..e3062f9300aae277e37a7313b5878455f348a93d 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -233,7 +233,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor // sort before flush to disk, the data must be consecutively put on tFilePage. 
if (pDesc->orderInfo.numOfCols > 0) { - tColDataQSort(pDesc, (int32_t)pPage->num, 0, (int32_t)pPage->num - 1, pPage->data, orderType); + tColDataMergeSort(pDesc, (int32_t)pPage->num, 0, (int32_t)pPage->num - 1, pPage->data, orderType); } #ifdef _DEBUG_VIEW @@ -364,8 +364,9 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j); int32_t functionId = pExprInfo->base.functionId; - if (pColIndex->colId == pExprInfo->base.colInfo.colId && - (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) { + + if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) { + orderColIndexList[i] = j; break; } @@ -408,8 +409,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* } int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, - tOrderDescriptor **pOrderDesc, uint32_t nBufferSizes, int64_t id) { - SSchema *pSchema = NULL; + tOrderDescriptor **pOrderDesc, uint32_t* nBufferSizes, int64_t id) { + SSchema1 *pSchema = NULL; SColumnModel *pModel = NULL; STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -422,7 +423,7 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu size_t size = tscNumOfExprs(pQueryInfo); - pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size); + pSchema = (SSchema1 *)calloc(1, sizeof(SSchema1) * size); if (pSchema == NULL) { tscError("0x%"PRIx64" failed to allocate memory", id); return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -441,7 +442,10 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu int32_t capacity = 0; if (rlen != 0) { - capacity = nBufferSizes / rlen; + if ((*nBufferSizes) < rlen) { + (*nBufferSizes) = rlen * 2; + } + capacity = (*nBufferSizes) / rlen; } pModel = createColumnModel(pSchema, (int32_t)size, capacity); @@ -458,7 +462,7 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu assert(numOfSub <= pTableMetaInfo->vgroupList->numOfVgroups); for (int32_t i = 0; i < numOfSub; ++i) { - (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel); + (*pMemBuffer)[i] = createExtMemBuffer(*nBufferSizes, rlen, pg, pModel); (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL; } diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index c3c65018a50aea8e7f36d89c15c6b7faa12f2047..0a7a1a67949a3f0cccf923e745859f7c0692dda3 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -440,7 +440,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); int32_t numOfRows = 1; if (strlen(ddl) == 0) { - + } pSql->res.pMerger = tscInitResObjForLocalQuery(numOfRows, rowLen, pSql->self); tscInitResForMerge(&pSql->res); @@ -459,7 +459,7 @@ static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char * int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result); tscFieldInfoUpdateOffset(pQueryInfo); - return tscSCreateSetValueToResObj(pSql, rowLen, str, result); + return tscSCreateSetValueToResObj(pSql, rowLen, str, result); } int32_t tscRebuildCreateTableStatement(void *param,char *result) { SCreateBuilder *builder = (SCreateBuilder *)param; @@ -473,8 +473,8 @@ int32_t tscRebuildCreateTableStatement(void *param,char *result) { code = tscGetTableTagValue(builder, 
buf); if (code == TSDB_CODE_SUCCESS) { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE TABLE `%s` USING `%s` TAGS %s", builder->buf, builder->sTableName, buf); - code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_TABLE, builder->buf, result); - } + code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_TABLE, builder->buf, result); + } free(buf); return code; } @@ -490,27 +490,27 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { TAOS_FIELD *fields = taos_fetch_fields(pSql); int num_fields = taos_num_fields(pSql); - char buf[TSDB_DB_NAME_LEN + 64] = {0}; + char buf[TSDB_DB_NAME_LEN + 64] = {0}; do { memset(buf, 0, sizeof(buf)); - int32_t* lengths = taos_fetch_lengths(pSql); + int32_t* lengths = taos_fetch_lengths(pSql); int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf); if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf); for (int i = 1; i < num_fields; i++) { for (int j = 0; showColumns[j] != NULL; j++) { if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j], strlen(showColumns[j]))) { memset(buf, 0, sizeof(buf)); ret = tscGetNthFieldResult(row, fields, lengths, i, buf); if (ret == 0) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf); + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf); } } } } break; - } - + } + row = tscFetchRow(builder); } while (row != NULL); @@ -528,9 +528,9 @@ int32_t tscRebuildCreateDBStatement(void *param,char *result) { if (buf == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - code = tscGetDBInfo(param, buf); + code = tscGetDBInfo(param, buf); if (code == TSDB_CODE_SUCCESS) { - code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_DB, builder->buf, buf); + code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_DB, builder->buf, buf); } free(buf); return code; @@ -539,7 +539,7 @@ int32_t tscRebuildCreateDBStatement(void *param,char *result) { static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) { char *buf = (char *)malloc(TSDB_MAX_BINARY_LEN); if (buf == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return TSDB_CODE_TSC_OUT_OF_MEMORY; } buf[0] = 0; @@ -548,33 +548,33 @@ static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) { pMeta->tableType == TSDB_STREAM_TABLE) { free(buf); return TSDB_CODE_TSC_INVALID_VALUE; - } + } - SSchema *pTagsSchema = tscGetTableTagSchema(pMeta); + SSchema *pTagsSchema = tscGetTableTagSchema(pMeta); int32_t numOfTags = tscGetNumOfTags(pMeta); for (int32_t i = 0; i < numOfTags; i++) { if (i != numOfTags - 1) { - snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "%s,", pTagsSchema[i].name); + snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "`%s`,", pTagsSchema[i].name); } else { - snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "%s", pTagsSchema[i].name); + snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "`%s`", pTagsSchema[i].name); } - } + } *result = buf; return TSDB_CODE_SUCCESS; -} +} static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, char *ddl) { SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd); STableMetaInfo *pTableMetaInfo = 
tscGetMetaInfo(pQueryInfo, 0); STableMeta * pMeta = pTableMetaInfo->pTableMeta; - SSqlObj *pInterSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); + SSqlObj *pInterSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); if (pInterSql == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + } - SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder)); + SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder)); if (param == NULL) { free(pInterSql); return TSDB_CODE_TSC_OUT_OF_MEMORY; diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index af57f7ec8c6c192bf84915abd86728ab8f195835..58f7c7e7d6a25f95292268d2ff5393c120bb3f97 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -156,13 +156,15 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa SStringBuilder sb; memset(&sb, 0, sizeof(sb)); char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; - strtolower(sTableName, point->stableName); + strncpy(sTableName, point->stableName, strlen(point->stableName)); + //strtolower(sTableName, point->stableName); taosStringBuilderAppendString(&sb, sTableName); for (int j = 0; j < point->tagNum; ++j) { taosStringBuilderAppendChar(&sb, ','); TAOS_SML_KV* tagKv = point->tags + j; char tagName[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; - strtolower(tagName, tagKv->key); + strncpy(tagName, tagKv->key, strlen(tagKv->key)); + //strtolower(tagName, tagKv->key); taosStringBuilderAppendString(&sb, tagName); taosStringBuilderAppendChar(&sb, '='); taosStringBuilderAppend(&sb, tagKv->value, tagKv->length); @@ -261,10 +263,10 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[], SSchemaAction* action, bool* actionNeeded, SSmlLinesInfo* info) { - char fieldNameLowerCase[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; - strtolower(fieldNameLowerCase, pointColField->name); + char fieldName[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + strcpy(fieldName, pointColField->name); - size_t* pDbIndex = taosHashGet(dbAttrHash, fieldNameLowerCase, strlen(fieldNameLowerCase)); + size_t* pDbIndex = taosHashGet(dbAttrHash, fieldName, strlen(fieldName)); if (pDbIndex) { SSchema* dbAttr = taosArrayGet(dbAttrArray, *pDbIndex); assert(strcasecmp(dbAttr->name, pointColField->name) == 0); @@ -297,7 +299,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash *actionNeeded = true; } if (*actionNeeded) { - tscDebug("SML:0x%" PRIx64 " generate schema action. column name: %s, action: %d", info->id, fieldNameLowerCase, + tscDebug("SML:0x%" PRIx64 " generate schema action. column name: %s, action: %d", info->id, fieldName, action->action); } return 0; @@ -536,11 +538,8 @@ static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTabl tscDebug("SML:0x%" PRIx64 " retrieve table meta. 
super table name: %s", info->id, tableName); - char tableNameLowerCase[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE]; - strtolower(tableNameLowerCase, tableName); - char sql[256]; - snprintf(sql, 256, "describe %s", tableNameLowerCase); + snprintf(sql, 256, "describe %s", tableName); TAOS_RES* res = taos_query(taos, sql); code = taos_errno(res); if (code != 0) { @@ -561,8 +560,10 @@ static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTabl pSql->fp = NULL; registerSqlObj(pSql); - SStrToken tableToken = {.z = tableNameLowerCase, .n = (uint32_t)strlen(tableNameLowerCase), .type = TK_ID}; - tGetToken(tableNameLowerCase, &tableToken.type); + char tableNameBuf[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + memcpy(tableNameBuf, tableName, strlen(tableName)); + SStrToken tableToken = {.z = tableNameBuf, .n = (uint32_t)strlen(tableName), .type = TK_ID}; + tGetToken(tableNameBuf, &tableToken.type); bool dbIncluded = false; // Check if the table name available or not if (tscValidateName(&tableToken, true, &dbIncluded) != TSDB_CODE_SUCCESS) { @@ -1839,7 +1840,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLine const char *start, *cur; int32_t ret = TSDB_CODE_SUCCESS; int len = 0; - char key[] = "_ts"; + char key[] = "ts"; char *value = NULL; start = cur = *index; @@ -1870,24 +1871,14 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLine bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) { char *val = NULL; - char *cur = key; - char keyLower[TSDB_COL_NAME_LEN]; - size_t keyLen = 0; - while(*cur != '\0') { - keyLower[keyLen] = tolower(*cur); - keyLen++; - cur++; - } - keyLower[keyLen] = '\0'; - - val = taosHashGet(pHash, keyLower, keyLen); + val = taosHashGet(pHash, key, strlen(key)); if (val) { - tscError("SML:0x%"PRIx64" Duplicate key detected:%s", info->id, keyLower); + tscError("SML:0x%"PRIx64" Duplicate key detected:%s", info->id, key); return true; } uint8_t dummy_val = 0; - taosHashPut(pHash, keyLower, strlen(key), &dummy_val, sizeof(uint8_t)); + taosHashPut(pHash, key, strlen(key), &dummy_val, sizeof(uint8_t)); return false; } @@ -1925,7 +1916,6 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash pKV->key = calloc(len + TS_ESCAPE_CHAR_SIZE + 1, 1); memcpy(pKV->key, key, len + 1); - strntolower_s(pKV->key, pKV->key, (int32_t)len); addEscapeCharToString(pKV->key, len); tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); *index = cur + 1; @@ -2053,7 +2043,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index if (*cur == '\\') { escapeSpecialCharacter(1, &cur); } - pSml->stableName[len] = tolower(*cur); + pSml->stableName[len] = *cur; cur++; len++; } @@ -2129,7 +2119,6 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, if (!isField && childTableNameLen != 0 && strcasecmp(pkv->key, childTableName) == 0) { smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1); memcpy(smlData->childTableName, pkv->value, pkv->length); - strntolower_s(smlData->childTableName, smlData->childTableName, (int32_t)pkv->length); addEscapeCharToString(smlData->childTableName, (int32_t)pkv->length); free(pkv->key); free(pkv->value); diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c index 98a836810a9a2501761dfce38b48be1498267561..d064ede134129d796928768910c56573712319d1 100644 --- a/src/client/src/tscParseOpenTSDB.c +++ b/src/client/src/tscParseOpenTSDB.c @@ -65,7 +65,7 @@ 
static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, } } - pSml->stableName[len] = tolower(*cur); + pSml->stableName[len] = *cur; cur++; len++; @@ -241,7 +241,6 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj pKV->key = tcalloc(len + TS_ESCAPE_CHAR_SIZE + 1, 1); memcpy(pKV->key, key, len + 1); - strntolower_s(pKV->key, pKV->key, (int32_t)len); addEscapeCharToString(pKV->key, len); //tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); *index = cur + 1; @@ -327,7 +326,6 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, *childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1); memcpy(*childTableName, pkv->value, pkv->length); (*childTableName)[pkv->length] = '\0'; - strntolower_s(*childTableName, *childTableName, (int32_t)pkv->length); addEscapeCharToString(*childTableName, pkv->length); tfree(pkv->key); tfree(pkv->value); @@ -515,7 +513,6 @@ static int32_t parseMetricFromJSON(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlL */ tstrncpy(pSml->stableName, metric->valuestring, stableLen + 1); - strntolower_s(pSml->stableName, pSml->stableName, (int32_t)stableLen); addEscapeCharToString(pSml->stableName, (int32_t)stableLen); return TSDB_CODE_SUCCESS; @@ -546,7 +543,6 @@ static int32_t parseTimestampFromJSONObj(cJSON *root, int64_t *tsVal, SSmlLinesI } size_t typeLen = strlen(type->valuestring); - strntolower_s(type->valuestring, type->valuestring, (int32_t)typeLen); if (typeLen == 1 && type->valuestring[0] == 's') { //seconds *tsVal = (int64_t)(*tsVal * 1e9); @@ -915,7 +911,6 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, size_t idLen = strlen(id->valuestring); *childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char)); memcpy(*childTableName, id->valuestring, idLen); - strntolower_s(*childTableName, *childTableName, (int32_t)idLen); addEscapeCharToString(*childTableName, (int32_t)idLen); //check duplicate IDs @@ -954,7 +949,6 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, } pkv->key = tcalloc(keyLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char)); strncpy(pkv->key, tag->string, keyLen); - strntolower_s(pkv->key, pkv->key, (int32_t)keyLen); addEscapeCharToString(pkv->key, (int32_t)keyLen); //value ret = parseValueFromJSON(tag, pkv, info); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 66286476024d465e2c08bf83f06195640c5315d3..215861295fa0ff94352956fa3fd740ace9e90766 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1623,7 +1623,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pRes->qId = 0; pRes->numOfRows = 0; - strtolower(pSql->sqlstr, sql); + strcpy(pSql->sqlstr, sql); tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); if (tscIsInsertData(pSql->sqlstr)) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b2316bf39e950417ee978d3bf3d106084dc320a4..9c8ca49f9b1f345d312fd8422c50be1b6bec4f68 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -436,10 +436,12 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) { int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) { - const char *msg1 = "invalidate function name"; + const char *msg1 = "invalid function name or length"; const char *msg2 = "path is too long"; const char *msg3 = "invalid outputtype"; + #ifdef LUA_EMBEDDED const char *msg4 = "invalid script"; + #endif const char 
*msg5 = "invalid dyn lib"; SSqlCmd *pCmd = &pSql->cmd; @@ -478,9 +480,12 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) { } //validate *.lua or .so int32_t pathLen = (int32_t)strlen(createInfo->path.z); +#ifdef LUA_EMBEDDED if ((pathLen > 4) && (0 == strncmp(createInfo->path.z + pathLen - 4, ".lua", 4)) && !isValidScript(buf, len)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); - } else if (pathLen > 3 && (0 == strncmp(createInfo->path.z + pathLen - 3, ".so", 3))) { + } else +#endif + if (pathLen > 3 && (0 == strncmp(createInfo->path.z + pathLen - 3, ".so", 3))) { void *handle = taosLoadDll(createInfo->path.z); taosCloseDll(handle); if (handle == NULL) { @@ -1483,7 +1488,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { const char* msg3 = "duplicated column names"; const char* msg4 = "invalid data type"; const char* msg5 = "invalid binary/nchar column length"; - const char* msg6 = "invalid column name"; + const char* msg6 = "invalid column name or length"; const char* msg7 = "too many columns"; // number of fields no less than 2 @@ -1554,7 +1559,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC const char* msg3 = "duplicated column names"; //const char* msg4 = "timestamp not allowed in tags"; const char* msg5 = "invalid data type in tags"; - const char* msg6 = "invalid tag name"; + const char* msg6 = "invalid tag name or length"; const char* msg7 = "invalid binary/nchar tag length"; // number of fields at least 1 @@ -1623,7 +1628,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC */ int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { const char* msg3 = "tag length too long"; - const char* msg4 = "invalid tag name"; + const char* msg4 = "invalid tag name or length"; const char* msg5 = "invalid binary/nchar tag length"; const char* msg6 = "invalid data type in tags"; const char* msg7 = "too many columns"; @@ -1696,7 +1701,7 @@ int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { const char* msg1 = "too many columns"; const char* msg3 = "column length too long"; const char* msg4 = "invalid data type"; - const char* msg5 = "invalid column name"; + const char* msg5 = "invalid column name or length"; const char* msg6 = "invalid column length"; // assert(pCmd->numOfClause == 1); @@ -2054,7 +2059,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg8 = "not support distinct in nest query"; const char* msg9 = "_block_dist not support subquery, only support stable/table"; const char* msg10 = "not support group by in block func"; - const char* msg11 = "invalid alias name"; + const char* msg11 = "invalid alias name or length"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2394,7 +2399,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS } int16_t resType = 0; - int16_t resBytes = 0; + int32_t resBytes = 0; int32_t interBufSize = 0; getResultDataInfo(pSchema->type, pSchema->bytes, f, 0, &resType, &resBytes, &interBufSize, 0, false, pUdfInfo); @@ -2633,7 +2638,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int16_t resultType = 0; - int16_t resultSize = 0; + int32_t resultSize = 0; int32_t intermediateResSize = 0; if (getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize, @@ -2892,7 +2897,7 @@ int32_t 
addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tVariant* pVariant = &pParamElem[1].pNode->value; int16_t resultType = pSchema->type; - int16_t resultSize = pSchema->bytes; + int32_t resultSize = pSchema->bytes; int32_t interResult = 0; char val[8] = {0}; @@ -3074,7 +3079,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col s = pTagSchema[index.columnIndex]; } - int16_t bytes = 0; + int32_t bytes = 0; int16_t type = 0; int32_t inter = 0; @@ -3101,7 +3106,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col int32_t inter = 0; int16_t resType = 0; - int16_t bytes = 0; + int32_t bytes = 0; getResultDataInfo(TSDB_DATA_TYPE_INT, 4, TSDB_FUNC_BLKINFO, 0, &resType, &bytes, &inter, 0, 0, NULL); @@ -3154,7 +3159,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col int32_t inter = 0; int16_t resType = 0; - int16_t bytes = 0; + int32_t bytes = 0; getResultDataInfo(TSDB_DATA_TYPE_INT, 4, functionId, 0, &resType, &bytes, &inter, 0, false, pUdfInfo); SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resType, bytes, getNewResColId(pCmd), inter, false); @@ -3474,7 +3479,7 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { assert(tscGetNumOfTags(pTableMetaInfo->pTableMeta) >= 0); - int16_t bytes = 0; + int32_t bytes = 0; int16_t type = 0; int32_t interBytes = 0; @@ -6361,6 +6366,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { // the schema is located after the pMsg body, then followed by true tag value char* d = pUpdateMsg->data; SSchema* pTagCols = tscGetTableTagSchema(pTableMeta); + for (int i = 0; i < numOfTags; ++i) { STColumn* pCol = (STColumn*) d; pCol->colId = htons(pTagCols[i].colId); @@ -6415,6 +6421,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; + + //handle Escape character backstick + bool inEscape = false; + if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) { + inEscape = true; + name.type = TK_ID; + } + if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(pMsg, msg17); } @@ -6425,6 +6439,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char name1[TSDB_COL_NAME_LEN] = {0}; tstrncpy(name1, pItem->pVar.pz, sizeof(name1)); + + int32_t nameLen = pItem->pVar.nLen; + if (inEscape) { + memmove(name1, name1 + 1, nameLen); + name1[nameLen - TS_ESCAPE_CHAR_SIZE] = '\0'; + } + TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { @@ -6440,11 +6461,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; + //handle Escape character backstick + bool inEscape = false; if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) { - memmove(name.z, name.z + 1, name.n); - name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0'; - name.n -= TS_ESCAPE_CHAR_SIZE; + inEscape = true; + name.type = TK_ID; } if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { @@ -6480,6 +6502,13 @@ 
int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     if (nLen >= TSDB_MAX_BYTES_PER_ROW) {
       return invalidOperationMsg(pMsg, msg24);
     }
+
+    if (inEscape) {
+      memmove(name.z, name.z + 1, name.n);
+      name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0';
+      name.n -= TS_ESCAPE_CHAR_SIZE;
+    }
+
     TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
     tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
   }else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
@@ -6763,6 +6792,10 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
 }
 
 int32_t validateColumnName(char* name) {
+  if (strlen(name) == 0) {
+    return TSDB_CODE_TSC_INVALID_OPERATION;
+  }
+
   bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name));
   if (ret) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 52a918bbe22589d85fc89cbff8249065129f1618..98693c94f1d68c194946fbf8b4c00e92c410c9ea 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -1536,7 +1536,17 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
     pMsg += sizeof(SCreateTableMsg);
 
     SCreatedTableInfo* p = taosArrayGet(list, i);
-    strcpy(pCreate->tableName, p->fullname);
+    // pCreate->tableName is a fixed char array of 237 bytes, while p->fullname is a
+    // heap-allocated string, so check the length before copying to avoid overflowing
+    // the destination buffer; over-long names are truncated.
+    if (strlen(p->fullname) >= 237) {
+      tscError("table name is longer than 236 characters and will be truncated");
+      strncpy(pCreate->tableName, p->fullname, 237);
+      pCreate->tableName[236] = '\0';  // ensure the truncated name is NUL-terminated
+    } else {
+      strcpy(pCreate->tableName, p->fullname);
+    }
+
     pCreate->igExists = (p->igExist)? 
1 : 0; // use dbinfo from table id without modifying current db info @@ -3076,12 +3086,16 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { pSql->rootObj->retryReason = pSql->retryReason; + SSqlObj *tmpSql = pSql->rootObj; + tscFreeSubobj(pSql->rootObj); + tfree(tmpSql->pSubs); + SArray* pNameList = taosArrayInit(1, POINTER_BYTES); SArray* vgroupList = taosArrayInit(1, POINTER_BYTES); char* n = strdup(name); taosArrayPush(pNameList, &n); - code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true); + code = getMultiTableMetaFromMnode(tmpSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true); taosArrayDestroyEx(pNameList, freeElem); taosArrayDestroyEx(vgroupList, freeElem); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 89da3c5640c6523d4d2a816b8ae0293310c5830a..6abdab4880f837c505eeba10b0dd9af6c1b86974 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -29,6 +29,7 @@ #include "ttimer.h" #include "tscProfile.h" +static char clusterDefaultId[] = "clusterDefaultId"; static bool validImpl(const char* str, size_t maxsize) { if (str == NULL) { return false; @@ -193,7 +194,9 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass, tscBuildAndSendRequest(pSql, NULL); tsem_wait(&pSql->rspSem); - + if (0 == strlen(pSql->pTscObj->clusterId)) { + memcpy(pSql->pTscObj->clusterId, clusterDefaultId, strlen(clusterDefaultId)); + } pSql->pTscObj->pClusterInfo = (SClusterInfo *)tscAcquireClusterInfo(pSql->pTscObj->clusterId); if (pSql->res.code != TSDB_CODE_SUCCESS) { terrno = pSql->res.code; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2412b8336efbcf289fc4d2e11796b01b02f8174d..9309f70d140b6d5cebcfa9fdf572c464b05c8df6 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2000,7 +2000,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter colIndex.columnIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, tagColId); - int16_t bytes = 0; + int32_t bytes = 0; int16_t type = 0; int32_t inter = 0; @@ -2636,7 +2636,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pRes->qId = 0x1; // hack the qhandle check - const uint32_t nBufferSize = (1u << 18u); // 256KB, default buffer size + uint32_t nBufferSize = (1u << 18u); // 256KB, default buffer size SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -2652,7 +2652,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return ret; } - ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self); + ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, &nBufferSize, pSql->self); if (ret != 0) { pRes->code = ret; tscAsyncResultOnError(pSql); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index edb8169f761e2b5aaba1ddfd7cda8a9008298948..bbd448e2d8b5069fae438d7adb9de14a31446d1b 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -47,7 +47,17 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process. 
static void *tscCheckDiskUsageTmr; void *tscRpcCache; // cache to keep rpc obj int32_t tscNumOfThreads = 1; // num of rpc threads +#ifdef _TD_POWER_ +char tscLogFileName[12] = "powerlog"; +#elif (_TD_TQ_ == true) +char tscLogFileName[12] = "tqlog"; +#elif (_TD_PRO_ == true) +char tscLogFileName[12] = "prolog"; +#elif (_TD_KH_ == true) +char tscLogFileName[12] = "khclientlog"; +#else char tscLogFileName[12] = "taoslog"; +#endif int tscLogFileNum = 10; static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently @@ -107,7 +117,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry rpcObj.pDnodeConn = rpcOpen(&rpcInit); if (rpcObj.pDnodeConn == NULL) { pthread_mutex_unlock(&rpcObjMutex); - tscError("failed to init connection to TDengine"); + tscError("failed to init connection to server"); return -1; } @@ -133,8 +143,8 @@ void tscClusterInfoDestroy(SClusterInfo *pObj) { void *tscAcquireClusterInfo(const char *clusterId) { pthread_mutex_lock(&clusterMutex); - size_t len = strlen(clusterId); + SClusterInfo *pObj = NULL; SClusterInfo **ppObj = taosHashGet(tscClusterMap, clusterId, len); if (ppObj == NULL || *ppObj == NULL) { @@ -210,10 +220,10 @@ void taos_init_imp(void) { taosInitNotes(); rpcInit(); - +#ifdef LUA_EMBEDDED scriptEnvPoolInit(); - - tscDebug("starting to initialize TAOS client ..."); +#endif + tscDebug("starting to initialize client ..."); tscDebug("Local End Point is:%s", tsLocalEp); } @@ -276,7 +286,9 @@ void taos_cleanup(void) { } if (tscEmbedded == 0) { + #ifdef LUA_EMBEDDED scriptEnvPoolCleanup(); + #endif } int32_t id = tscObjRef; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 50b9a8fd7eea703dffe34f8ad5eb204b310f0d7e..94b4b45eda919e704f2551624b120081d903f50b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -268,10 +268,6 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM && functionId != TSDB_FUNC_TS_COMP && - functionId != TSDB_FUNC_DIFF && - functionId != TSDB_FUNC_DERIVATIVE && - functionId != TSDB_FUNC_MAVG && - functionId != TSDB_FUNC_CSUM && functionId != TSDB_FUNC_TS_DUMMY && functionId != TSDB_FUNC_TID_TAG && functionId != TSDB_FUNC_CEIL && @@ -3462,6 +3458,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { pQueryInfo->sessionWindow = pSrc->sessionWindow; pQueryInfo->pTableMetaInfo = NULL; pQueryInfo->multigroupResult = pSrc->multigroupResult; + pQueryInfo->stateWindow = pSrc->stateWindow; pQueryInfo->bufLen = pSrc->bufLen; pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery; @@ -4866,7 +4863,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI } } - pse->colInfo.flag = TSDB_COL_NORMAL; + pse->colInfo.flag = pSource->base.colInfo.flag; //TSDB_COL_NORMAL; pse->resType = pSource->base.resType; pse->resBytes = pSource->base.resBytes; strncpy(pse->colInfo.name, pSource->base.aliasName, tListLen(pse->colInfo.name)); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index bd201d980017522d0e32f6124290305d5b136f8d..317d48ea5987935c5d53af6ad578834071643f26 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -148,6 +148,7 @@ extern char tsMqttTopic[]; // monitor extern int8_t tsEnableMonitorModule; +extern int8_t tsMonitorReplica; extern char tsMonitorDbName[]; extern char tsInternalPass[]; extern int32_t tsMonitorInterval; diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h 
index 22a6955026f4ff9adaf0cd8d262652ef75f534db..7a401d8a7f71c094654d06a2ed37ae3fd7fc9c94 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -51,7 +51,7 @@ typedef struct SSqlExpr { int16_t functionId; // function id in aAgg array int16_t resType; // return value type - int16_t resBytes; // length of return value + int32_t resBytes; // length of return value int32_t interBytes; // inter result buffer size int16_t colType; // table column type diff --git a/src/common/inc/tvariant.h b/src/common/inc/tvariant.h index c69a662846e15d515136d51a95b39d488a282f5c..03b17bdc463d6b5d0f812eafa723c886964d35fc 100644 --- a/src/common/inc/tvariant.h +++ b/src/common/inc/tvariant.h @@ -39,7 +39,7 @@ typedef struct tVariant { bool tVariantIsValid(tVariant *pVar); -void tVariantCreate(tVariant *pVar, SStrToken *token); +void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape); void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32_t type); diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index f13a35f0b9ae287f04cccf1785fce77f8d5c0678..62baaadbac2596bc66bf5955262a3d5ff35fcfc1 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -193,6 +193,7 @@ char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // # // monitor int8_t tsEnableMonitorModule = 1; +int8_t tsMonitorReplica = 1; char tsMonitorDbName[TSDB_DB_NAME_LEN] = "log"; char tsInternalPass[] = "secretkey"; int32_t tsMonitorInterval = 30; // seconds @@ -669,6 +670,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_SECOND; taosInitConfigOption(cfg); + cfg.option = "monitorReplica"; + cfg.ptr = &tsMonitorReplica; + cfg.valType = TAOS_CFG_VTYPE_INT8; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 1; + cfg.maxValue = 3; + cfg.ptrLength = 1; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "offlineThreshold"; cfg.ptr = &tsOfflineThreshold; cfg.valType = TAOS_CFG_VTYPE_INT32; diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index fadc6186fa5968414d020085a3ed6bdeb095d54d..f22e3da28b331d455f5f4d73251c37072e1f69fc 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -30,7 +30,7 @@ assert(0); \ } while (0) -void tVariantCreate(tVariant *pVar, SStrToken *token) { +void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape) { int32_t ret = 0; int32_t type = token->type; @@ -81,7 +81,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) { case TSDB_DATA_TYPE_BINARY: { pVar->pz = strndup(token->z, token->n); - pVar->nLen = strRmquoteEscape(pVar->pz, token->n); + pVar->nLen = needRmquoteEscape ? strRmquoteEscape(pVar->pz, token->n) : token->n; break; } case TSDB_DATA_TYPE_TIMESTAMP: { diff --git a/src/dnode/inc/dnodeInt.h b/src/dnode/inc/dnodeInt.h index 1327cd4433fd2e2157becaaf5cb52e2ca0ffe6ef..7abff373834a2f2ddf39dbffb1ebcaadc6991dc9 100644 --- a/src/dnode/inc/dnodeInt.h +++ b/src/dnode/inc/dnodeInt.h @@ -29,8 +29,8 @@ extern "C" { extern int32_t dDebugFlag; -#define dFatal(...) { if (dDebugFlag & DEBUG_FATAL) { taosPrintLog("DND FATAL ", 255, __VA_ARGS__); }} -#define dError(...) { if (dDebugFlag & DEBUG_ERROR) { taosPrintLog("DND ERROR ", 255, __VA_ARGS__); }} +#define dFatal(...) { if (dDebugFlag & DEBUG_FATAL) { taosPrintLog("DND FATAL ", 255, __VA_ARGS__); dnodeIncDnodeError(); }} +#define dError(...) { if (dDebugFlag & DEBUG_ERROR) { taosPrintLog("DND ERROR ", 255, __VA_ARGS__); dnodeIncDnodeError(); }} #define dWarn(...) 
{ if (dDebugFlag & DEBUG_WARN) { taosPrintLog("DND WARN ", 255, __VA_ARGS__); }} #define dInfo(...) { if (dDebugFlag & DEBUG_INFO) { taosPrintLog("DND ", 255, __VA_ARGS__); }} #define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }} diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 0dc10385cfbf648ce90f3dabaf87c9a2ea189b6f..87128521730c9c58f3c3dd9b35ab3f919f6921ec 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -54,6 +54,7 @@ void moduleStop() {} void *tsDnodeTmr = NULL; static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED; +static int64_t tsDnodeErrors = 0; static int32_t dnodeInitStorage(); static void dnodeCleanupStorage(); @@ -88,7 +89,9 @@ static SStep tsDnodeSteps[] = { {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, {"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, +#ifdef LUA_EMBEDDED {"dnode-script", scriptEnvPoolInit, scriptEnvPoolCleanup}, +#endif {"dnode-grant", grantInit, grantCleanUp}, }; @@ -225,6 +228,14 @@ static void dnodeSetRunStatus(SRunStatus status) { tsRunStatus = status; } +int64_t dnodeGetDnodeError() { + return tsDnodeErrors; +} + +void dnodeIncDnodeError() { + atomic_add_fetch_64(&tsDnodeErrors, 1); +} + static void dnodeCheckDataDirOpenned(char *dir) { char filepath[256] = {0}; sprintf(filepath, "%s/.running", dir); diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 98bbbf8f73b26535030c5096f128a7f84c2b9f61..f62e0c0df41f2fe399d0f4c1c8e661fcd0ef91b9 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -28,8 +28,8 @@ static void (*dnodeProcessShellMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *); static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey); static void * tsShellRpc = NULL; -static int32_t tsQueryReqNum = 0; -static int32_t tsSubmitReqNum = 0; +static int64_t tsQueryReqNum = 0; +static int64_t tsSubmitReqNum = 0; int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_SUBMIT] = dnodeDispatchToVWriteQueue; @@ -136,9 +136,9 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { } if (pMsg->msgType == TSDB_MSG_TYPE_QUERY) { - atomic_fetch_add_32(&tsQueryReqNum, 1); + atomic_fetch_add_64(&tsQueryReqNum, 1); } else if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { - atomic_fetch_add_32(&tsSubmitReqNum, 1); + atomic_fetch_add_64(&tsSubmitReqNum, 1); } else {} if ( dnodeProcessShellMsgFp[pMsg->msgType] ) { @@ -237,15 +237,31 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) { } } -SStatisInfo dnodeGetStatisInfo() { - SStatisInfo info = {0}; +SDnodeStatisInfo dnodeGetStatisInfo() { + SDnodeStatisInfo info = {0}; if (dnodeGetRunStatus() == TSDB_RUN_STATUS_RUNING) { #ifdef HTTP_EMBEDDED info.httpReqNum = httpGetReqCount(); #endif - info.queryReqNum = atomic_exchange_32(&tsQueryReqNum, 0); - info.submitReqNum = atomic_exchange_32(&tsSubmitReqNum, 0); + info.queryReqNum = atomic_exchange_64(&tsQueryReqNum, 0); + info.submitReqNum = atomic_exchange_64(&tsSubmitReqNum, 0); } return info; } + +int32_t dnodeGetHttpStatusInfo(int32_t index) { + int32_t httpStatus = 0; +#ifdef HTTP_EMBEDDED + httpStatus = httpGetStatusCodeCount(index); +#endif + return httpStatus; +} + +void dnodeClearHttpStatusInfo() { +#ifdef HTTP_EMBEDDED + for (int i = 0; i < MAX_HTTP_STATUS_CODE_NUM; ++i) { + httpClearStatusCodeCount(i); + } 
+#endif +} diff --git a/src/inc/dnode.h b/src/inc/dnode.h index 5ecaf19f61a022bae849c2f946acb0ee693aeb59..117b8a5657046abb6412144b352fca79b9d590da 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -23,13 +23,16 @@ extern "C" { #include "trpc.h" #include "taosmsg.h" +#define MAX_HTTP_STATUS_CODE_NUM 63 typedef struct { - int32_t queryReqNum; - int32_t submitReqNum; - int32_t httpReqNum; -} SStatisInfo; + int64_t queryReqNum; + int64_t submitReqNum; + int64_t httpReqNum; +} SDnodeStatisInfo; -SStatisInfo dnodeGetStatisInfo(); +SDnodeStatisInfo dnodeGetStatisInfo(); +int32_t dnodeGetHttpStatusInfo(int32_t index); +void dnodeClearHttpStatusInfo(); bool dnodeIsFirstDeploy(); bool dnodeIsMasterEp(char *ep); @@ -37,6 +40,8 @@ void dnodeGetEpSetForPeer(SRpcEpSet *epSet); void dnodeGetEpSetForShell(SRpcEpSet *epSet); int32_t dnodeGetDnodeId(); void dnodeGetClusterId(char *clusterId); +int64_t dnodeGetDnodeError(); +void dnodeIncDnodeError(); void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr); diff --git a/src/inc/http.h b/src/inc/http.h index 0d4c386cbf1e784834019c5d75847ab20b7ce8e9..7333042641a8f7bda1574e77e373c7e9a258751d 100644 --- a/src/inc/http.h +++ b/src/inc/http.h @@ -22,7 +22,9 @@ extern "C" { #include -int32_t httpGetReqCount(); +int64_t httpGetReqCount(); +int32_t httpGetStatusCodeCount(int index); +int32_t httpClearStatusCodeCount(int index); int32_t httpInitSystem(); int32_t httpStartSystem(); void httpStopSystem(); diff --git a/src/inc/monitor.h b/src/inc/monitor.h index d2e5e06487dbdf311cef6da125d7ba3050b53a4d..6033f91ee72acb09f23f07fc32e6a40cf0136d76 100644 --- a/src/inc/monitor.h +++ b/src/inc/monitor.h @@ -22,6 +22,17 @@ extern "C" { #include +#define monSaveLogs(level, ...) 
{ \ + monSaveLog(level, __VA_ARGS__); \ + monSaveDnodeLog(level, __VA_ARGS__); \ +} + +typedef struct { + const char * name; + int32_t code; + int32_t index; +} SMonHttpStatus; + typedef struct { char * acctId; int64_t currentPointsPerSecond; @@ -53,9 +64,16 @@ void monStopSystem(); void monCleanupSystem(); void monSaveAcctLog(SAcctMonitorObj *pMonObj); void monSaveLog(int32_t level, const char *const format, ...); +void monSaveDnodeLog(int32_t level, const char *const format, ...); void monExecuteSQL(char *sql); typedef void (*MonExecuteSQLCbFP)(void *param, TAOS_RES *, int code); void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void* param); +void monIncQueryReqCnt(); +void monIncSubmitReqCnt(); +int32_t monFetchQueryReqCnt(); +int32_t monFetchSubmitReqCnt(); +SMonHttpStatus *monGetHttpStatusHashTableEntry(int32_t code); + #ifdef __cplusplus } #endif diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 9d48ed59cecfffe1ea36971fa502ed9dae3fb0bc..f05d1466371f32358e3442f53735028b20641d16 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -80,12 +80,17 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_DATA_NULL_STR_L "null" #define TSDB_DEFAULT_USER "root" + #ifdef _TD_POWER_ #define TSDB_DEFAULT_PASS "powerdb" #elif (_TD_TQ_ == true) #define TSDB_DEFAULT_PASS "tqueue" #elif (_TD_PRO_ == true) #define TSDB_DEFAULT_PASS "prodb" +#elif (_TD_KH_ == true) +#define TSDB_DEFAULT_PASS "khroot" +#elif (_TD_JH_ == true) +#define TSDB_DEFAULT_PASS "jhdata" #else #define TSDB_DEFAULT_PASS "taosdata" #endif diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index ea6a69aa386261d5742c732794580bbc14daf831..0f291936f5519b1db7f98b098e5f9f82303cd0f5 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -438,7 +438,7 @@ typedef struct SColumnFilterList { typedef struct SColumnInfo { int16_t colId; int16_t type; - int16_t bytes; + int32_t bytes; SColumnFilterList flist; } SColumnInfo; diff --git a/src/inc/vnode.h b/src/inc/vnode.h index b3291645c00be17283f7d078acb2d4c9a2629ece..29766e39842e9b53243e4d367116c8bc9ed35f7a 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -22,6 +22,12 @@ extern "C" { #include "trpc.h" #include "twal.h" +typedef struct { + int64_t submitReqSucNum; + int64_t submitRowNum; + int64_t submitRowSucNum; +} SVnodeStatisInfo; + typedef struct { int32_t len; void * rsp; @@ -62,7 +68,7 @@ int32_t vnodeOpen(int32_t vgId); int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); int32_t vnodeSync(int32_t vgId); int32_t vnodeClose(int32_t vgId); -int32_t vnodeCompact(int32_t vgId); +int32_t vnodeCompact(int32_t vgId); // vnodeMgmt int32_t vnodeInitMgmt(); @@ -80,6 +86,8 @@ int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcM void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite); int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet); +SVnodeStatisInfo vnodeGetStatisInfo(); + // vnodeSync void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code, bool force); diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index d69a267707470e7a5df4edfa85764aae580a13a6..7230701add8591c1995dc6b2e7e23ed03335f6f1 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -36,8 +36,14 @@ ELSEIF (TD_WINDOWS) IF (TD_POWER) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power) + ELSEIF (TD_TQ) + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME tq) ELSEIF (TD_PRO) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME prodbc) + ELSEIF (TD_KH) + 
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME khclient) + ELSEIF (TD_JH) + SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME jh_taos) ELSE () SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) ENDIF () diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 03ccfe2d576df76407bc7a22cf17d884dd2bad51..9c5794278c5bd9545fb6260e4f8442d8c9e8cad9 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -28,8 +28,16 @@ #define MAX_HISTORY_SIZE 1000 #define MAX_COMMAND_SIZE 1048586 -#ifdef _TD_PRO_ +#ifdef _TD_POWER_ + #define HISTORY_FILE ".power_history" +#elif (_TD_TQ_ == true) + #define HISTORY_FILE ".tq_history" +#elif (_TD_PRO_ == true) #define HISTORY_FILE ".prodb_history" +#elif (_TD_KH_ == true) + #define HISTORY_FILE ".kh_history" +#elif (_TD_JH_ == true) + #define HISTORY_FILE ".jh_taos_history" #else #define HISTORY_FILE ".taos_history" #endif diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 8ab9bfcf4e7685081cd6f09990f5365d94c4094b..e3e86e518690c8c4f773471b93bdb381a5bb8dc1 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -34,28 +34,36 @@ char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n" "Copyright (c) 2020 by PowerDB, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "power> "; - char CONTINUE_PROMPT[] = " -> "; int prompt_size = 7; #elif (_TD_TQ_ == true) char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "tq> "; - -char CONTINUE_PROMPT[] = " -> "; +char CONTINUE_PROMPT[] = " -> "; int prompt_size = 4; #elif (_TD_PRO_ == true) char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n" "Copyright (c) 2020 by Hanatech, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "ProDB> "; - char CONTINUE_PROMPT[] = " -> "; int prompt_size = 7; +#elif (_TD_KH_ == true) +char CLIENT_VERSION[] = "Welcome to the KingHistorian shell from %s, Client Version:%s\n" + "Copyright (c) 2021 by Hanatech, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "kh> "; +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 4; +#elif (_TD_JH_ == true) +char CLIENT_VERSION[] = "Welcome to the jh_iot shell from %s, Client Version:%s\n" + "Copyright (c) 2021 by jinheng, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "jh_taos> "; +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 9; #else char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; - char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; #endif diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 0301fe6df2a6a1fbf8a75507193dfacb55385895..0babd88333c846c1f0b5dbe4baede4a6d38cbcdd 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -17,6 +17,8 @@ #include "taos.h" #include "shellCommand.h" +#define SHELL_INPUT_MAX_COMMAND_SIZE 500000 + extern char configDir[]; char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" @@ -274,32 +276,35 @@ int32_t shellReadCommand(TAOS *con, char command[]) { // Read input. 
void *console = GetStdHandle(STD_INPUT_HANDLE); unsigned long read; - wchar_t c; + wchar_t *c= (wchar_t *)calloc(SHELL_INPUT_MAX_COMMAND_SIZE, sizeof(wchar_t)); char mbStr[16]; while (1) { - int ret = ReadConsole(console, &c, 1, &read, NULL); - int size = WideCharToMultiByte(CP_UTF8, 0, &c, read, mbStr, sizeof(mbStr), NULL, NULL); - mbStr[size] = 0; - switch (c) { - case '\n': - if (isReadyGo(&cmd)) { - sprintf(command, "%s%s", cmd.buffer, cmd.command); - free(cmd.buffer); - cmd.buffer = NULL; - free(cmd.command); - cmd.command = NULL; - return 0; - } else { - shellPrintContinuePrompt(); - updateBuffer(&cmd); - } - break; - case '\r': - break; - default: - for (int i = 0; i < size; ++i) { - insertChar(&cmd, mbStr[i]); - } + int ret = ReadConsole(console, c, SHELL_INPUT_MAX_COMMAND_SIZE, &read, NULL); + for (int input_index = 0; input_index < read; input_index++) { + int size = WideCharToMultiByte(CP_UTF8, 0, &c[input_index], 1, mbStr, sizeof(mbStr), NULL, NULL); + mbStr[size] = 0; + switch (c[input_index]) { + case '\n': + if (isReadyGo(&cmd)) { + sprintf(command, "%s%s", cmd.buffer, cmd.command); + free(cmd.buffer); + cmd.buffer = NULL; + free(cmd.command); + cmd.command = NULL; + free(c); + return 0; + } else { + shellPrintContinuePrompt(); + updateBuffer(&cmd); + } + break; + case '\r': + break; + default: + for (int i = 0; i < size; ++i) { + insertChar(&cmd, mbStr[i]); + } + } } } @@ -327,6 +332,20 @@ void *shellLoopQuery(void *arg) { return NULL; } -void get_history_path(char *history) { sprintf(history, "C:/TDengine/%s", HISTORY_FILE); } +void get_history_path(char *history) { +#ifdef _TD_POWER_ + sprintf(history, "C:/PowerDB/%s", HISTORY_FILE); +#elif (_TD_TQ_ == true) + sprintf(history, "C:/TQueue/%s", HISTORY_FILE); +#elif (_TD_PRO_ == true) + sprintf(history, "C:/ProDB/%s", HISTORY_FILE); +#elif (_TD_KH_ == true) + sprintf(history, "C:/KingHistorian/%s", HISTORY_FILE); +#elif (_TD_JH_ == true) + sprintf(history, "C:/jh_iot/%s", HISTORY_FILE); +#else + sprintf(history, "C:/TDengine/%s", HISTORY_FILE); +#endif +} void exitShell() { exit(EXIT_SUCCESS); } diff --git a/src/kit/taosdemo/inc/demo.h b/src/kit/taosdemo/inc/demo.h index ff605afcbb05d2c27abd382a249456b52763f673..37dd01449da7e67e9165e0b22d7160d508595e52 100644 --- a/src/kit/taosdemo/inc/demo.h +++ b/src/kit/taosdemo/inc/demo.h @@ -141,6 +141,8 @@ extern char configDir[]; { TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT } #define DEFAULT_DATATYPE \ { "FLOAT", "INT", "FLOAT" } +#define DEFAULT_DATALENGTH \ + { 4, 4, 4 } #define DEFAULT_BINWIDTH 64 #define DEFAULT_COL_COUNT 4 #define DEFAULT_LEN_ONE_ROW 76 @@ -156,6 +158,7 @@ extern char configDir[]; #define DEFAULT_TOTAL_INSERT 0 #define DEFAULT_TOTAL_AFFECT 0 #define DEFAULT_DEMO_MODE true +#define DEFAULT_CHINESE_OPT false #define DEFAULT_CREATE_BATCH 10 #define DEFAULT_SUB_INTERVAL 10000 #define DEFAULT_QUERY_INTERVAL 10000 @@ -306,6 +309,7 @@ typedef struct SArguments_S { bool async_mode; char data_type[MAX_NUM_COLUMNS + 1]; char * dataType[MAX_NUM_COLUMNS + 1]; + int32_t data_length[MAX_NUM_COLUMNS + 1]; uint32_t binwidth; uint32_t columnCount; uint64_t lenOfOneRow; @@ -326,6 +330,7 @@ typedef struct SArguments_S { uint64_t totalInsertRows; uint64_t totalAffectedRows; bool demo_mode; // use default column name and semi-random data + bool chinese; } SArguments; typedef struct SColumn_S { diff --git a/src/kit/taosdemo/src/demoCommandOpt.c b/src/kit/taosdemo/src/demoCommandOpt.c index 
ede4c71d1de7376c85d681299d35ca2f2075d45b..59d20710a02e453c63af4262a151fec32e59e7d4 100644 --- a/src/kit/taosdemo/src/demoCommandOpt.c +++ b/src/kit/taosdemo/src/demoCommandOpt.c @@ -972,6 +972,10 @@ int parse_args(int argc, char *argv[]) { (0 == strncmp(argv[i], "--escape-character", strlen("--escape-character")))) { g_args.escapeChar = true; + } else if ((0 == strncmp(argv[i], "-C", strlen("-C"))) || + (0 == strncmp(argv[i], "--chinese", + strlen("--chinese")))) { + g_args.chinese = true; } else if ((strcmp(argv[i], "-N") == 0) || (0 == strcmp(argv[i], "--normal-table"))) { g_args.demo_mode = false; @@ -1338,9 +1342,10 @@ void setParaFromArg() { g_args.prepared_rand = min(g_args.insertRows, MAX_PREPARED_RAND); g_Dbs.aggr_func = g_args.aggr_func; - char dataString[TSDB_MAX_BYTES_PER_ROW]; - char * data_type = g_args.data_type; - char **dataType = g_args.dataType; + char dataString[TSDB_MAX_BYTES_PER_ROW]; + char * data_type = g_args.data_type; + char ** dataType = g_args.dataType; + int32_t *data_length = g_args.data_length; memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); @@ -1469,6 +1474,47 @@ void setParaFromArg() { } else { g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.db[0].superTbls[0].tagCount = 0; + for (int i = 0; i < MAX_NUM_COLUMNS; i++) { + if (data_type[i] == TSDB_DATA_TYPE_NULL) { + break; + } + if (1 == regexMatch(dataType[i], + "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", + REG_ICASE | REG_EXTENDED)) { + sscanf(dataType[i], "%[^(](%[^)]", type, length); + data_length[i] = atoi(length); + } else { + switch (data_type[i]) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + data_length[i] = sizeof(char); + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + data_length[i] = sizeof(int16_t); + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + data_length[i] = sizeof(int32_t); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + data_length[i] = sizeof(int64_t); + break; + case TSDB_DATA_TYPE_FLOAT: + data_length[i] = sizeof(float); + break; + case TSDB_DATA_TYPE_DOUBLE: + data_length[i] = sizeof(double); + break; + default: + data_length[i] = g_args.binwidth; + break; + } + } + } } } @@ -1612,7 +1658,6 @@ void *queryStableAggrFunc(void *sarg) { if (code != 0) { errorPrint("Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); - taos_close(taos); fclose(fp); free(command); return NULL; @@ -1698,8 +1743,15 @@ void *queryNtableAggrFunc(void *sarg) { double totalT = 0; uint64_t count = 0; for (int64_t i = 0; i < ntables; i++) { - sprintf(command, "SELECT %s FROM %s%" PRId64 " WHERE ts>= %" PRIu64, - aggreFunc[j], tb_prefix, i, startTime); + if (g_args.escapeChar) { + sprintf(command, + "SELECT %s FROM `%s%" PRId64 "` WHERE ts>= %" PRIu64, + aggreFunc[j], tb_prefix, i, startTime); + } else { + sprintf(command, + "SELECT %s FROM %s%" PRId64 " WHERE ts>= %" PRIu64, + aggreFunc[j], tb_prefix, i, startTime); + } double t = (double)taosGetTimestampUs(); debugPrint("%s() LN%d, sql command: %s\n", __func__, __LINE__, @@ -1708,9 +1760,9 @@ void *queryNtableAggrFunc(void *sarg) { int32_t code = taos_errno(pSql); if (code != 0) { - errorPrint("Failed to query:%s\n", taos_errstr(pSql)); + errorPrint("Failed to query <%s>, reason:%s\n", command, + taos_errstr(pSql)); taos_free_result(pSql); - taos_close(taos); fclose(fp); free(command); return NULL; diff --git a/src/kit/taosdemo/src/demoData.c b/src/kit/taosdemo/src/demoData.c index 
index 39d7954a2488621cf3fdee859777484a1f38601b..59806864306b9ecf5c3ed35c9be631c50244da58 100644
--- a/src/kit/taosdemo/src/demoData.c
+++ b/src/kit/taosdemo/src/demoData.c
@@ -238,16 +238,81 @@ float UNUSED_FUNC demo_phase_float() {
                      360);
 }

+static int usc2utf8(char* p, int unic) {
+    if ( unic <= 0x0000007F )
+    {
+        *p = (unic & 0x7F);
+        return 1;
+    }
+    else if ( unic >= 0x00000080 && unic <= 0x000007FF )
+    {
+        *(p+1) = (unic & 0x3F) | 0x80;
+        *p = ((unic >> 6) & 0x1F) | 0xC0;
+        return 2;
+    }
+    else if ( unic >= 0x00000800 && unic <= 0x0000FFFF )
+    {
+        *(p+2) = (unic & 0x3F) | 0x80;
+        *(p+1) = ((unic >> 6) & 0x3F) | 0x80;
+        *p = ((unic >> 12) & 0x0F) | 0xE0;
+        return 3;
+    }
+    else if ( unic >= 0x00010000 && unic <= 0x001FFFFF )
+    {
+        *(p+3) = (unic & 0x3F) | 0x80;
+        *(p+2) = ((unic >> 6) & 0x3F) | 0x80;
+        *(p+1) = ((unic >> 12) & 0x3F) | 0x80;
+        *p = ((unic >> 18) & 0x07) | 0xF0;
+        return 4;
+    }
+    else if ( unic >= 0x00200000 && unic <= 0x03FFFFFF )
+    {
+        *(p+4) = (unic & 0x3F) | 0x80;
+        *(p+3) = ((unic >> 6) & 0x3F) | 0x80;
+        *(p+2) = ((unic >> 12) & 0x3F) | 0x80;
+        *(p+1) = ((unic >> 18) & 0x3F) | 0x80;
+        *p = ((unic >> 24) & 0x03) | 0xF8;
+        return 5;
+    }
+    else if ( unic >= 0x04000000 && unic <= 0x7FFFFFFF )
+    {
+        *(p+5) = (unic & 0x3F) | 0x80;
+        *(p+4) = ((unic >> 6) & 0x3F) | 0x80;
+        *(p+3) = ((unic >> 12) & 0x3F) | 0x80;
+        *(p+2) = ((unic >> 18) & 0x3F) | 0x80;
+        *(p+1) = ((unic >> 24) & 0x3F) | 0x80;
+        *p = ((unic >> 30) & 0x01) | 0xFC;
+        return 6;
+    }
+    return 0;
+}
+
 void rand_string(char *str, int size) {
-    str[0] = 0;
-    if (size > 0) {
-        //--size;
-        int n;
-        for (n = 0; n < size; n++) {
-            int key = abs(taosRandom()) % (int)(sizeof(charset) - 1);
-            str[n] = charset[key];
+    if (g_args.chinese) {
+        char* pstr = str;
+        int move = 0;
+        while (size > 0) {
+            // each Chinese character needs 3 bytes of space
+            if (size < 3) {
+                break;
+            }
+            // basic Chinese characters occupy Unicode code points 0x4e00 to 0x9fa5
+            int unic = 0x4e00 + rand() % (0x9fa5 - 0x4e00);
+            move = usc2utf8(pstr, unic);
+            pstr += move;
+            size -= move;
+        }
+    } else {
+        str[0] = 0;
+        if (size > 0) {
+            //--size;
+            int n;
+            for (n = 0; n < size; n++) {
+                int key = abs(taosRandom()) % (int)(sizeof(charset) - 1);
+                str[n] = charset[key];
+            }
+            str[n] = 0;
+        }
-        str[n] = 0;
     }
 }
@@ -868,8 +933,8 @@ int64_t generateStbRowData(SSuperTable *stbInfo, char *recBuf,
     return strlen(recBuf);
 }

-static int64_t generateData(char *recBuf, char *data_type, int64_t timestamp,
-                            int lenOfBinary) {
+static int64_t generateData(char *recBuf, char *data_type, int32_t *data_length,
+                            int64_t timestamp) {
     memset(recBuf, 0, MAX_DATA_SIZE);
     char *pstr = recBuf;
     pstr += sprintf(pstr, "(%" PRId64 "", timestamp);
@@ -915,13 +980,13 @@
             case TSDB_DATA_TYPE_BINARY:
             case TSDB_DATA_TYPE_NCHAR:
-                s = calloc(1, lenOfBinary + 1);
+                s = calloc(1, data_length[i] + 1);
                 if (NULL == s) {
                     errorPrint("%s", "failed to allocate memory\n");
                     return -1;
                 }

-                rand_string(s, lenOfBinary);
+                rand_string(s, data_length[i]);
                 pstr += sprintf(pstr, ",\"%s\"", s);
                 free(s);
                 break;
@@ -1156,20 +1221,18 @@ static int32_t generateDataTailWithoutStb(
     int64_t retLen = 0;

-    char *data_type = g_args.data_type;
-    int lenOfBinary = g_args.binwidth;
+    char *   data_type = g_args.data_type;
+    int32_t *data_length = g_args.data_length;

     if (g_args.disorderRatio) {
         retLen =
-            generateData(data, data_type,
+            generateData(data, data_type, data_length,
                          startTime + getTSRandTail(g_args.timestamp_step, k,
                                                    g_args.disorderRatio,
-                                                   g_args.disorderRange),
-                         lenOfBinary);
+                                                   g_args.disorderRange));
     } else {
-        retLen = generateData(data, data_type,
-                              startTime + g_args.timestamp_step * k,
-                              lenOfBinary);
+        retLen = generateData(data, data_type, data_length,
+                              startTime + g_args.timestamp_step * k);
     }

     if (len > remainderBufLen) break;
diff --git a/src/kit/taosdemo/src/demoInsert.c b/src/kit/taosdemo/src/demoInsert.c
index 4dac128f33a3a9dc4723254f27378a98c90ebf1a..fe36aff5644c452ccb4216b75c6d7057ee6cee76 100644
--- a/src/kit/taosdemo/src/demoInsert.c
+++ b/src/kit/taosdemo/src/demoInsert.c
@@ -781,6 +781,7 @@ int createDatabasesAndStables(char *command) {
             tmfree(cmd);

             if (0 != ret) {
+                tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
                 errorPrint("create super table %" PRIu64 " failed!\n\n", j);
                 continue;
             }
@@ -1043,10 +1044,8 @@ int createChildTables() {
         // normal table
         len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP");
         for (int j = 0; j < g_args.columnCount; j++) {
-            if ((strncasecmp(g_args.dataType[j], "BINARY",
-                             strlen("BINARY")) == 0) ||
-                (strncasecmp(g_args.dataType[j], "NCHAR",
-                             strlen("NCHAR")) == 0)) {
+            if ((strcasecmp(g_args.dataType[j], "BINARY") == 0) ||
+                (strcasecmp(g_args.dataType[j], "NCHAR") == 0)) {
                 snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
                          ",C%d %s(%d)", j, g_args.dataType[j],
                          g_args.binwidth);
@@ -1139,7 +1138,17 @@ void postFreeResource() {
     tmfree(g_randfloat_buff);
     tmfree(g_rand_current_buff);
     tmfree(g_rand_phase_buff);
-
+    tmfree(g_randdouble_buff);
+    tmfree(g_randuint_buff);
+    tmfree(g_randutinyint_buff);
+    tmfree(g_randusmallint_buff);
+    tmfree(g_randubigint_buff);
+    tmfree(g_randint);
+    tmfree(g_randuint);
+    tmfree(g_randbigint);
+    tmfree(g_randubigint);
+    tmfree(g_randfloat);
+    tmfree(g_randdouble);
     tmfree(g_sampleDataBuf);

     for (int l = 0; l < g_args.columnCount; l++) {
diff --git a/src/kit/taosdemo/src/demoJsonOpt.c b/src/kit/taosdemo/src/demoJsonOpt.c
index e74d2906c8f3294f0531145c8f13e5ce776e444f..b8d75ccacac07d225788946611d521b8b79a5c10 100644
--- a/src/kit/taosdemo/src/demoJsonOpt.c
+++ b/src/kit/taosdemo/src/demoJsonOpt.c
@@ -450,6 +450,25 @@ int getMetaFromInsertJsonFile(cJSON *root) {
         goto PARSE_OVER;
     }

+    cJSON *chineseOpt = cJSON_GetObjectItem(root, "chinese");  // yes, no,
+    if (chineseOpt && chineseOpt->type == cJSON_String &&
+        chineseOpt->valuestring != NULL) {
+        if (0 == strncasecmp(chineseOpt->valuestring, "yes", 3)) {
+            g_args.chinese = true;
+        } else if (0 == strncasecmp(chineseOpt->valuestring, "no", 2)) {
+            g_args.chinese = false;
+        } else {
+            g_args.chinese = DEFAULT_CHINESE_OPT;
+        }
+    } else if (!chineseOpt) {
+        g_args.chinese = DEFAULT_CHINESE_OPT;
+    } else {
+        errorPrint(
+            "%s",
+            "failed to read json, chinese input mistake\n");
+        goto PARSE_OVER;
+    }
+
     cJSON *answerPrompt =
         cJSON_GetObjectItem(root, "confirm_parameter_prompt");  // yes, no,
     if (answerPrompt && answerPrompt->type == cJSON_String &&
diff --git a/src/kit/taosdemo/src/demoMain.c b/src/kit/taosdemo/src/demoMain.c
index 4940d7188c999f03da0d021327c58d8a7c1a1b9f..9941f608efa7c0d8a72171383bade43e309fede0 100644
--- a/src/kit/taosdemo/src/demoMain.c
+++ b/src/kit/taosdemo/src/demoMain.c
@@ -45,6 +45,7 @@ SArguments g_args = {
     DEFAULT_SYNC_MODE,      // mode : sync or async
     DEFAULT_DATA_TYPE,      // data_type
     DEFAULT_DATATYPE,       // dataType
+    DEFAULT_DATALENGTH,     // data_length
     DEFAULT_BINWIDTH,       // binwidth
     DEFAULT_COL_COUNT,      // columnCount, timestamp + float + int + float
     DEFAULT_LEN_ONE_ROW,    // lenOfOneRow
@@ -65,6 +66,7 @@ SArguments g_args = {
     DEFAULT_TOTAL_INSERT,   //
totalInsertRows; DEFAULT_TOTAL_AFFECT, // totalAffectedRows; DEFAULT_DEMO_MODE, // demo_mode; + DEFAULT_CHINESE_OPT // chinese }; int main(int argc, char *argv[]) { diff --git a/src/kit/taosdemo/src/demoOutput.c b/src/kit/taosdemo/src/demoOutput.c index 026673ca86edb67d752f3cee58de8ea5a6769247..c253967f8fe270dfbacd3f9dccbfd74bde4487c1 100644 --- a/src/kit/taosdemo/src/demoOutput.c +++ b/src/kit/taosdemo/src/demoOutput.c @@ -313,6 +313,8 @@ void printHelp() { "Table prefix name. By default use 'd'."); printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t", "Use escape character for Both Stable and normmal table name"); + printf("%s%s%s%s\n", indent, "-C, --chinese", "\t", + "Use chinese characters as the data source for binary/nchar data"); printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", "The select sql file."); printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", @@ -414,6 +416,8 @@ void printfInsertMeta() { printf("number of records per req: \033[33m%u\033[0m\n", g_args.reqPerReq); printf("max sql length: \033[33m%" PRIu64 "\033[0m\n", g_args.max_sql_len); + printf("random prepare data: \033[33m%" PRId64 "\033[0m\n", g_args.prepared_rand); + printf("chinese: \033[33m%s\033[0m\n", g_args.chinese?"yes":"no"); printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c new file mode 100644 index 0000000000000000000000000000000000000000..0a573b799ae184042f1126cb179e3909c6a2249b --- /dev/null +++ b/src/kit/taosdemo/taosdemo.c @@ -0,0 +1,12273 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + + +/* + when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. + */ + +#include +#include +#include +#define _GNU_SOURCE +#define CURL_STATICLIB + +#ifdef LINUX +#include +#include +#ifndef _ALPINE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +#include +#include +#endif + +#include +#include +#include "cJSON.h" + +#include "os.h" +#include "taos.h" +#include "taoserror.h" +#include "tutil.h" + +#define REQ_EXTRA_BUF_LEN 1024 +#define RESP_BUF_LEN 4096 + +extern char configDir[]; + +#define STR_INSERT_INTO "INSERT INTO " + +#define MAX_RECORDS_PER_REQ 32766 + +#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. + +#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN +#define COND_BUF_LEN (BUFFER_SIZE - 30) +#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) + +#define MAX_USERNAME_SIZE 64 +#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html +#define MAX_TB_NAME_SIZE 64 +#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space +#define OPT_ABORT 1 /* –abort */ +#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. 
+#define MAX_PATH_LEN 4096 + +#define DEFAULT_START_TIME 1500000000000 + +#define MAX_PREPARED_RAND 1000000 +#define INT_BUFF_LEN 12 +#define BIGINT_BUFF_LEN 21 +#define SMALLINT_BUFF_LEN 7 +#define TINYINT_BUFF_LEN 5 +#define BOOL_BUFF_LEN 6 +#define FLOAT_BUFF_LEN 22 +#define DOUBLE_BUFF_LEN 42 +#define TIMESTAMP_BUFF_LEN 21 + +#define MAX_SAMPLES 10000 +#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp + +#define MAX_DB_COUNT 8 +#define MAX_SUPER_TABLE_COUNT 200 + +#define MAX_QUERY_SQL_COUNT 100 + +#define MAX_DATABASE_COUNT 256 +#define INPUT_BUF_LEN 256 + +#define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq +#define SMALL_BUFF_LEN 8 +#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3) +#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16) + +#define DEFAULT_NTHREADS 8 +#define DEFAULT_TIMESTAMP_STEP 1 +#define DEFAULT_INTERLACE_ROWS 0 +#define DEFAULT_DATATYPE_NUM 1 +#define DEFAULT_CHILDTABLES 10000 + +#define STMT_BIND_PARAM_BATCH 1 + +char* g_sampleDataBuf = NULL; +#if STMT_BIND_PARAM_BATCH == 1 + // bind param batch +char* g_sampleBindBatchArray = NULL; +#endif + +enum TEST_MODE { + INSERT_TEST, // 0 + QUERY_TEST, // 1 + SUBSCRIBE_TEST, // 2 + INVAID_TEST +}; + +typedef enum CREATE_SUB_TABLE_MOD_EN { + PRE_CREATE_SUBTBL, + AUTO_CREATE_SUBTBL, + NO_CREATE_SUBTBL +} CREATE_SUB_TABLE_MOD_EN; + +typedef enum TABLE_EXISTS_EN { + TBL_NO_EXISTS, + TBL_ALREADY_EXISTS, + TBL_EXISTS_BUTT +} TABLE_EXISTS_EN; + +enum enumSYNC_MODE { + SYNC_MODE, + ASYNC_MODE, + MODE_BUT +}; + +enum enum_TAOS_INTERFACE { + TAOSC_IFACE, + REST_IFACE, + STMT_IFACE, + INTERFACE_BUT +}; + +typedef enum enumQUERY_CLASS { + SPECIFIED_CLASS, + STABLE_CLASS, + CLASS_BUT +} QUERY_CLASS; + +typedef enum enum_PROGRESSIVE_OR_INTERLACE { + PROGRESSIVE_INSERT_MODE, + INTERLACE_INSERT_MODE, + INVALID_INSERT_MODE +} PROG_OR_INTERLACE_MODE; + +typedef enum enumQUERY_TYPE { + NO_INSERT_TYPE, + INSERT_TYPE, + QUERY_TYPE_BUT +} QUERY_TYPE; + +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- +enum _show_stables_index { + TSDB_SHOW_STABLES_NAME_INDEX, + TSDB_SHOW_STABLES_CREATED_TIME_INDEX, + TSDB_SHOW_STABLES_COLUMNS_INDEX, + TSDB_SHOW_STABLES_METRIC_INDEX, + TSDB_SHOW_STABLES_UID_INDEX, + TSDB_SHOW_STABLES_TID_INDEX, + TSDB_SHOW_STABLES_VGID_INDEX, + TSDB_MAX_SHOW_STABLES +}; + +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +/* Used by main to communicate with parse_opt. 
*/ +static char *g_dupstr = NULL; + +typedef struct SArguments_S { + char *metaFile; + uint32_t test_mode; + char *host; + uint16_t port; + uint16_t iface; + char * user; + char password[SHELL_MAX_PASSWORD_LEN]; + char * database; + int replica; + char * tb_prefix; + char * sqlFile; + bool use_metric; + bool drop_database; + bool aggr_func; + bool answer_yes; + bool debug_print; + bool verbose_print; + bool performance_print; + char * output_file; + bool async_mode; + char data_type[MAX_NUM_COLUMNS+1]; + char *dataType[MAX_NUM_COLUMNS+1]; + uint32_t binwidth; + uint32_t columnCount; + uint64_t lenOfOneRow; + uint32_t nthreads; + uint64_t insert_interval; + uint64_t timestamp_step; + int64_t query_times; + int64_t prepared_rand; + uint32_t interlaceRows; + uint32_t reqPerReq; // num_of_records_per_req + uint64_t max_sql_len; + int64_t ntables; + int64_t insertRows; + int abort; + uint32_t disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms, us or ns. according to database precision + uint32_t method_of_delete; + uint64_t totalInsertRows; + uint64_t totalAffectedRows; + bool demo_mode; // use default column name and semi-random data +} SArguments; + +typedef struct SColumn_S { + char field[TSDB_COL_NAME_LEN]; + char data_type; + char dataType[DATATYPE_BUFF_LEN]; + uint32_t dataLen; + char note[NOTE_BUFF_LEN]; +} StrColumn; + +typedef struct SSuperTable_S { + char stbName[TSDB_TABLE_NAME_LEN]; + char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample + char childTblPrefix[TBNAME_PREFIX_LEN]; + uint16_t childTblExists; + int64_t childTblCount; + uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table + uint16_t iface; // 0: taosc, 1: rest, 2: stmt + int64_t childTblLimit; + uint64_t childTblOffset; + + // int multiThreadWriteOneTbl; // 0: no, 1: yes + uint32_t interlaceRows; // + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms, us or ns. 
according to database precision + uint64_t maxSqlLen; // + + uint64_t insertInterval; // insert interval, will override global insert interval + int64_t insertRows; + int64_t timeStampStep; + char startTimestamp[MAX_TB_NAME_SIZE]; + char sampleFormat[SMALL_BUFF_LEN]; // csv, json + char sampleFile[MAX_FILE_NAME_LEN]; + char tagsFile[MAX_FILE_NAME_LEN]; + + uint32_t columnCount; + StrColumn columns[TSDB_MAX_COLUMNS]; + uint32_t tagCount; + StrColumn tags[TSDB_MAX_TAGS]; + + char* childTblName; + char* colsOfCreateChildTable; + uint64_t lenOfOneRow; + uint64_t lenOfTagOfOneRow; + + char* sampleDataBuf; + bool useSampleTs; + + uint32_t tagSource; // 0: rand, 1: tag sample + char* tagDataBuf; + uint32_t tagSampleCount; + uint32_t tagUsePos; + +#if STMT_BIND_PARAM_BATCH == 1 + // bind param batch + char *sampleBindBatchArray; +#endif + // statistics + uint64_t totalInsertRows; + uint64_t totalAffectedRows; +} SSuperTable; + +typedef struct { + char name[TSDB_DB_NAME_LEN]; + char create_time[32]; + int64_t ntables; + int32_t vgroups; + int16_t replica; + int16_t quorum; + int16_t days; + char keeplist[64]; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + int8_t cachelast; + char precision[SMALL_BUFF_LEN]; // time resolution + int8_t update; + char status[16]; +} SDbInfo; + +typedef struct SDbCfg_S { + // int maxtablesPerVnode; + uint32_t minRows; // 0 means default + uint32_t maxRows; // 0 means default + int comp; + int walLevel; + int cacheLast; + int fsync; + int replica; + int update; + int keep; + int days; + int cache; + int blocks; + int quorum; + char precision[SMALL_BUFF_LEN]; +} SDbCfg; + +typedef struct SDataBase_S { + char dbName[TSDB_DB_NAME_LEN]; + bool drop; // 0: use exists, 1: if exists, drop then new create + SDbCfg dbCfg; + uint64_t superTblCount; + SSuperTable* superTbls; +} SDataBase; + +typedef struct SDbs_S { + char cfgDir[MAX_FILE_NAME_LEN]; + char host[MAX_HOSTNAME_SIZE]; + struct sockaddr_in serv_addr; + + uint16_t port; + char user[MAX_USERNAME_SIZE]; + char password[SHELL_MAX_PASSWORD_LEN]; + char resultFile[MAX_FILE_NAME_LEN]; + bool use_metric; + bool aggr_func; + bool asyncMode; + + uint32_t threadCount; + uint32_t threadCountForCreateTbl; + uint32_t dbCount; + // statistics + uint64_t totalInsertRows; + uint64_t totalAffectedRows; + + SDataBase* db; +} SDbs; + +typedef struct SpecifiedQueryInfo_S { + uint64_t queryInterval; // 0: unlimited > 0 loop/s + uint32_t concurrent; + int sqlCount; + uint32_t asyncMode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + uint64_t queryTimes; + bool subscribeRestart; + int subscribeKeepProgress; + char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + int resubAfterConsume[MAX_QUERY_SQL_COUNT]; + int endAfterConsume[MAX_QUERY_SQL_COUNT]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + char topic[MAX_QUERY_SQL_COUNT][32]; + int consumed[MAX_QUERY_SQL_COUNT]; + TAOS_RES* res[MAX_QUERY_SQL_COUNT]; + uint64_t totalQueried; +} SpecifiedQueryInfo; + +typedef struct SuperQueryInfo_S { + char stbName[TSDB_TABLE_NAME_LEN]; + uint64_t queryInterval; // 0: unlimited > 0 loop/s + uint32_t threadCnt; + uint32_t asyncMode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + bool subscribeRestart; + int subscribeKeepProgress; + uint64_t queryTimes; + int64_t childTblCount; + char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq + int sqlCount; + char 
sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + int resubAfterConsume; + int endAfterConsume; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + + char* childTblName; + uint64_t totalQueried; +} SuperQueryInfo; + +typedef struct SQueryMetaInfo_S { + char cfgDir[MAX_FILE_NAME_LEN]; + char host[MAX_HOSTNAME_SIZE]; + uint16_t port; + struct sockaddr_in serv_addr; + char user[MAX_USERNAME_SIZE]; + char password[SHELL_MAX_PASSWORD_LEN]; + char dbName[TSDB_DB_NAME_LEN]; + char queryMode[SMALL_BUFF_LEN]; // taosc, rest + + SpecifiedQueryInfo specifiedQueryInfo; + SuperQueryInfo superQueryInfo; + uint64_t totalQueried; +} SQueryMetaInfo; + +typedef struct SThreadInfo_S { + TAOS * taos; + TAOS_STMT *stmt; + int64_t *bind_ts; + +#if STMT_BIND_PARAM_BATCH == 1 + int64_t *bind_ts_array; + char *bindParams; + char *is_null; +#else + char* sampleBindArray; +#endif + + int threadID; + char db_name[TSDB_DB_NAME_LEN]; + uint32_t time_precision; + char filePath[4096]; + FILE *fp; + char tb_prefix[TSDB_TABLE_NAME_LEN]; + uint64_t start_table_from; + uint64_t end_table_to; + int64_t ntables; + int64_t tables_created; + uint64_t data_of_rate; + int64_t start_time; + char* cols; + bool use_metric; + SSuperTable* stbInfo; + char *buffer; // sql cmd buffer + + // for async insert + tsem_t lock_sem; + int64_t counter; + uint64_t st; + uint64_t et; + uint64_t lastTs; + + // sample data + int64_t samplePos; + // statistics + uint64_t totalInsertRows; + uint64_t totalAffectedRows; + + // insert delay statistics + uint64_t cntDelay; + uint64_t totalDelay; + uint64_t avgDelay; + uint64_t maxDelay; + uint64_t minDelay; + + // seq of query or subscribe + uint64_t querySeq; // sequence number of sql command + TAOS_SUB* tsub; + + int sockfd; +} threadInfo; + +#ifdef WINDOWS +#define _CRT_RAND_S + +#include +#include + +typedef unsigned __int32 uint32_t; + +#pragma comment ( lib, "ws2_32.lib" ) +// Some old MinGW/CYGWIN distributions don't define this: +#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING + +static HANDLE g_stdoutHandle; +static DWORD g_consoleMode; + +static void setupForAnsiEscape(void) { + DWORD mode = 0; + g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); + + if(g_stdoutHandle == INVALID_HANDLE_VALUE) { + exit(GetLastError()); + } + + if(!GetConsoleMode(g_stdoutHandle, &mode)) { + exit(GetLastError()); + } + + g_consoleMode = mode; + + // Enable ANSI escape codes + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + + if(!SetConsoleMode(g_stdoutHandle, mode)) { + exit(GetLastError()); + } +} + +static void resetAfterAnsiEscape(void) { + // Reset colors + printf("\x1b[0m"); + + // Reset console mode + if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { + exit(GetLastError()); + } +} + +static int taosRandom() +{ + int number; + rand_s(&number); + + return number; +} +#else // Not windows +static void setupForAnsiEscape(void) {} + +static void resetAfterAnsiEscape(void) { + // Reset colors + printf("\x1b[0m"); +} + +#include + +static int taosRandom() +{ + return rand(); +} + +#endif // ifdef Windows + +static void prompt(); +static int createDatabasesAndStables(); +static void createChildTables(); +static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); +static int postProceSql(char *host, uint16_t port, char* sqlstr, threadInfo *pThreadInfo); +static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, + int disorderRatio, int disorderRange); +static bool 
getInfoFromJsonFile(char* file); +static void init_rand_data(); +static int regexMatch(const char *s, const char *reg, int cflags); + +/* ************ Global variables ************ */ + +int32_t* g_randint; +uint32_t* g_randuint; +int64_t* g_randbigint; +uint64_t* g_randubigint; +float* g_randfloat; +double* g_randdouble; + +char *g_randbool_buff = NULL; +char *g_randint_buff = NULL; +char *g_randuint_buff = NULL; +char *g_rand_voltage_buff = NULL; +char *g_randbigint_buff = NULL; +char *g_randubigint_buff = NULL; +char *g_randsmallint_buff = NULL; +char *g_randusmallint_buff = NULL; +char *g_randtinyint_buff = NULL; +char *g_randutinyint_buff = NULL; +char *g_randfloat_buff = NULL; +char *g_rand_current_buff = NULL; +char *g_rand_phase_buff = NULL; +char *g_randdouble_buff = NULL; + +char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)", + "max(current)", "min(current)", "first(current)", "last(current)"}; + +char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", + "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; + +SArguments g_args = { + NULL, // metaFile + 0, // test_mode + "localhost", // host + 6030, // port + INTERFACE_BUT, // iface + "root", // user +#ifdef _TD_POWER_ + "powerdb", // password +#elif (_TD_TQ_ == true) + "tqueue", // password +#elif (_TD_PRO_ == true) + "prodb", // password +#elif (_TD_KH_ == true) + "khroot", // password +#elif (_TD_JH_ == true) + "jhdata", // password +#else + "taosdata", // password +#endif + "test", // database + 1, // replica + "d", // tb_prefix + NULL, // sqlFile + true, // use_metric + true, // drop_database + false, // aggr_func + false, // debug_print + false, // verbose_print + false, // performance statistic print + false, // answer_yes; + "./output.txt", // output_file + 0, // mode : sync or async + {TSDB_DATA_TYPE_FLOAT, + TSDB_DATA_TYPE_INT, + TSDB_DATA_TYPE_FLOAT}, + { + "FLOAT", // dataType + "INT", // dataType + "FLOAT", // dataType. demo mode has 3 columns + }, + 64, // binwidth + 4, // columnCount, timestamp + float + int + float + 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow + DEFAULT_NTHREADS,// nthreads + 0, // insert_interval + DEFAULT_TIMESTAMP_STEP, // timestamp_step + 1, // query_times + 10000, // prepared_rand + DEFAULT_INTERLACE_ROWS, // interlaceRows; + 30000, // reqPerReq + (1024*1024), // max_sql_len + DEFAULT_CHILDTABLES, // ntables + 10000, // insertRows + 0, // abort + 0, // disorderRatio + 1000, // disorderRange + 1, // method_of_delete + 0, // totalInsertRows; + 0, // totalAffectedRows; + true, // demo_mode; +}; + +static SDbs g_Dbs; +static int64_t g_totalChildTables = DEFAULT_CHILDTABLES; +static int64_t g_actualChildTables = 0; +static SQueryMetaInfo g_queryInfo; +static FILE * g_fpOfInsertResult = NULL; + +#if _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +#define debugPrint(fmt, ...) \ + do { if (g_args.debug_print || g_args.verbose_print) \ + fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) + +#define verbosePrint(fmt, ...) \ + do { if (g_args.verbose_print) \ + fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) + +#define performancePrint(fmt, ...) \ + do { if (g_args.performance_print) \ + fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) + +#define errorPrint(fmt, ...) \ + do {\ + fprintf(stderr, " \033[31m");\ + fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ + fprintf(stderr, " \033[0m");\ + } while(0) + +#define errorPrint2(fmt, ...) 
\ + do {\ + struct tm Tm, *ptm;\ + struct timeval timeSecs; \ + time_t curTime;\ + gettimeofday(&timeSecs, NULL); \ + curTime = timeSecs.tv_sec;\ + ptm = localtime_r(&curTime, &Tm);\ + fprintf(stderr, " \033[31m");\ + fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ",\ + ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\ + ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\ + taosGetSelfPthreadId());\ + fprintf(stderr, " \033[0m");\ + errorPrint(fmt, __VA_ARGS__);\ + } while(0) + +// for strncpy buffer overflow +#define min(a, b) (((a) < (b)) ? (a) : (b)) + + +/////////////////////////////////////////////////// + +static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); } + +#ifndef TAOSDEMO_COMMIT_SHA1 +#define TAOSDEMO_COMMIT_SHA1 "unknown" +#endif + +#ifndef TD_VERNUMBER +#define TD_VERNUMBER "unknown" +#endif + +#ifndef TAOSDEMO_STATUS +#define TAOSDEMO_STATUS "unknown" +#endif + +static void printVersion() { + char tdengine_ver[] = TD_VERNUMBER; + char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1; + char taosdemo_status[] = TAOSDEMO_STATUS; + + if (strlen(taosdemo_status) == 0) { + printf("taosdemo version %s-%s\n", + tdengine_ver, taosdemo_ver); + } else { + printf("taosdemo version %s-%s, status:%s\n", + tdengine_ver, taosdemo_ver, taosdemo_status); + } +} + +static void printHelp() { + char indent[10] = " "; + printf("%s\n\n", "Usage: taosdemo [OPTION...]"); + printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t", + "The meta file to the execution procedure."); + printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t", + "The user name to use when connecting to the server."); + printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", + "The password to use when connecting to the server."); + printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", + "Configuration directory."); + printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t", + "TDengine server FQDN to connect. The default host is localhost."); + printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t", + "The TCP/IP port number to use for the connection."); + printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t", + "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'."); + printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t", + "Destination database. By default is 'test'."); + printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t", + "Set the replica parameters of the database, By default use 1, min: 1, max: 3."); + printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", + "Table prefix name. By default use 'd'."); + printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t", + "Use escape character for Both Stable and normmal table name"); + printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", + "The select sql file."); + printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag."); + printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", + "Direct output to the named file. By default use './output.txt'."); + printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", + "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC."); + printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", + "The data_type of columns, By default use: FLOAT,INT,FLOAT. NCHAR and BINARY can also use custom length. Eg: NCHAR(16),BINARY(8)"); + printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", + "The width of data_type 'BINARY' or 'NCHAR'. 
By default use ", + g_args.binwidth); + printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", + "The number of columns per record. Demo mode by default is ", + DEFAULT_DATATYPE_NUM, + " (float, int, float). Max values is ", + MAX_NUM_COLUMNS); + printf("%s%s%s%s\n", indent, indent, indent, + "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); + printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t", + "The number of threads. By default use ", DEFAULT_NTHREADS); + printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", + "The sleep time (ms) between insertion. By default is 0."); + printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", + "The timestamp step between insertion. By default is ", + DEFAULT_TIMESTAMP_STEP); + printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", + "The interlace rows of insertion. By default is ", + DEFAULT_INTERLACE_ROWS); + printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", + "The number of records per request. By default is 30000."); + printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", + "The number of tables. By default is 10000."); + printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", + "The number of records per table. By default is 10000."); + printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", + "The value of records generated are totally random."); + printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario."); + printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t", + "Test aggregation functions after insertion."); + printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt."); + printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", + "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default is in order."); + printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", + "Out of order data's range. Unit is ms. By default is 1000."); + printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", + "Print debug info."); + printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", + "Give this help list"); + printf("%s%s%s%s\n", indent, " --usage\t", "\t\t", + "Give a short usage message"); + printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version."); + /* printf("%s%s%s%s\n", indent, "-D", indent, + "Delete database if exists. 
0: no, 1: yes, default is 1"); + */ + printf("\nMandatory or optional arguments to long options are also mandatory or optional\n\ +for any corresponding short options.\n\ +\n\ +Report bugs to .\n"); +} + +static bool isStringNumber(char *input) +{ + int len = strlen(input); + if (0 == len) { + return false; + } + + for (int i = 0; i < len; i++) { + if (!isdigit(input[i])) + return false; + } + + return true; +} + +static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) +{ + fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value); + fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); +} + +static void errorUnrecognized(char *program, char *wrong_arg) +{ + fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); + fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); +} + +static void errorPrintReqArg(char *program, char *wrong_arg) +{ + fprintf(stderr, + "%s: option requires an argument -- '%s'\n", + program, wrong_arg); + fprintf(stderr, + "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); +} + +static void errorPrintReqArg2(char *program, char *wrong_arg) +{ + fprintf(stderr, + "%s: option requires a number argument '-%s'\n", + program, wrong_arg); + fprintf(stderr, + "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); +} + +static void errorPrintReqArg3(char *program, char *wrong_arg) +{ + fprintf(stderr, + "%s: option '%s' requires an argument\n", + program, wrong_arg); + fprintf(stderr, + "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); +} + +static void parse_args(int argc, char *argv[], SArguments *arguments) { + + for (int i = 1; i < argc; i++) { + if ((0 == strncmp(argv[i], "-f", strlen("-f"))) + || (0 == strncmp(argv[i], "--file", strlen("--file")))) { + arguments->demo_mode = false; + + if (2 == strlen(argv[i])) { + if (i+1 == argc) { + errorPrintReqArg(argv[0], "f"); + exit(EXIT_FAILURE); + } + arguments->metaFile = argv[++i]; + } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) { + arguments->metaFile = (char *)(argv[i] + strlen("-f")); + } else if (strlen("--file") == strlen(argv[i])) { + if (i+1 == argc) { + errorPrintReqArg3(argv[0], "--file"); + exit(EXIT_FAILURE); + } + arguments->metaFile = argv[++i]; + } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { + arguments->metaFile = (char *)(argv[i] + strlen("--file=")); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) + || (0 == strncmp(argv[i], "--config-dir", strlen("--config-dir")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "c"); + exit(EXIT_FAILURE); + } + tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); + } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) { + tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), TSDB_FILENAME_LEN); + } else if (strlen("--config-dir") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--config-dir"); + exit(EXIT_FAILURE); + } + tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); + } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) { + tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) + || (0 == strncmp(argv[i], "--host", 
strlen("--host")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "h"); + exit(EXIT_FAILURE); + } + arguments->host = argv[++i]; + } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) { + arguments->host = (char *)(argv[i] + strlen("-h")); + } else if (strlen("--host") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--host"); + exit(EXIT_FAILURE); + } + arguments->host = argv[++i]; + } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { + arguments->host = (char *)(argv[i] + strlen("--host=")); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-PP") == 0) { + arguments->performance_print = true; + } else if ((0 == strncmp(argv[i], "-P", strlen("-P"))) + || (0 == strncmp(argv[i], "--port", strlen("--port")))) { + uint64_t port; + char strPort[BIGINT_BUFF_LEN]; + + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "P"); + exit(EXIT_FAILURE); + } else if (isStringNumber(argv[i+1])) { + tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); + } else { + errorPrintReqArg2(argv[0], "P"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) { + if (isStringNumber((char *)(argv[i] + strlen("--port=")))) { + tstrncpy(strPort, (char *)(argv[i]+strlen("--port=")), BIGINT_BUFF_LEN); + } else { + errorPrintReqArg2(argv[0], "--port"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) { + if (isStringNumber((char *)(argv[i] + strlen("-P")))) { + tstrncpy(strPort, (char *)(argv[i]+strlen("-P")), BIGINT_BUFF_LEN); + } else { + errorPrintReqArg2(argv[0], "--port"); + exit(EXIT_FAILURE); + } + } else if (strlen("--port") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--port"); + exit(EXIT_FAILURE); + } else if (isStringNumber(argv[i+1])) { + tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); + } else { + errorPrintReqArg2(argv[0], "--port"); + exit(EXIT_FAILURE); + } + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + + port = atoi(strPort); + if (port > 65535) { + errorWrongValue("taosdump", "-P or --port", strPort); + exit(EXIT_FAILURE); + } + arguments->port = (uint16_t)port; + + } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) + || (0 == strncmp(argv[i], "--interface", strlen("--interface")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "I"); + exit(EXIT_FAILURE); + } + if (0 == strcasecmp(argv[i+1], "taosc")) { + arguments->iface = TAOSC_IFACE; + } else if (0 == strcasecmp(argv[i+1], "rest")) { + arguments->iface = REST_IFACE; + } else if (0 == strcasecmp(argv[i+1], "stmt")) { + arguments->iface = STMT_IFACE; + } else { + errorWrongValue(argv[0], "-I", argv[i+1]); + exit(EXIT_FAILURE); + } + i++; + } else if (0 == strncmp(argv[i], "--interface=", strlen("--interface="))) { + if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "taosc")) { + arguments->iface = TAOSC_IFACE; + } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "rest")) { + arguments->iface = REST_IFACE; + } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "stmt")) { + arguments->iface = STMT_IFACE; + } else { + errorPrintReqArg3(argv[0], "--interface"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) { + if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) { + arguments->iface = TAOSC_IFACE; + } else if (0 == strcasecmp((char *)(argv[i] + 
strlen("-I")), "rest")) { + arguments->iface = REST_IFACE; + } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "stmt")) { + arguments->iface = STMT_IFACE; + } else { + errorWrongValue(argv[0], "-I", + (char *)(argv[i] + strlen("-I"))); + exit(EXIT_FAILURE); + } + } else if (strlen("--interface") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--interface"); + exit(EXIT_FAILURE); + } + if (0 == strcasecmp(argv[i+1], "taosc")) { + arguments->iface = TAOSC_IFACE; + } else if (0 == strcasecmp(argv[i+1], "rest")) { + arguments->iface = REST_IFACE; + } else if (0 == strcasecmp(argv[i+1], "stmt")) { + arguments->iface = STMT_IFACE; + } else { + errorWrongValue(argv[0], "--interface", argv[i+1]); + exit(EXIT_FAILURE); + } + i++; + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) + || (0 == strncmp(argv[i], "--user", strlen("--user")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "u"); + exit(EXIT_FAILURE); + } + arguments->user = argv[++i]; + } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) { + arguments->user = (char *)(argv[i++] + strlen("-u")); + } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) { + arguments->user = (char *)(argv[i++] + strlen("--user=")); + } else if (strlen("--user") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--user"); + exit(EXIT_FAILURE); + } + arguments->user = argv[++i]; + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) + || (0 == strcmp(argv[i], "--password"))) { + if ((strlen(argv[i]) == 2) || (0 == strcmp(argv[i], "--password"))) { + printf("Enter password: "); + taosSetConsoleEcho(false); + if (scanf("%s", arguments->password) > 1) { + fprintf(stderr, "password read error!\n"); + } + taosSetConsoleEcho(true); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); + } + } else if ((0 == strncmp(argv[i], "-o", strlen("-o"))) + || (0 == strncmp(argv[i], "--output", strlen("--output")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--output"); + exit(EXIT_FAILURE); + } + arguments->output_file = argv[++i]; + } else if (0 == strncmp(argv[i], "--output=", strlen("--output="))) { + arguments->output_file = (char *)(argv[i++] + strlen("--output=")); + } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) { + arguments->output_file = (char *)(argv[i++] + strlen("-o")); + } else if (strlen("--output") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--output"); + exit(EXIT_FAILURE); + } + arguments->output_file = argv[++i]; + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) + || (0 == strncmp(argv[i], "--sql-file", strlen("--sql-file")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "s"); + exit(EXIT_FAILURE); + } + arguments->sqlFile = argv[++i]; + } else if (0 == strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) { + arguments->sqlFile = (char *)(argv[i++] + strlen("--sql-file=")); + } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) { + arguments->sqlFile = (char *)(argv[i++] + strlen("-s")); + } else if (strlen("--sql-file") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--sql-file"); + exit(EXIT_FAILURE); + } + arguments->sqlFile = argv[++i]; + 
} else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) + || (0 == strncmp(argv[i], "--query-mode", strlen("--query-mode")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "q"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "q"); + exit(EXIT_FAILURE); + } + arguments->async_mode = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--query-mode=", strlen("--query-mode="))) { + if (isStringNumber((char *)(argv[i] + strlen("--query-mode=")))) { + arguments->async_mode = atoi((char *)(argv[i]+strlen("--query-mode="))); + } else { + errorPrintReqArg2(argv[0], "--query-mode"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) { + if (isStringNumber((char *)(argv[i] + strlen("-q")))) { + arguments->async_mode = atoi((char *)(argv[i]+strlen("-q"))); + } else { + errorPrintReqArg2(argv[0], "-q"); + exit(EXIT_FAILURE); + } + } else if (strlen("--query-mode") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--query-mode"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--query-mode"); + exit(EXIT_FAILURE); + } + arguments->async_mode = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) + || (0 == strncmp(argv[i], "--threads", strlen("--threads")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "T"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "T"); + exit(EXIT_FAILURE); + } + arguments->nthreads = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) { + if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) { + arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads="))); + } else { + errorPrintReqArg2(argv[0], "--threads"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) { + if (isStringNumber((char *)(argv[i] + strlen("-T")))) { + arguments->nthreads = atoi((char *)(argv[i]+strlen("-T"))); + } else { + errorPrintReqArg2(argv[0], "-T"); + exit(EXIT_FAILURE); + } + } else if (strlen("--threads") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--threads"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--threads"); + exit(EXIT_FAILURE); + } + arguments->nthreads = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) + || (0 == strncmp(argv[i], "--insert-interval", strlen("--insert-interval")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "i"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "i"); + exit(EXIT_FAILURE); + } + arguments->insert_interval = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--insert-interval=", strlen("--insert-interval="))) { + if (isStringNumber((char *)(argv[i] + strlen("--insert-interval=")))) { + arguments->insert_interval = atoi((char *)(argv[i]+strlen("--insert-interval="))); + } else { + errorPrintReqArg3(argv[0], "--insert-innterval"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) { + if (isStringNumber((char *)(argv[i] + strlen("-i")))) { + 
arguments->insert_interval = atoi((char *)(argv[i]+strlen("-i"))); + } else { + errorPrintReqArg3(argv[0], "-i"); + exit(EXIT_FAILURE); + } + } else if (strlen("--insert-interval")== strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--insert-interval"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--insert-interval"); + exit(EXIT_FAILURE); + } + arguments->insert_interval = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-S", strlen("-S"))) + || (0 == strncmp(argv[i], "--time-step", strlen("--time-step")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "S"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "S"); + exit(EXIT_FAILURE); + } + arguments->timestamp_step = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--time-step=", strlen("--time-step="))) { + if (isStringNumber((char *)(argv[i] + strlen("--time-step=")))) { + arguments->async_mode = atoi((char *)(argv[i]+strlen("--time-step="))); + } else { + errorPrintReqArg2(argv[0], "--time-step"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) { + if (isStringNumber((char *)(argv[i] + strlen("-S")))) { + arguments->timestamp_step = atoi((char *)(argv[i]+strlen("-S"))); + } else { + errorPrintReqArg2(argv[0], "-S"); + exit(EXIT_FAILURE); + } + } else if (strlen("--time-step") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--time-step"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--time-step"); + exit(EXIT_FAILURE); + } + arguments->timestamp_step = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-qt") == 0) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-qt need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->query_times = atoi(argv[++i]); + } else if ((0 == strncmp(argv[i], "-B", strlen("-B"))) + || (0 == strncmp(argv[i], "--interlace-rows", strlen("--interlace-rows")))) { + if (strlen("-B") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "B"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "B"); + exit(EXIT_FAILURE); + } + arguments->interlaceRows = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) { + if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) { + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); + } else { + errorPrintReqArg2(argv[0], "--interlace-rows"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { + if (isStringNumber((char *)(argv[i] + strlen("-B")))) { + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B"))); + } else { + errorPrintReqArg2(argv[0], "-B"); + exit(EXIT_FAILURE); + } + } else if (strlen("--interlace-rows")== strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--interlace-rows"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--interlace-rows"); + exit(EXIT_FAILURE); + } + arguments->interlaceRows = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) + 
|| (0 == strncmp(argv[i], "--rec-per-req", 13))) { + if (strlen("-r") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "r"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "r"); + exit(EXIT_FAILURE); + } + arguments->reqPerReq = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) { + if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) { + arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req="))); + } else { + errorPrintReqArg2(argv[0], "--rec-per-req"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) { + if (isStringNumber((char *)(argv[i] + strlen("-r")))) { + arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r"))); + } else { + errorPrintReqArg2(argv[0], "-r"); + exit(EXIT_FAILURE); + } + } else if (strlen("--rec-per-req")== strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--rec-per-req"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--rec-per-req"); + exit(EXIT_FAILURE); + } + arguments->reqPerReq = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) + || (0 == strncmp(argv[i], "--tables", strlen("--tables")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "t"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "t"); + exit(EXIT_FAILURE); + } + arguments->ntables = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) { + if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) { + arguments->ntables = atoi((char *)(argv[i]+strlen("--tables="))); + } else { + errorPrintReqArg2(argv[0], "--tables"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) { + if (isStringNumber((char *)(argv[i] + strlen("-t")))) { + arguments->ntables = atoi((char *)(argv[i]+strlen("-t"))); + } else { + errorPrintReqArg2(argv[0], "-t"); + exit(EXIT_FAILURE); + } + } else if (strlen("--tables") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--tables"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--tables"); + exit(EXIT_FAILURE); + } + arguments->ntables = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + + g_totalChildTables = arguments->ntables; + } else if ((0 == strncmp(argv[i], "-n", strlen("-n"))) + || (0 == strncmp(argv[i], "--records", strlen("--records")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "n"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "n"); + exit(EXIT_FAILURE); + } + arguments->insertRows = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) { + if (isStringNumber((char *)(argv[i] + strlen("--records=")))) { + arguments->insertRows = atoi((char *)(argv[i]+strlen("--records="))); + } else { + errorPrintReqArg2(argv[0], "--records"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) { + if (isStringNumber((char *)(argv[i] + strlen("-n")))) { + arguments->insertRows = atoi((char *)(argv[i]+strlen("-n"))); + } else { + errorPrintReqArg2(argv[0], "-n"); + exit(EXIT_FAILURE); + } + } else if (strlen("--records") == strlen(argv[i])) { 
+ if (argc == i+1) { + errorPrintReqArg3(argv[0], "--records"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--records"); + exit(EXIT_FAILURE); + } + arguments->insertRows = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) + || (0 == strncmp(argv[i], "--database", strlen("--database")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "d"); + exit(EXIT_FAILURE); + } + arguments->database = argv[++i]; + } else if (0 == strncmp(argv[i], "--database=", strlen("--database="))) { + arguments->output_file = (char *)(argv[i] + strlen("--database=")); + } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) { + arguments->output_file = (char *)(argv[i] + strlen("-d")); + } else if (strlen("--database") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--database"); + exit(EXIT_FAILURE); + } + arguments->database = argv[++i]; + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) + || (0 == strncmp(argv[i], "--columns", strlen("--columns")))) { + arguments->demo_mode = false; + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "l"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "l"); + exit(EXIT_FAILURE); + } + arguments->columnCount = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) { + if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) { + arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns="))); + } else { + errorPrintReqArg2(argv[0], "--columns"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) { + if (isStringNumber((char *)(argv[i] + strlen("-l")))) { + arguments->columnCount = atoi((char *)(argv[i]+strlen("-l"))); + } else { + errorPrintReqArg2(argv[0], "-l"); + exit(EXIT_FAILURE); + } + } else if (strlen("--columns")== strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--columns"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--columns"); + exit(EXIT_FAILURE); + } + arguments->columnCount = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + + if (arguments->columnCount > MAX_NUM_COLUMNS) { + printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS); + prompt(); + arguments->columnCount = MAX_NUM_COLUMNS; + } + + for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) { + arguments->dataType[col] = "INT"; + arguments->data_type[col] = TSDB_DATA_TYPE_INT; + } + for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) { + arguments->dataType[col] = NULL; + arguments->data_type[col] = TSDB_DATA_TYPE_NULL; + } + } else if ((0 == strncmp(argv[i], "-b", strlen("-b"))) + || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) { + arguments->demo_mode = false; + + char *dataType; + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "b"); + exit(EXIT_FAILURE); + } + dataType = argv[++i]; + } else if (0 == strncmp(argv[i], "--data-type=", strlen("--data-type="))) { + dataType = (char *)(argv[i] + strlen("--data-type=")); + } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) { + dataType = (char *)(argv[i] + strlen("-b")); + } else if (strlen("--data-type") == 
strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--data-type"); + exit(EXIT_FAILURE); + } + dataType = argv[++i]; + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + + if (strstr(dataType, ",") == NULL) { + // only one col + if (strcasecmp(dataType, "INT") + && strcasecmp(dataType, "FLOAT") + && strcasecmp(dataType, "TINYINT") + && strcasecmp(dataType, "BOOL") + && strcasecmp(dataType, "SMALLINT") + && strcasecmp(dataType, "BIGINT") + && strcasecmp(dataType, "DOUBLE") + && strcasecmp(dataType, "TIMESTAMP") + && !regexMatch(dataType, + "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", + REG_ICASE | REG_EXTENDED) + && strcasecmp(dataType, "UTINYINT") + && strcasecmp(dataType, "USMALLINT") + && strcasecmp(dataType, "UINT") + && strcasecmp(dataType, "UBIGINT")) { + printHelp(); + errorPrint("%s", "-b: Invalid data_type!\n"); + exit(EXIT_FAILURE); + } + arguments->dataType[0] = dataType; + if (0 == strcasecmp(dataType, "INT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_INT; + } else if (0 == strcasecmp(dataType, "TINYINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strcasecmp(dataType, "SMALLINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strcasecmp(dataType, "BIGINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strcasecmp(dataType, "FLOAT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(dataType, "DOUBLE")) { + arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE; + } else if (1 == regexMatch(dataType, + "^BINARY(\\([1-9][0-9]*\\))?$", + REG_ICASE | REG_EXTENDED)) { + arguments->data_type[0] = TSDB_DATA_TYPE_BINARY; + } else if (1 == regexMatch(dataType, + "^NCHAR(\\([1-9][0-9]*\\))?$", + REG_ICASE | REG_EXTENDED)) { + arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strcasecmp(dataType, "BOOL")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BOOL; + } else if (0 == strcasecmp(dataType, "TIMESTAMP")) { + arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP; + } else if (0 == strcasecmp(dataType, "UTINYINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_UTINYINT; + } else if (0 == strcasecmp(dataType, "USMALLINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_USMALLINT; + } else if (0 == strcasecmp(dataType, "UINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_UINT; + } else if (0 == strcasecmp(dataType, "UBIGINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_UBIGINT; + } else { + arguments->data_type[0] = TSDB_DATA_TYPE_NULL; + } + arguments->dataType[1] = NULL; + arguments->data_type[1] = TSDB_DATA_TYPE_NULL; + } else { + // more than one col + int index = 0; + g_dupstr = strdup(dataType); + char *running = g_dupstr; + char *token = strsep(&running, ","); + while(token != NULL) { + if (strcasecmp(token, "INT") + && strcasecmp(token, "FLOAT") + && strcasecmp(token, "TINYINT") + && strcasecmp(token, "BOOL") + && strcasecmp(token, "SMALLINT") + && strcasecmp(token, "BIGINT") + && strcasecmp(token, "DOUBLE") + && strcasecmp(token, "TIMESTAMP") + && !regexMatch(token, "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", REG_ICASE | REG_EXTENDED) + && strcasecmp(token, "UTINYINT") + && strcasecmp(token, "USMALLINT") + && strcasecmp(token, "UINT") + && strcasecmp(token, "UBIGINT")) { + printHelp(); + free(g_dupstr); + errorPrint("%s", "-b: Invalid data_type!\n"); + exit(EXIT_FAILURE); + } + + if (0 == strcasecmp(token, "INT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_INT; + } else if (0 == strcasecmp(token, "FLOAT")) { + 
arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(token, "SMALLINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strcasecmp(token, "BIGINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strcasecmp(token, "DOUBLE")) { + arguments->data_type[index] = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strcasecmp(token, "TINYINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT; + } else if (1 == regexMatch(token, "^BINARY(\\([1-9][0-9]*\\))?$", REG_ICASE | + REG_EXTENDED)) { + arguments->data_type[index] = TSDB_DATA_TYPE_BINARY; + } else if (1 == regexMatch(token, "^NCHAR(\\([1-9][0-9]*\\))?$", REG_ICASE | + REG_EXTENDED)) { + arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strcasecmp(token, "BOOL")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BOOL; + } else if (0 == strcasecmp(token, "TIMESTAMP")) { + arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP; + } else if (0 == strcasecmp(token, "UTINYINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_UTINYINT; + } else if (0 == strcasecmp(token, "USMALLINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_USMALLINT; + } else if (0 == strcasecmp(token, "UINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_UINT; + } else if (0 == strcasecmp(token, "UBIGINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_UBIGINT; + } else { + arguments->data_type[index] = TSDB_DATA_TYPE_NULL; + } + arguments->dataType[index] = token; + index ++; + token = strsep(&running, ","); + if (index >= MAX_NUM_COLUMNS) break; + } + arguments->dataType[index] = NULL; + arguments->data_type[index] = TSDB_DATA_TYPE_NULL; + } + } else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) + || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "w"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "w"); + exit(EXIT_FAILURE); + } + arguments->binwidth = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) { + if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) { + arguments->binwidth = atoi((char *)(argv[i]+strlen("--binwidth="))); + } else { + errorPrintReqArg2(argv[0], "--binwidth"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) { + if (isStringNumber((char *)(argv[i] + strlen("-w")))) { + arguments->binwidth = atoi((char *)(argv[i]+strlen("-w"))); + } else { + errorPrintReqArg2(argv[0], "-w"); + exit(EXIT_FAILURE); + } + } else if (strlen("--binwidth") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--binwidth"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--binwidth"); + exit(EXIT_FAILURE); + } + arguments->binwidth = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) + || (0 == strncmp(argv[i], "--table-prefix", strlen("--table-prefix")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "m"); + exit(EXIT_FAILURE); + } + arguments->tb_prefix = argv[++i]; + } else if (0 == strncmp(argv[i], "--table-prefix=", strlen("--table-prefix="))) { + arguments->tb_prefix = (char *)(argv[i] + strlen("--table-prefix=")); + } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) { + arguments->tb_prefix = (char *)(argv[i] + strlen("-m")); + } 
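/*
 * The -b / --data-type handler above accepts either a single type name or a
 * comma-separated list; the list form is tokenized with strsep() over a
 * strdup()'ed copy, which is kept in g_dupstr because every token pointer
 * aliases into that copy. A minimal, self-contained sketch of the same
 * tokenization pattern (parseTypeList is an illustrative name, not part of
 * taosdemo):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parseTypeList(const char *spec) {
    char *dup = strdup(spec);   /* strsep() rewrites the buffer, so copy first */
    char *running = dup;
    char *token;
    while ((token = strsep(&running, ",")) != NULL) {
        printf("column type: %s\n", token);   /* token points into 'dup' */
    }
    free(dup);                  /* freeing 'dup' invalidates every token */
}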
else if (strlen("--table-prefix") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--table-prefix"); + exit(EXIT_FAILURE); + } + arguments->tb_prefix = argv[++i]; + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((strcmp(argv[i], "-N") == 0) + || (0 == strcmp(argv[i], "--normal-table"))) { + arguments->demo_mode = false; + arguments->use_metric = false; + } else if ((strcmp(argv[i], "-M") == 0) + || (0 == strcmp(argv[i], "--random"))) { + arguments->demo_mode = false; + } else if ((strcmp(argv[i], "-x") == 0) + || (0 == strcmp(argv[i], "--aggr-func"))) { + arguments->aggr_func = true; + } else if ((strcmp(argv[i], "-y") == 0) + || (0 == strcmp(argv[i], "--answer-yes"))) { + arguments->answer_yes = true; + } else if ((strcmp(argv[i], "-g") == 0) + || (0 == strcmp(argv[i], "--debug"))) { + arguments->debug_print = true; + } else if (strcmp(argv[i], "-gg") == 0) { + arguments->verbose_print = true; + } else if ((0 == strncmp(argv[i], "-R", strlen("-R"))) + || (0 == strncmp(argv[i], "--disorder-range", + strlen("--disorder-range")))) { + if (strlen("-R") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "R"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "R"); + exit(EXIT_FAILURE); + } + arguments->disorderRange = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--disorder-range=", + strlen("--disorder-range="))) { + if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) { + arguments->disorderRange = + atoi((char *)(argv[i]+strlen("--disorder-range="))); + } else { + errorPrintReqArg2(argv[0], "--disorder-range"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) { + if (isStringNumber((char *)(argv[i] + strlen("-R")))) { + arguments->disorderRange = + atoi((char *)(argv[i]+strlen("-R"))); + } else { + errorPrintReqArg2(argv[0], "-R"); + exit(EXIT_FAILURE); + } + + if (arguments->disorderRange < 0) { + errorPrint("Invalid disorder range %d, will be set to %d\n", + arguments->disorderRange, 1000); + arguments->disorderRange = 1000; + } + } else if (strlen("--disorder-range") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--disorder-range"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--disorder-range"); + exit(EXIT_FAILURE); + } + arguments->disorderRange = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) + || (0 == strncmp(argv[i], "--disorder", strlen("--disorder")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "O"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "O"); + exit(EXIT_FAILURE); + } + arguments->disorderRatio = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--disorder=", strlen("--disorder="))) { + if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) { + arguments->disorderRatio = atoi((char *)(argv[i]+strlen("--disorder="))); + } else { + errorPrintReqArg2(argv[0], "--disorder"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) { + if (isStringNumber((char *)(argv[i] + strlen("-O")))) { + arguments->disorderRatio = atoi((char *)(argv[i]+strlen("-O"))); + } else { + errorPrintReqArg2(argv[0], "-O"); + exit(EXIT_FAILURE); + } + } else if (strlen("--disorder") == strlen(argv[i])) { + if (argc == 
i+1) { + errorPrintReqArg3(argv[0], "--disorder"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--disorder"); + exit(EXIT_FAILURE); + } + arguments->disorderRatio = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + + if (arguments->disorderRatio > 50) { + errorPrint("Invalid disorder ratio %d, will be set to %d\n", + arguments->disorderRatio, 50); + arguments->disorderRatio = 50; + } + } else if ((0 == strncmp(argv[i], "-a", strlen("-a"))) + || (0 == strncmp(argv[i], "--replica", + strlen("--replica")))) { + if (2 == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "a"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "a"); + exit(EXIT_FAILURE); + } + arguments->replica = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--replica=", + strlen("--replica="))) { + if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) { + arguments->replica = + atoi((char *)(argv[i]+strlen("--replica="))); + } else { + errorPrintReqArg2(argv[0], "--replica"); + exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) { + if (isStringNumber((char *)(argv[i] + strlen("-a")))) { + arguments->replica = + atoi((char *)(argv[i]+strlen("-a"))); + } else { + errorPrintReqArg2(argv[0], "-a"); + exit(EXIT_FAILURE); + } + } else if (strlen("--replica") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--replica"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--replica"); + exit(EXIT_FAILURE); + } + arguments->replica = atoi(argv[++i]); + } else { + errorUnrecognized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } + + if (arguments->replica > 3 || arguments->replica < 1) { + errorPrint("Invalid replica value %d, will be set to %d\n", + arguments->replica, 1); + arguments->replica = 1; + } + } else if (strcmp(argv[i], "-D") == 0) { + arguments->method_of_delete = atoi(argv[++i]); + if (arguments->method_of_delete > 3) { + errorPrint("%s", "\n\t-D need a value (0~3) number following!\n"); + exit(EXIT_FAILURE); + } + } else if ((strcmp(argv[i], "--version") == 0) + || (strcmp(argv[i], "-V") == 0)) { + printVersion(); + exit(0); + } else if ((strcmp(argv[i], "--help") == 0) + || (strcmp(argv[i], "-?") == 0)) { + printHelp(); + exit(0); + } else if (strcmp(argv[i], "--usage") == 0) { + printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ + [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ + [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ + [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ + [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ + [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ + [--help] [--usage] [--version]\n"); + exit(0); + } else { + // to simulate argp_option output + if (strlen(argv[i]) > 2) { + if (0 == strncmp(argv[i], "--", 2)) { + fprintf(stderr, "%s: unrecognized options '%s'\n", argv[0], argv[i]); + } else if (0 == strncmp(argv[i], "-", 1)) { + char tmp[2] = {0}; + tstrncpy(tmp, argv[i]+1, 2); + fprintf(stderr, "%s: invalid options -- '%s'\n", argv[0], tmp); + } else { + fprintf(stderr, "%s: Too many arguments\n", argv[0]); + } + } else { + fprintf(stderr, "%s invalid options -- '%s'\n", argv[0], + (char *)((char *)argv[i])+1); + } + fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); 
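/*
 * NOTE on the --usage text above: "-a REPLICA" is already listed on the second
 * line and appears a second time near the end as "[-a REPLIcA][-g]" (mixed
 * case, no space before "[-g]"). The trailing occurrence is presumably meant
 * to be dropped, leaving "... [-O ORDERMODE] [-R RANGE] [-g]".
 */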
+ exit(EXIT_FAILURE); + } + } + + int columnCount; + for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) { + if (g_args.dataType[columnCount] == NULL) { + break; + } + } + + if (0 == columnCount) { + ERROR_EXIT("data type error!"); + } + g_args.columnCount = columnCount; + + g_args.lenOfOneRow = 20; // timestamp + for (int c = 0; c < g_args.columnCount; c++) { + switch(g_args.data_type[c]) { + case TSDB_DATA_TYPE_BINARY: + g_args.lenOfOneRow += g_args.binwidth + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + g_args.lenOfOneRow += g_args.binwidth + 3; + break; + + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + g_args.lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + g_args.lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + g_args.lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + g_args.lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + g_args.lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + g_args.lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + g_args.lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + errorPrint2("get error data type : %s\n", g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } + + if (((arguments->debug_print) && (NULL != arguments->metaFile)) + || arguments->verbose_print) { + printf("###################################################################\n"); + printf("# meta file: %s\n", arguments->metaFile); + printf("# Server IP: %s:%hu\n", + arguments->host == NULL ? "localhost" : arguments->host, + arguments->port ); + printf("# User: %s\n", arguments->user); + printf("# Password: %s\n", arguments->password); + printf("# Use metric: %s\n", + arguments->use_metric ? 
"true" : "false"); + if (*(arguments->dataType)) { + printf("# Specified data type: "); + for (int c = 0; c < MAX_NUM_COLUMNS; c++) + if (arguments->dataType[c]) + printf("%s,", arguments->dataType[c]); + else + break; + printf("\n"); + } + printf("# Insertion interval: %"PRIu64"\n", + arguments->insert_interval); + printf("# Number of records per req: %u\n", + arguments->reqPerReq); + printf("# Max SQL length: %"PRIu64"\n", + arguments->max_sql_len); + printf("# Length of Binary: %d\n", arguments->binwidth); + printf("# Number of Threads: %d\n", arguments->nthreads); + printf("# Number of Tables: %"PRId64"\n", + arguments->ntables); + printf("# Number of Data per Table: %"PRId64"\n", + arguments->insertRows); + printf("# Database name: %s\n", arguments->database); + printf("# Table prefix: %s\n", arguments->tb_prefix); + if (arguments->disorderRatio) { + printf("# Data order: %d\n", arguments->disorderRatio); + printf("# Data out of order rate: %d\n", arguments->disorderRange); + } + printf("# Delete method: %d\n", arguments->method_of_delete); + printf("# Answer yes when prompt: %d\n", arguments->answer_yes); + printf("# Print debug info: %d\n", arguments->debug_print); + printf("# Print verbose info: %d\n", arguments->verbose_print); + printf("###################################################################\n"); + + prompt(); + } +} + +static void tmfclose(FILE *fp) { + if (NULL != fp) { + fclose(fp); + } +} + +static void tmfree(void *buf) { + if (NULL != buf) { + free(buf); + buf = NULL; + } +} + +static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { + + verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); + + TAOS_RES *res = taos_query(taos, command); + int32_t code = taos_errno(res); + + if (code != 0) { + if (!quiet) { + errorPrint2("Failed to execute <%s>, reason: %s\n", + command, taos_errstr(res)); + } + taos_free_result(res); + //taos_close(taos); + return -1; + } + + if (INSERT_TYPE == type) { + int affectedRows = taos_affected_rows(res); + taos_free_result(res); + return affectedRows; + } + + taos_free_result(res); + return 0; +} + +static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) +{ + pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); + if (pThreadInfo->fp == NULL) { + errorPrint2( + "%s() LN%d, failed to open result file: %s, result will not save to file\n", + __func__, __LINE__, pThreadInfo->filePath); + return; + } + + fprintf(pThreadInfo->fp, "%s", resultBuf); + tmfclose(pThreadInfo->fp); + pThreadInfo->fp = NULL; +} + +static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { + TAOS_ROW row = NULL; + int num_rows = 0; + int num_fields = taos_field_count(res); + TAOS_FIELD *fields = taos_fetch_fields(res); + + char* databuf = (char*) calloc(1, 100*1024*1024); + if (databuf == NULL) { + errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", + __func__, __LINE__); + return ; + } + + int64_t totalLen = 0; + + // fetch the records row by row + while((row = taos_fetch_row(res))) { + if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) { + if (strlen(pThreadInfo->filePath) > 0) + appendResultBufToFile(databuf, pThreadInfo); + totalLen = 0; + memset(databuf, 0, 100*1024*1024); + } + num_rows++; + char temp[HEAD_BUFF_LEN] = {0}; + int len = taos_print_row(temp, row, fields, num_fields); + len += sprintf(temp + len, "\n"); + //printf("query result:%s\n", temp); + memcpy(databuf + totalLen, temp, len); + totalLen += len; + verbosePrint("%s() LN%d, totalLen: 
%"PRId64"\n", + __func__, __LINE__, totalLen); + } + + verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", + __func__, __LINE__, databuf, pThreadInfo->filePath); + if (strlen(pThreadInfo->filePath) > 0) { + appendResultBufToFile(databuf, pThreadInfo); + } + free(databuf); +} + +static void selectAndGetResult( + threadInfo *pThreadInfo, char *command) +{ + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { + TAOS_RES *res = taos_query(pThreadInfo->taos, command); + if (res == NULL || taos_errno(res) != 0) { + errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + return; + } + + fetchResult(res, pThreadInfo); + taos_free_result(res); + + } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { + int retCode = postProceSql( + g_queryInfo.host, g_queryInfo.port, + command, + pThreadInfo); + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); + } + + } else { + errorPrint2("%s() LN%d, unknown query mode: %s\n", + __func__, __LINE__, g_queryInfo.queryMode); + } +} + +static char *rand_bool_str() { + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randbool_buff + ((cursor % g_args.prepared_rand) * BOOL_BUFF_LEN); +} + +static int32_t rand_bool() { + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randint[cursor % g_args.prepared_rand] % 2; +} + +static char *rand_tinyint_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randtinyint_buff + + ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); +} + +static int32_t rand_tinyint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randint[cursor % g_args.prepared_rand] % 128; +} + +static char *rand_utinyint_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randutinyint_buff + + ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); +} + +static int32_t rand_utinyint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randuint[cursor % g_args.prepared_rand] % 255; +} + +static char *rand_smallint_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randsmallint_buff + + ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); +} + +static int32_t rand_smallint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randint[cursor % g_args.prepared_rand] % 32768; +} + +static char *rand_usmallint_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randusmallint_buff + + ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); +} + +static int32_t rand_usmallint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randuint[cursor % g_args.prepared_rand] % 65535; +} + +static char *rand_int_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); +} + +static int32_t rand_int() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randint[cursor % g_args.prepared_rand]; +} + +static char *rand_uint_str() 
+{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randuint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); +} + +static int32_t rand_uint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randuint[cursor % g_args.prepared_rand]; +} + +static char *rand_bigint_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randbigint_buff + + ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); +} + +static int64_t rand_bigint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randbigint[cursor % g_args.prepared_rand]; +} + +static char *rand_ubigint_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randubigint_buff + + ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); +} + +static int64_t rand_ubigint() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randubigint[cursor % g_args.prepared_rand]; +} + +static char *rand_float_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randfloat_buff + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); +} + + +static float rand_float() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randfloat[cursor % g_args.prepared_rand]; +} + +static char *demo_current_float_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_rand_current_buff + + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); +} + +static float UNUSED_FUNC demo_current_float() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return (float)(9.8 + 0.04 * (g_randint[cursor % g_args.prepared_rand] % 10) + + g_randfloat[cursor % g_args.prepared_rand]/1000000000); +} + +static char *demo_voltage_int_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_rand_voltage_buff + + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); +} + +static int32_t UNUSED_FUNC demo_voltage_int() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return 215 + g_randint[cursor % g_args.prepared_rand] % 10; +} + +static char *demo_phase_float_str() { + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_rand_phase_buff + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); +} + +static float UNUSED_FUNC demo_phase_float() { + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return (float)((115 + g_randint[cursor % g_args.prepared_rand] % 10 + + g_randfloat[cursor % g_args.prepared_rand]/1000000000)/360); +} + +#if 0 +static const char charNum[] = "0123456789"; + +static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose +static void nonrand_string(char *str, int size) +{ + str[0] = 0; + if (size > 0) { + int n; + for (n = 0; n < size; n++) { + str[n] = charNum[n % 10]; + } + str[n] = 0; + } +} +#endif + +static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; + +static void rand_string(char *str, int size) { + str[0] = 0; + if (size > 0) { + //--size; + int n; + for (n = 0; n < size; n++) { + int key = abs(rand_tinyint()) % 
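/*
 * Every rand_*() / rand_*_str() accessor above follows the same pattern: a
 * function-local static cursor advances on each call and wraps at
 * g_args.prepared_rand, indexing either a pre-generated value array or a
 * fixed-stride string buffer filled once by init_rand_data(), so per-row data
 * generation is a table lookup rather than a fresh random-number call.
 * A generic, self-contained sketch of that pattern (illustrative name, not
 * taosdemo's):
 */
static int next_prepared_int(const int *prepared, int prepared_count) {
    static int cursor;                    /* advances across calls */
    cursor++;
    if (cursor > prepared_count - 1) {    /* wrap once the pool is exhausted */
        cursor = 0;
    }
    return prepared[cursor % prepared_count];
}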
(int)(sizeof(charset) - 1); + str[n] = charset[key]; + } + str[n] = 0; + } +} + +static char *rand_double_str() +{ + static int cursor; + cursor++; + if (cursor > (g_args.prepared_rand - 1)) cursor = 0; + return g_randdouble_buff + (cursor * DOUBLE_BUFF_LEN); +} + +static double rand_double() +{ + static int cursor; + cursor++; + cursor = cursor % g_args.prepared_rand; + return g_randdouble[cursor]; +} + +static void init_rand_data() { + + g_randint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); + assert(g_randint_buff); + g_rand_voltage_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); + assert(g_rand_voltage_buff); + g_randbigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); + assert(g_randbigint_buff); + g_randsmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); + assert(g_randsmallint_buff); + g_randtinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); + assert(g_randtinyint_buff); + g_randbool_buff = calloc(1, BOOL_BUFF_LEN * g_args.prepared_rand); + assert(g_randbool_buff); + g_randfloat_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); + assert(g_randfloat_buff); + g_rand_current_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); + assert(g_rand_current_buff); + g_rand_phase_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); + assert(g_rand_phase_buff); + g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * g_args.prepared_rand); + assert(g_randdouble_buff); + g_randuint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); + assert(g_randuint_buff); + g_randutinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); + assert(g_randutinyint_buff); + g_randusmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); + assert(g_randusmallint_buff); + g_randubigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); + assert(g_randubigint_buff); + g_randint = calloc(1, sizeof(int32_t) * g_args.prepared_rand); + assert(g_randint); + g_randuint = calloc(1, sizeof(uint32_t) * g_args.prepared_rand); + assert(g_randuint); + g_randbigint = calloc(1, sizeof(int64_t) * g_args.prepared_rand); + assert(g_randbigint); + g_randubigint = calloc(1, sizeof(uint64_t) * g_args.prepared_rand); + assert(g_randubigint); + g_randfloat = calloc(1, sizeof(float) * g_args.prepared_rand); + assert(g_randfloat); + g_randdouble = calloc(1, sizeof(double) * g_args.prepared_rand); + assert(g_randdouble); + + for (int i = 0; i < g_args.prepared_rand; i++) { + g_randint[i] = (int)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); + g_randuint[i] = (int)(taosRandom()); + sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d", + g_randint[i]); + sprintf(g_rand_voltage_buff + i * INT_BUFF_LEN, "%d", + 215 + g_randint[i] % 10); + + sprintf(g_randbool_buff + i * BOOL_BUFF_LEN, "%s", + ((g_randint[i] % 2) & 1)?"true":"false"); + sprintf(g_randsmallint_buff + i * SMALLINT_BUFF_LEN, "%d", + g_randint[i] % 32768); + sprintf(g_randtinyint_buff + i * TINYINT_BUFF_LEN, "%d", + g_randint[i] % 128); + sprintf(g_randuint_buff + i * INT_BUFF_LEN, "%d", + g_randuint[i]); + sprintf(g_randusmallint_buff + i * SMALLINT_BUFF_LEN, "%d", + g_randuint[i] % 65535); + sprintf(g_randutinyint_buff + i * TINYINT_BUFF_LEN, "%d", + g_randuint[i] % 255); + + g_randbigint[i] = (int64_t)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); + g_randubigint[i] = (uint64_t)(taosRandom()); + sprintf(g_randbigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"", + g_randbigint[i]); + sprintf(g_randubigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"", + g_randubigint[i]); + + g_randfloat[i] = 
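/*
 * In init_rand_data() above, the unsigned 64-bit pool (g_randubigint) is
 * printed with PRId64; PRIu64 is the matching conversion for uint64_t values,
 * although the mismatch is harmless as long as taosRandom() keeps the values
 * below INT64_MAX. The same applies to the "%d" used for unsigned 32-bit
 * values elsewhere in this function.
 */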
(float)(taosRandom() / 1000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1); + sprintf(g_randfloat_buff + i * FLOAT_BUFF_LEN, "%f", + g_randfloat[i]); + sprintf(g_rand_current_buff + i * FLOAT_BUFF_LEN, "%f", + (float)(9.8 + 0.04 * (g_randint[i] % 10) + + g_randfloat[i]/1000000000)); + sprintf(g_rand_phase_buff + i * FLOAT_BUFF_LEN, "%f", + (float)((115 + g_randint[i] % 10 + + g_randfloat[i]/1000000000)/360)); + + g_randdouble[i] = (double)(taosRandom() / 1000000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1); + sprintf(g_randdouble_buff + i * DOUBLE_BUFF_LEN, "%f", + g_randdouble[i]); + } +} + +#define SHOW_PARSE_RESULT_START() \ + do { if (g_args.metaFile) \ + printf("\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \ + g_args.metaFile); } while(0) + +#define SHOW_PARSE_RESULT_END() \ + do { if (g_args.metaFile) \ + printf("\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \ + g_args.metaFile); } while(0) + +#define SHOW_PARSE_RESULT_START_TO_FILE(fp) \ + do { if (g_args.metaFile) \ + fprintf(fp, "\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \ + g_args.metaFile); } while(0) + +#define SHOW_PARSE_RESULT_END_TO_FILE(fp) \ + do { if (g_args.metaFile) \ + fprintf(fp, "\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \ + g_args.metaFile); } while(0) + +static int printfInsertMeta() { + SHOW_PARSE_RESULT_START(); + + if (g_args.demo_mode) { + printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n"); + } else { + printf("\ntaosdemo is simulating random data as you request..\n\n"); + } + + if (g_args.iface != INTERFACE_BUT) { + // first time if no iface specified + printf("interface: \033[33m%s\033[0m\n", + (g_args.iface==TAOSC_IFACE)?"taosc": + (g_args.iface==REST_IFACE)?"rest":"stmt"); + } + + printf("host: \033[33m%s:%u\033[0m\n", + g_Dbs.host, g_Dbs.port); + printf("user: \033[33m%s\033[0m\n", g_Dbs.user); + printf("password: \033[33m%s\033[0m\n", g_Dbs.password); + printf("configDir: \033[33m%s\033[0m\n", configDir); + printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); + printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); + printf("thread num of create table: \033[33m%d\033[0m\n", + g_Dbs.threadCountForCreateTbl); + printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", + g_args.insert_interval); + printf("number of records per req: \033[33m%u\033[0m\n", + g_args.reqPerReq); + printf("max sql length: \033[33m%"PRIu64"\033[0m\n", + g_args.max_sql_len); + + printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); + + for (int i = 0; i < g_Dbs.dbCount; i++) { + printf("database[\033[33m%d\033[0m]:\n", i); + printf(" database[%d] name: \033[33m%s\033[0m\n", + i, g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + printf(" drop: \033[33m no\033[0m\n"); + } else { + printf(" drop: \033[33m yes\033[0m\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + printf(" blocks: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + printf(" cache: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + printf(" days: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + printf(" keep: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + printf(" replica: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + printf(" 
update: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + printf(" minRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + printf(" maxRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + printf(" walLevel: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + printf(" fsync: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + printf(" quorum: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) { + printf(" precision: \033[33m%s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); + } else { + printf("\033[1m\033[40;31m precision error: %s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); + return -1; + } + } + + + if (g_args.use_metric) { + printf(" super table count: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTblCount); + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); + + printf(" stbName: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].stbName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); + } else if (AUTO_CREATE_SUBTBL == + g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "no"); + } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" childTblExists: \033[33m%s\033[0m\n", "error"); + } + + printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblCount); + printf(" childTblPrefix: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblPrefix); + printf(" dataSource: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].dataSource); + printf(" iface: \033[33m%s\033[0m\n", + (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": + (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); + if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { + printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblLimit); + } + if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { + printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblOffset); + } + printf(" insertRows: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].insertRows); + /* + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); + }else { + printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); + } + */ + printf(" interlaceRows: \033[33m%u\033[0m\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + + if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { + printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTbls[j].insertInterval); + } + + printf(" disorderRange: \033[33m%d\033[0m\n", + 
g_Dbs.db[i].superTbls[j].disorderRange); + printf(" disorderRatio: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].disorderRatio); + printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTbls[j].maxSqlLen); + printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].timeStampStep); + printf(" startTimestamp: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].startTimestamp); + printf(" sampleFormat: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sampleFormat); + printf(" sampleFile: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sampleFile); + printf(" useSampleTs: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].useSampleTs ? "yes (warning: disorderRange/disorderRatio is disabled)" : "no"); + printf(" tagsFile: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].tagsFile); + printf(" columnCount: \033[33m%d\033[0m\n ", + g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + "binary", 6)) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + "nchar", 5))) { + printf("column[%d]:\033[33m%s(%d)\033[0m ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType, + g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + printf("column[%d]:\033[33m%s\033[0m ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + printf("\n"); + + printf(" tagCount: \033[33m%d\033[0m\n ", + g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { + printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, + g_Dbs.db[i].superTbls[j].tags[k].dataType, + g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + printf("tag[%d]:\033[33m%s\033[0m ", k, + g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + printf("\n"); + } + } else { + printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", + g_args.ntables); + printf(" insertRows: \033[33m%"PRId64"\033[0m\n", + g_args.insertRows); + } + printf("\n"); + } + + SHOW_PARSE_RESULT_END(); + + return 0; +} + +static void printfInsertMetaToFile(FILE* fp) { + + SHOW_PARSE_RESULT_START_TO_FILE(fp); + + fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); + fprintf(fp, "user: %s\n", g_Dbs.user); + fprintf(fp, "configDir: %s\n", configDir); + fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); + fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); + fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl); + fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq); + fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); + fprintf(fp, "database count: %d\n", g_Dbs.dbCount); + + for (int i = 0; i < g_Dbs.dbCount; i++) { + fprintf(fp, "database[%d]:\n", i); + fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + fprintf(fp, " drop: no\n"); + }else { + fprintf(fp, " drop: yes\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + fprintf(fp, " 
cache: %d\n", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + fprintf(fp, " precision: %s\n", + g_Dbs.db[i].dbCfg.precision); + } else { + fprintf(fp, " precision error: %s\n", + g_Dbs.db[i].dbCfg.precision); + } + } + + fprintf(fp, " super table count: %"PRIu64"\n", + g_Dbs.db[i].superTblCount); + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + fprintf(fp, " super table[%d]:\n", j); + + fprintf(fp, " stbName: %s\n", + g_Dbs.db[i].superTbls[j].stbName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "no"); + } else if (AUTO_CREATE_SUBTBL + == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "yes"); + } else { + fprintf(fp, " autoCreateTable: %s\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "no"); + } else if (TBL_ALREADY_EXISTS + == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "yes"); + } else { + fprintf(fp, " childTblExists: %s\n", "error"); + } + + fprintf(fp, " childTblCount: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].childTblCount); + fprintf(fp, " childTblPrefix: %s\n", + g_Dbs.db[i].superTbls[j].childTblPrefix); + fprintf(fp, " dataSource: %s\n", + g_Dbs.db[i].superTbls[j].dataSource); + fprintf(fp, " iface: %s\n", + (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": + (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); + fprintf(fp, " insertRows: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].insertRows); + fprintf(fp, " interlace rows: %u\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { + fprintf(fp, " stable insert interval: %"PRIu64"\n", + g_Dbs.db[i].superTbls[j].insertInterval); + } + /* + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + fprintf(fp, " multiThreadWriteOneTbl: no\n"); + }else { + fprintf(fp, " multiThreadWriteOneTbl: yes\n"); + } + */ + fprintf(fp, " interlaceRows: %u\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + fprintf(fp, " disorderRange: %d\n", + g_Dbs.db[i].superTbls[j].disorderRange); + fprintf(fp, " disorderRatio: %d\n", + g_Dbs.db[i].superTbls[j].disorderRatio); + fprintf(fp, " maxSqlLen: %"PRIu64"\n", + g_Dbs.db[i].superTbls[j].maxSqlLen); + + fprintf(fp, " timeStampStep: 
%"PRId64"\n", + g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", + g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " sampleFormat: %s\n", + g_Dbs.db[i].superTbls[j].sampleFormat); + fprintf(fp, " sampleFile: %s\n", + g_Dbs.db[i].superTbls[j].sampleFile); + fprintf(fp, " tagsFile: %s\n", + g_Dbs.db[i].superTbls[j].tagsFile); + + fprintf(fp, " columnCount: %d\n ", + g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp( + g_Dbs.db[i].superTbls[j].columns[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp( + g_Dbs.db[i].superTbls[j].columns[k].dataType, + "nchar", strlen("nchar")))) { + fprintf(fp, "column[%d]:%s(%d) ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType, + g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + fprintf(fp, "column[%d]:%s ", + k, g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + fprintf(fp, "\n"); + + fprintf(fp, " tagCount: %d\n ", + g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { + fprintf(fp, "tag[%d]:%s(%d) ", + k, g_Dbs.db[i].superTbls[j].tags[k].dataType, + g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + fprintf(fp, "\n"); + } + fprintf(fp, "\n"); + } + + SHOW_PARSE_RESULT_END_TO_FILE(fp); +} + +static void printfQueryMeta() { + + SHOW_PARSE_RESULT_START(); + + printf("host: \033[33m%s:%u\033[0m\n", + g_queryInfo.host, g_queryInfo.port); + printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); + printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); + + printf("\n"); + + if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { + printf("specified table query info: \n"); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.sqlCount); + if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { + printf("specified tbl query times:\n"); + printf(" \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryTimes); + printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryInterval); + printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.concurrent); + printf("mod: \033[33m%s\033[0m\n", + (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync"); + printf("interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.specifiedQueryInfo.sql[i]); + } + printf("\n"); + } + + printf("super table query info:\n"); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.sqlCount); + + if 
(g_queryInfo.superQueryInfo.sqlCount > 0) { + printf("query interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.queryInterval); + printf("threadCnt: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%"PRId64"\033[0m\n", + g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", + g_queryInfo.superQueryInfo.stbName); + printf("stb query times:\033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.queryTimes); + + printf("mod: \033[33m%s\033[0m\n", + (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync"); + printf("interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress); + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.superQueryInfo.sql[i]); + } + printf("\n"); + } + } + + SHOW_PARSE_RESULT_END(); +} + +static char* formatTimestamp(char* buf, int64_t val, int precision) { + time_t tt; + if (precision == TSDB_TIME_PRECISION_MICRO) { + tt = (time_t)(val / 1000000); + } if (precision == TSDB_TIME_PRECISION_NANO) { + tt = (time_t)(val / 1000000000); + } else { + tt = (time_t)(val / 1000); + } + + /* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + + struct tm* ptm = localtime(&tt); + size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); + + if (precision == TSDB_TIME_PRECISION_MICRO) { + sprintf(buf + pos, ".%06d", (int)(val % 1000000)); + } else if (precision == TSDB_TIME_PRECISION_NANO) { + sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); + } else { + sprintf(buf + pos, ".%03d", (int)(val % 1000)); + } + + return buf; +} + +static void xDumpFieldToFile(FILE* fp, const char* val, + TAOS_FIELD* field, int32_t length, int precision) { + + if (val == NULL) { + fprintf(fp, "%s", TSDB_DATA_NULL_STR); + return; + } + + char buf[TSDB_MAX_BYTES_PER_ROW]; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: + fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 
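/*
 * NOTE on formatTimestamp() above: the precision chain is written as
 * "if (MICRO) { ... } if (NANO) { ... } else { ... }". Without an "else"
 * before the second "if", a microsecond-precision value is first divided by
 * 1000000 and then overwritten by the trailing else branch (val / 1000).
 * Presumably intended:
 *
 *     if (precision == TSDB_TIME_PRECISION_MICRO) {
 *         tt = (time_t)(val / 1000000);
 *     } else if (precision == TSDB_TIME_PRECISION_NANO) {
 *         tt = (time_t)(val / 1000000000);
 *     } else {
 *         tt = (time_t)(val / 1000);
 *     }
 */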
1 : 0)); + break; + + case TSDB_DATA_TYPE_TINYINT: + fprintf(fp, "%d", *((int8_t *)val)); + break; + + case TSDB_DATA_TYPE_UTINYINT: + fprintf(fp, "%d", *((uint8_t *)val)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + fprintf(fp, "%d", *((int16_t *)val)); + break; + + case TSDB_DATA_TYPE_USMALLINT: + fprintf(fp, "%d", *((uint16_t *)val)); + break; + + case TSDB_DATA_TYPE_INT: + fprintf(fp, "%d", *((int32_t *)val)); + break; + + case TSDB_DATA_TYPE_UINT: + fprintf(fp, "%d", *((uint32_t *)val)); + break; + + case TSDB_DATA_TYPE_BIGINT: + fprintf(fp, "%"PRId64"", *((int64_t *)val)); + break; + + case TSDB_DATA_TYPE_UBIGINT: + fprintf(fp, "%"PRId64"", *((uint64_t *)val)); + break; + + case TSDB_DATA_TYPE_FLOAT: + fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(buf, val, length); + buf[length] = 0; + fprintf(fp, "\'%s\'", buf); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + formatTimestamp(buf, *(int64_t*)val, precision); + fprintf(fp, "'%s'", buf); + break; + + default: + break; + } +} + +static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { + TAOS_ROW row = taos_fetch_row(tres); + if (row == NULL) { + return 0; + } + + FILE* fp = fopen(fname, "at"); + if (fp == NULL) { + errorPrint2("%s() LN%d, failed to open file: %s\n", + __func__, __LINE__, fname); + return -1; + } + + int num_fields = taos_num_fields(tres); + TAOS_FIELD *fields = taos_fetch_fields(tres); + int precision = taos_result_precision(tres); + + for (int col = 0; col < num_fields; col++) { + if (col > 0) { + fprintf(fp, ","); + } + fprintf(fp, "%s", fields[col].name); + } + fputc('\n', fp); + + int numOfRows = 0; + do { + int32_t* length = taos_fetch_lengths(tres); + for (int i = 0; i < num_fields; i++) { + if (i > 0) { + fputc(',', fp); + } + xDumpFieldToFile(fp, + (const char*)row[i], fields +i, length[i], precision); + } + fputc('\n', fp); + + numOfRows++; + row = taos_fetch_row(tres); + } while( row != NULL); + + fclose(fp); + + return numOfRows; +} + +static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + res = taos_query(taos, "show databases;"); + int32_t code = taos_errno(res); + + if (code != 0) { + errorPrint2("failed to run , reason: %s\n", + taos_errstr(res)); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + + while((row = taos_fetch_row(res)) != NULL) { + // sys database name : 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { + continue; + } + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count); + return -1; + } + + tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + formatTimestamp(dbInfos[count]->create_time, + *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], + TSDB_TIME_PRECISION_MILLI); + dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + 
fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + + tstrncpy(dbInfos[count]->precision, + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); + tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], + fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); + + count++; + if (count > MAX_DATABASE_COUNT) { + errorPrint("%s() LN%d, The database count overflow than %d\n", + __func__, __LINE__, MAX_DATABASE_COUNT); + break; + } + } + + return count; +} + +static void printfDbInfoForQueryToFile( + char* filename, SDbInfo* dbInfos, int index) { + + if (filename[0] == 0) + return; + + FILE *fp = fopen(filename, "at"); + if (fp == NULL) { + errorPrint( "failed to open file: %s\n", filename); + return; + } + + fprintf(fp, "================ database[%d] ================\n", index); + fprintf(fp, "name: %s\n", dbInfos->name); + fprintf(fp, "created_time: %s\n", dbInfos->create_time); + fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables); + fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); + fprintf(fp, "replica: %d\n", dbInfos->replica); + fprintf(fp, "quorum: %d\n", dbInfos->quorum); + fprintf(fp, "days: %d\n", dbInfos->days); + fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); + fprintf(fp, "cache(MB): %d\n", dbInfos->cache); + fprintf(fp, "blocks: %d\n", dbInfos->blocks); + fprintf(fp, "minrows: %d\n", dbInfos->minrows); + fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); + fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); + fprintf(fp, "fsync: %d\n", dbInfos->fsync); + fprintf(fp, "comp: %d\n", dbInfos->comp); + fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); + fprintf(fp, "precision: %s\n", dbInfos->precision); + fprintf(fp, "update: %d\n", dbInfos->update); + fprintf(fp, "status: %s\n", dbInfos->status); + fprintf(fp, "\n"); + + fclose(fp); +} + +static void printfQuerySystemInfo(TAOS * taos) { + char filename[MAX_FILE_NAME_LEN] = {0}; + char buffer[1024] = {0}; + TAOS_RES* res; + + time_t t; + struct tm* lt; + time(&t); + lt = localtime(&t); + snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", + lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, + lt->tm_sec); + + // show variables + res = taos_query(taos, "show variables;"); + //fetchResult(res, filename); + xDumpResultToFile(filename, res); + + // show dnodes + res = taos_query(taos, "show dnodes;"); + xDumpResultToFile(filename, res); + //fetchResult(res, filename); + + // show databases + res = taos_query(taos, "show databases;"); + SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); + if (dbInfos == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); + return; + } + int dbCount = getDbFromServer(taos, dbInfos); + if (dbCount <= 0) { + free(dbInfos); + return; + } + + for (int i = 0; i < dbCount; i++) { + // printf database info 
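/*
 * NOTE on getDbFromServer() above: the caller allocates dbInfos with
 * MAX_DATABASE_COUNT slots (valid indices 0 .. MAX_DATABASE_COUNT - 1), but
 * the bound is checked only after count++ and uses ">", so once count reaches
 * MAX_DATABASE_COUNT the next iteration still writes
 * dbInfos[MAX_DATABASE_COUNT], one element past the end. Checking
 * "count >= MAX_DATABASE_COUNT" (or checking before the write) avoids the
 * overrun when the server reports that many databases.
 */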
+ printfDbInfoForQueryToFile(filename, dbInfos[i], i); + + // show db.vgroups + snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name); + res = taos_query(taos, buffer); + xDumpResultToFile(filename, res); + + // show db.stables + snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name); + res = taos_query(taos, buffer); + xDumpResultToFile(filename, res); + free(dbInfos[i]); + } + + free(dbInfos); +} + +static int postProceSql(char *host, uint16_t port, + char* sqlstr, threadInfo *pThreadInfo) +{ + char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; + + char *url = "/rest/sql"; + + int bytes, sent, received, req_str_len, resp_len; + char *request_buf; + char response_buf[RESP_BUF_LEN]; + uint16_t rest_port = port + TSDB_PORT_HTTP; + + int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN; + + request_buf = malloc(req_buf_len); + if (NULL == request_buf) { + errorPrint("%s", "cannot allocate memory.\n"); + exit(EXIT_FAILURE); + } + + char userpass_buf[INPUT_BUF_LEN]; + int mod_table[] = {0, 2, 1}; + + static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', + 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', + 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '/'}; + + if (g_args.test_mode == INSERT_TEST) { + snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", + g_Dbs.user, g_Dbs.password); + } else { + snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", + g_queryInfo.user, g_queryInfo.password); + } + + size_t userpass_buf_len = strlen(userpass_buf); + size_t encoded_len = 4 * ((userpass_buf_len +2) / 3); + + char base64_buf[INPUT_BUF_LEN]; + + memset(base64_buf, 0, INPUT_BUF_LEN); + + for (int n = 0, m = 0; n < userpass_buf_len;) { + uint32_t oct_a = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t oct_b = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t oct_c = n < userpass_buf_len ? 
+ (unsigned char) userpass_buf[n++]:0; + uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; + + base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f]; + } + + for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) + base64_buf[encoded_len - 1 - l] = '='; + + debugPrint("%s() LN%d: auth string base64 encoded: %s\n", + __func__, __LINE__, base64_buf); + char *auth = base64_buf; + + int r = snprintf(request_buf, + req_buf_len, + req_fmt, url, host, rest_port, + auth, strlen(sqlstr), sqlstr); + if (r >= req_buf_len) { + free(request_buf); + ERROR_EXIT("too long request"); + } + verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); + + req_str_len = strlen(request_buf); + sent = 0; + do { +#ifdef WINDOWS + bytes = send(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent, 0); +#else + bytes = write(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent); +#endif + if (bytes < 0) + ERROR_EXIT("writing message to socket"); + if (bytes == 0) + break; + sent+=bytes; + } while(sent < req_str_len); + + memset(response_buf, 0, RESP_BUF_LEN); + resp_len = sizeof(response_buf) - 1; + received = 0; + + char resEncodingChunk[] = "Encoding: chunked"; + char resHttp[] = "HTTP/1.1 "; + char resHttpOk[] = "HTTP/1.1 200 OK"; + + do { +#ifdef WINDOWS + bytes = recv(pThreadInfo->sockfd, response_buf + received, resp_len - received, 0); +#else + bytes = read(pThreadInfo->sockfd, response_buf + received, resp_len - received); +#endif + verbosePrint("%s() LN%d: bytes:%d\n", __func__, __LINE__, bytes); + if (bytes < 0) { + free(request_buf); + ERROR_EXIT("reading response from socket"); + } + if (bytes == 0) + break; + received += bytes; + + verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", + __func__, __LINE__, received, resp_len, response_buf); + + response_buf[RESP_BUF_LEN - 1] = '\0'; + if (strlen(response_buf)) { + if (((NULL == strstr(response_buf, resEncodingChunk)) + && (NULL != strstr(response_buf, resHttp))) + || ((NULL != strstr(response_buf, resHttpOk)) + && (NULL != strstr(response_buf, "\"status\":")))) { + debugPrint( + "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", + __func__, __LINE__, received, resp_len, response_buf); + break; + } + } + } while(received < resp_len); + + if (received == resp_len) { + free(request_buf); + ERROR_EXIT("storing complete response from socket"); + } + + if (strlen(pThreadInfo->filePath) > 0) { + appendResultBufToFile(response_buf, pThreadInfo); + } + + free(request_buf); + + response_buf[RESP_BUF_LEN - 1] = '\0'; + if (NULL == strstr(response_buf, resHttpOk)) { + errorPrint("%s() LN%d, Response:\n%s\n", + __func__, __LINE__, response_buf); + return -1; + } + return 0; +} + +static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + errorPrint2("%s() LN%d, calloc failed! size:%d\n", + __func__, __LINE__, TSDB_MAX_SQL_LEN+1); + return NULL; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); + + return dataBuf; +} + +static char *generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len) +{ + char* buf = (char*)calloc(len, 1); + if (NULL == buf) { + printf("calloc failed! 
size:%d\n", len); + return NULL; + } + + if (tableSeq % 2) { + tstrncpy(buf, "beijing", len); + } else { + tstrncpy(buf, "shanghai", len); + } + //rand_string(buf, stbInfo->tags[i].dataLen); + + return buf; +} + +static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); + return NULL; + } + + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); + for (int i = 0; i < stbInfo->tagCount; i++) { + if ((0 == strncasecmp(stbInfo->tags[i].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(stbInfo->tags[i].dataType, + "nchar", strlen("nchar")))) { + if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { + printf("binary or nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + tmfree(dataBuf); + return NULL; + } + + int32_t tagBufLen = stbInfo->tags[i].dataLen + 1; + char *buf = generateBinaryNCharTagValues(tableSeq, tagBufLen); + if (NULL == buf) { + tmfree(dataBuf); + return NULL; + } + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "\'%s\',", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "int", strlen("int"))) { + if ((g_args.demo_mode) && (i == 0)) { + dataLen += snprintf(dataBuf + dataLen, + TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", (tableSeq % 10) + 1); + } else { + dataLen += snprintf(dataBuf + dataLen, + TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", tableSeq); + } + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "bigint", strlen("bigint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "float", strlen("float"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%f,", rand_float()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "double", strlen("double"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%f,", rand_double()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "smallint", strlen("smallint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "tinyint", strlen("tinyint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "bool", strlen("bool"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_bool()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "timestamp", strlen("timestamp"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", rand_ubigint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "utinyint", strlen("utinyint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_utinyint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "usmallint", strlen("usmallint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_usmallint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "uint", strlen("uint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_uint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "ubigint", strlen("ubigint"))) { + dataLen += 
snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", rand_ubigint()); + } else { + errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType); + tmfree(dataBuf); + return NULL; + } + } + + dataLen -= 1; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); + return dataBuf; +} + +static int calcRowLen(SSuperTable* superTbls) { + int colIndex; + int lenOfOneRow = 0; + + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { + char* dataType = superTbls->columns[colIndex].dataType; + + switch(superTbls->columns[colIndex].data_type) { + case TSDB_DATA_TYPE_BINARY: + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + errorPrint2("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); + } + } + + superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp + + int tagIndex; + int lenOfTagOfOneRow = 0; + for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { + char * dataType = superTbls->tags[tagIndex].dataType; + switch (superTbls->tags[tagIndex].data_type) + { + case TSDB_DATA_TYPE_BINARY: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + break; + case TSDB_DATA_TYPE_NCHAR: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + INT_BUFF_LEN; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; + break; + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; + break; + case TSDB_DATA_TYPE_BOOL: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BOOL_BUFF_LEN; + break; + case TSDB_DATA_TYPE_FLOAT: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; + break; + case TSDB_DATA_TYPE_DOUBLE: + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; + break; + default: + errorPrint2("get error tag type : %s\n", dataType); + exit(EXIT_FAILURE); + } + } + + superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; + + return 0; +} + +static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, + char* dbName, char* stbName, char** childTblNameOfSuperTbl, + int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { + + char command[1024] = "\0"; + char limitBuf[100] = "\0"; + + TAOS_RES * res; + TAOS_ROW row = NULL; + + char* childTblName = *childTblNameOfSuperTbl; + + 
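+    // limit < 0 means "fetch all child tables": the name buffer starts with 10000 slots and is grown by a factor of 1.5 via realloc() as rows are fetched.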
snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", + limit, offset); + + //get all child table name use cmd: select tbname from superTblName; + snprintf(command, 1024, "select tbname from %s.%s %s", dbName, stbName, limitBuf); + + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + taos_free_result(res); + taos_close(taos); + errorPrint2("%s() LN%d, failed to run command %s\n", + __func__, __LINE__, command); + exit(EXIT_FAILURE); + } + + int64_t childTblCount = (limit < 0)?10000:limit; + int64_t count = 0; + if (childTblName == NULL) { + childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + if (NULL == childTblName) { + taos_free_result(res); + taos_close(taos); + errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); + exit(EXIT_FAILURE); + } + } + + char* pTblName = childTblName; + while((row = taos_fetch_row(res)) != NULL) { + int32_t* len = taos_fetch_lengths(res); + + if (0 == strlen((char *)row[0])) { + errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n", + __func__, __LINE__, count); + exit(EXIT_FAILURE); + } + + tstrncpy(pTblName, (char *)row[0], len[0]+1); + //printf("==== sub table name: %s\n", pTblName); + count++; + if (count >= childTblCount - 1) { + char *tmp = realloc(childTblName, + (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); + if (tmp != NULL) { + childTblName = tmp; + childTblCount = (int)(childTblCount*1.5); + memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, + (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); + } else { + // exit, if allocate more memory failed + tmfree(childTblName); + taos_free_result(res); + taos_close(taos); + errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n", + __func__, __LINE__, dbName, stbName); + exit(EXIT_FAILURE); + } + } + pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; + } + + *childTblCountOfSuperTbl = count; + *childTblNameOfSuperTbl = childTblName; + + taos_free_result(res); + return 0; +} + +static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, + char* stbName, char** childTblNameOfSuperTbl, + int64_t* childTblCountOfSuperTbl) { + + return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName, + childTblNameOfSuperTbl, childTblCountOfSuperTbl, + -1, 0); +} + +static int getSuperTableFromServer(TAOS * taos, char* dbName, + SSuperTable* superTbls) { + + char command[1024] = "\0"; + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + //get schema use cmd: describe superTblName; + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printf("failed to run command %s\n", command); + taos_free_result(res); + return -1; + } + + int tagIndex = 0; + int columnIndex = 0; + TAOS_FIELD *fields = taos_fetch_fields(res); + while((row = taos_fetch_row(res)) != NULL) { + if (0 == count) { + count++; + continue; + } + + if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { + tstrncpy(superTbls->tags[tagIndex].field, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "INT", strlen("INT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "TINYINT", strlen("TINYINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == 
strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "SMALLINT", strlen("SMALLINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BIGINT", strlen("BIGINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "FLOAT", strlen("FLOAT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "DOUBLE", strlen("DOUBLE"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BINARY", strlen("BINARY"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "NCHAR", strlen("NCHAR"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BOOL", strlen("BOOL"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "TINYINT UNSIGNED", strlen("TINYINT UNSIGNED"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UTINYINT; + tstrncpy(superTbls->tags[tagIndex].dataType,"UTINYINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "SMALLINT UNSIGNED", strlen("SMALLINT UNSIGNED"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_USMALLINT; + tstrncpy(superTbls->tags[tagIndex].dataType,"USMALLINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "INT UNSIGNED", strlen("INT UNSIGNED"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UINT; + tstrncpy(superTbls->tags[tagIndex].dataType,"UINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + }else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UBIGINT; + tstrncpy(superTbls->tags[tagIndex].dataType,"UBIGINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL; + } + superTbls->tags[tagIndex].dataLen = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(superTbls->tags[tagIndex].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(NOTE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); + if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) + { + tstrncpy(superTbls->tags[tagIndex].dataType, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } + tagIndex++; + } else { + tstrncpy(superTbls->columns[columnIndex].field, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + + + if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "INT", strlen("INT")) && + 
strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "TINYINT", strlen("TINYINT")) && + strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "SMALLINT", strlen("SMALLINT")) && + strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BIGINT", strlen("BIGINT")) && + strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "FLOAT", strlen("FLOAT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "DOUBLE", strlen("DOUBLE"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BINARY", strlen("BINARY"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "NCHAR", strlen("NCHAR"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BOOL", strlen("BOOL"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "TINYINT UNSIGNED", strlen("TINYINT UNSIGNED"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UTINYINT; + tstrncpy(superTbls->columns[columnIndex].dataType,"UTINYINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "SMALLINT UNSIGNED", strlen("SMALLINT UNSIGNED"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_USMALLINT; + tstrncpy(superTbls->columns[columnIndex].dataType,"USMALLINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "INT UNSIGNED", strlen("INT UNSIGNED"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UINT; + tstrncpy(superTbls->columns[columnIndex].dataType,"UINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UBIGINT; + tstrncpy(superTbls->columns[columnIndex].dataType,"UBIGINT", + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } else { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL; + } + superTbls->columns[columnIndex].dataLen = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + 
tstrncpy(superTbls->columns[columnIndex].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(NOTE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); + + if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { + tstrncpy(superTbls->columns[columnIndex].dataType, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + } + + columnIndex++; + } + count++; + } + + superTbls->columnCount = columnIndex; + superTbls->tagCount = tagIndex; + taos_free_result(res); + + calcRowLen(superTbls); + + /* + if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { + //get all child table name use cmd: select tbname from superTblName; + int childTblCount = 10000; + superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + if (superTbls->childTblName == NULL) { + errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + return -1; + } + getAllChildNameOfSuperTable(taos, dbName, + superTbls->stbName, + &superTbls->childTblName, + &superTbls->childTblCount); + } + */ + return 0; +} + +static int createSuperTable( + TAOS * taos, char* dbName, + SSuperTable* superTbl) { + + char *command = calloc(1, BUFFER_SIZE); + assert(command); + + char cols[COL_BUFFER_LEN] = "\0"; + int len = 0; + + int lenOfOneRow = 0; + + if (superTbl->columnCount == 0) { + errorPrint2("%s() LN%d, super table column count is %d\n", + __func__, __LINE__, superTbl->columnCount); + free(command); + return -1; + } + + for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { + + switch(superTbl->columns[colIndex].data_type) { + case TSDB_DATA_TYPE_BINARY: + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "BINARY", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "NCHAR", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (colIndex == 1)) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ", VOLTAGE INT"); + } else { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); + } + lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "BIGINT"); + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "SMALLINT"); + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); + lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (colIndex == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); + } else if (colIndex == 2) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); + } + } else { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); + } + + lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, 
"DOUBLE"); + lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "TIMESTAMP"); + lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_UTINYINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "TINYINT UNSIGNED"); + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_USMALLINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "SMALLINT UNSIGNED"); + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_UINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "INT UNSIGNED"); + lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_UBIGINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "BIGINT UNSIGNED"); + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + default: + taos_close(taos); + free(command); + errorPrint2("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, superTbl->columns[colIndex].dataType); + exit(EXIT_FAILURE); + } + } + + superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp + + // save for creating child table + superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); + if (NULL == superTbl->colsOfCreateChildTable) { + taos_close(taos); + free(command); + errorPrint2("%s() LN%d, Failed when calloc, size:%d", + __func__, __LINE__, len+1); + exit(EXIT_FAILURE); + } + + snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); + verbosePrint("%s() LN%d: %s\n", + __func__, __LINE__, superTbl->colsOfCreateChildTable); + + if (superTbl->tagCount == 0) { + errorPrint2("%s() LN%d, super table tag count is %d\n", + __func__, __LINE__, superTbl->tagCount); + free(command); + return -1; + } + + char tags[TSDB_MAX_TAGS_LEN] = "\0"; + int tagIndex; + len = 0; + + int lenOfTagOfOneRow = 0; + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); + for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { + char* dataType = superTbl->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + if ((g_args.demo_mode) && (tagIndex == 1)) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "location BINARY(%d),", + superTbl->tags[tagIndex].dataLen); + } else { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s(%d),", tagIndex, "BINARY", + superTbl->tags[tagIndex].dataLen); + } + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s(%d),", tagIndex, + "NCHAR", superTbl->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + if ((g_args.demo_mode) && (tagIndex == 0)) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "groupId INT, "); + } else { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "INT"); + } + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BIGINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "SMALLINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; + } else if (strcasecmp(dataType, 
"TINYINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "TINYINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BOOL"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BOOL_BUFF_LEN; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "FLOAT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "DOUBLE"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; + } else if (strcasecmp(dataType, "UTINYINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "TINYINT UNSIGNED"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; + } else if (strcasecmp(dataType, "USMALLINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "SMALLINT UNSIGNED"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; + } else if (strcasecmp(dataType, "UINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "INT UNSIGNED"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; + } else if (strcasecmp(dataType, "UBIGINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BIGINT UNSIGNED"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "TIMESTAMP"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TIMESTAMP_BUFF_LEN; + } else { + taos_close(taos); + free(command); + errorPrint2("%s() LN%d, config error tag type : %s\n", + __func__, __LINE__, dataType); + exit(EXIT_FAILURE); + } + } + + len -= 1; + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); + + superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; + + + snprintf(command, BUFFER_SIZE, + "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s", + dbName, superTbl->stbName, cols, tags); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + errorPrint2("create supertable %s failed!\n\n", + superTbl->stbName); + free(command); + return -1; + } + + debugPrint("create supertable %s success!\n\n", superTbl->stbName); + free(command); + return 0; +} + +int createDatabasesAndStables(char *command) { + TAOS * taos = NULL; + int ret = 0; + taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); + if (taos == NULL) { + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + return -1; + } + + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.db[i].drop) { + sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + taos_close(taos); + return -1; + } + + int dataLen = 0; + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s", + g_Dbs.db[i].dbName); + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " BLOCKS %d", + g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + dataLen += snprintf(command + 
dataLen, + BUFFER_SIZE - dataLen, " CACHE %d", + g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " DAYS %d", + g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " KEEP %d", + g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.quorum > 1) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " QUORUM %d", + g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " REPLICA %d", + g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " UPDATE %d", + g_Dbs.db[i].dbCfg.update); + } + //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { + // dataLen += snprintf(command + dataLen, + // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); + //} + if (g_Dbs.db[i].dbCfg.minRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " MINROWS %d", + g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " MAXROWS %d", + g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " COMP %d", + g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " wal %d", + g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.cacheLast > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " CACHELAST %d", + g_Dbs.db[i].dbCfg.cacheLast); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + " FSYNC %d", g_Dbs.db[i].dbCfg.fsync); + } + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, + "ns", 2)) + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, + "us", 2))) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); + } + + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + taos_close(taos); + errorPrint("\ncreate database %s failed!\n\n", + g_Dbs.db[i].dbName); + return -1; + } + printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); + } + + debugPrint("%s() LN%d supertbl count:%"PRIu64"\n", + __func__, __LINE__, g_Dbs.db[i].superTblCount); + + int validStbCount = 0; + + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, + g_Dbs.db[i].superTbls[j].stbName); + ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); + + if ((ret != 0) || (g_Dbs.db[i].drop)) { + ret = createSuperTable(taos, g_Dbs.db[i].dbName, + &g_Dbs.db[i].superTbls[j]); + + if (0 != ret) { + errorPrint("create super table %"PRIu64" failed!\n\n", j); + continue; + } + } else { + ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, + &g_Dbs.db[i].superTbls[j]); + if (0 != ret) { + errorPrint2("\nget super table %s.%s info failed!\n\n", + g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName); + continue; + } + } + validStbCount ++; + } + g_Dbs.db[i].superTblCount = validStbCount; + } + + taos_close(taos); + return 0; +} + +static void* createTable(void *sarg) +{ + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + 
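+    // Worker thread entry: builds CREATE TABLE IF NOT EXISTS statements for its assigned table range (batched when a super table is used) and executes them with queryDbExec().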
setThreadName("createTable"); + + uint64_t lastPrintTime = taosGetTimestampMs(); + + int buff_len = BUFFER_SIZE; + + pThreadInfo->buffer = calloc(buff_len, 1); + if (pThreadInfo->buffer == NULL) { + errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); + exit(EXIT_FAILURE); + } + + int len = 0; + int batchNum = 0; + + verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->start_table_from, pThreadInfo->end_table_to); + + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + if (0 == g_Dbs.use_metric) { + snprintf(pThreadInfo->buffer, buff_len, + "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;", + pThreadInfo->db_name, + g_args.tb_prefix, i, + pThreadInfo->cols); + batchNum ++; + } else { + if (stbInfo == NULL) { + free(pThreadInfo->buffer); + errorPrint2("%s() LN%d, use metric, but super table info is NULL\n", + __func__, __LINE__); + exit(EXIT_FAILURE); + } else { + if (0 == len) { + batchNum = 0; + memset(pThreadInfo->buffer, 0, buff_len); + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, "CREATE TABLE "); + } + + char* tagsValBuf = NULL; + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, i); + } else { + if (0 == stbInfo->tagSampleCount) { + free(pThreadInfo->buffer); + ERROR_EXIT("use sample file for tag, but has no content!\n"); + } + tagsValBuf = getTagValueFromTagSample( + stbInfo, + i % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + free(pThreadInfo->buffer); + ERROR_EXIT("use metric, but tag buffer is NULL\n"); + } + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, + "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", + pThreadInfo->db_name, stbInfo->childTblPrefix, + i, pThreadInfo->db_name, + stbInfo->stbName, tagsValBuf); + free(tagsValBuf); + batchNum++; + if ((batchNum < stbInfo->batchCreateTableNum) + && ((buff_len - len) + >= (stbInfo->lenOfTagOfOneRow + 256))) { + continue; + } + } + } + + len = 0; + + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)) { + errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + free(pThreadInfo->buffer); + return NULL; + } + pThreadInfo->tables_created += batchNum; + uint64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, i); + lastPrintTime = currentPrintTime; + } + } + + if (0 != len) { + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)) { + errorPrint2("queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); + } + pThreadInfo->tables_created += batchNum; + } + free(pThreadInfo->buffer); + return NULL; +} + +static int startMultiThreadCreateChildTable( + char* cols, int threads, uint64_t tableFrom, int64_t ntables, + char* db_name, SSuperTable* stbInfo) { + + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + + if ((NULL == pids) || (NULL == infos)) { + ERROR_EXIT("createChildTable malloc failed\n"); + } + + if (threads < 1) { + threads = 1; + } + + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int64_t b = 0; + b = ntables % threads; + + for (int64_t i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); + pThreadInfo->stbInfo = stbInfo; + verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); + pThreadInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + db_name, + g_Dbs.port); + if (pThreadInfo->taos == NULL) { + errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); + free(pids); + free(infos); + return -1; + } + + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->use_metric = true; + pThreadInfo->cols = cols; + pThreadInfo->minDelay = UINT64_MAX; + pThreadInfo->tables_created = 0; + pthread_create(pids + i, NULL, createTable, pThreadInfo); + } + + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } + + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + taos_close(pThreadInfo->taos); + + g_actualChildTables += pThreadInfo->tables_created; + } + + free(pids); + free(infos); + + return 0; +} + +static void createChildTables() { + char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; + int len; + + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.use_metric) { + if (g_Dbs.db[i].superTblCount > 0) { + // with super table + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if ((AUTO_CREATE_SUBTBL + == g_Dbs.db[i].superTbls[j].autoCreateTable) + || (TBL_ALREADY_EXISTS + == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + uint64_t startFrom = 0; + + verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n", + __func__, __LINE__, g_totalChildTables, startFrom); + + startMultiThreadCreateChildTable( + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, + g_Dbs.threadCountForCreateTbl, + startFrom, + g_Dbs.db[i].superTbls[j].childTblCount, + g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); + } + } + } else { + // normal table + len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); + for (int j = 0; j < g_args.columnCount; j++) { + if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.dataType[j], + "NCHAR", strlen("NCHAR")) == 0)) { + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth); + } else { + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s", j, g_args.dataType[j]); + } + len = strlen(tblColsBuf); + } + + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); + + verbosePrint("%s() LN%d: dbName: %s num of tb: 
%"PRId64" schema: %s\n", + __func__, __LINE__, + g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf); + startMultiThreadCreateChildTable( + tblColsBuf, + g_Dbs.threadCountForCreateTbl, + 0, + g_args.ntables, + g_Dbs.db[i].dbName, + NULL); + } + } +} + +/* + Read 10000 lines at most. If more than 10000 lines, continue to read after using + */ +static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + + FILE *fp = fopen(stbInfo->tagsFile, "r"); + if (fp == NULL) { + printf("Failed to open tags file: %s, reason:%s\n", + stbInfo->tagsFile, strerror(errno)); + return -1; + } + + if (stbInfo->tagDataBuf) { + free(stbInfo->tagDataBuf); + stbInfo->tagDataBuf = NULL; + } + + int tagCount = 10000; + int count = 0; + char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); + if (tagDataBuf == NULL) { + printf("Failed to calloc, reason:%s\n", strerror(errno)); + fclose(fp); + return -1; + } + + while((readLen = tgetline(&line, &n, fp)) != -1) { + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } + + if (readLen == 0) { + continue; + } + + memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); + count++; + + if (count >= tagCount - 1) { + char *tmp = realloc(tagDataBuf, + (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow); + if (tmp != NULL) { + tagDataBuf = tmp; + tagCount = (int)(tagCount*1.5); + memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow, + 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow)); + } else { + // exit, if allocate more memory failed + printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile); + tmfree(tagDataBuf); + free(line); + fclose(fp); + return -1; + } + } + } + + stbInfo->tagDataBuf = tagDataBuf; + stbInfo->tagSampleCount = count; + + free(line); + fclose(fp); + return 0; +} + +static void getAndSetRowsFromCsvFile(SSuperTable *stbInfo) { + FILE *fp = fopen(stbInfo->sampleFile, "r"); + int line_count = 0; + if (fp == NULL) { + errorPrint("Failed to open sample file: %s, reason:%s\n", + stbInfo->sampleFile, strerror(errno)); + exit(EXIT_FAILURE); + } + char *buf = calloc(1, stbInfo->maxSqlLen); + while (fgets(buf, stbInfo->maxSqlLen, fp)) { + line_count++; + } + fclose(fp); + tmfree(buf); + stbInfo->insertRows = line_count; +} + +/* + Read 10000 lines at most. 
If more than 10000 lines, continue to read after using + */ +static int generateSampleFromCsvForStb( + SSuperTable* stbInfo) { + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + int getRows = 0; + + FILE* fp = fopen(stbInfo->sampleFile, "r"); + if (fp == NULL) { + errorPrint("Failed to open sample file: %s, reason:%s\n", + stbInfo->sampleFile, strerror(errno)); + return -1; + } + + assert(stbInfo->sampleDataBuf); + memset(stbInfo->sampleDataBuf, 0, + MAX_SAMPLES * stbInfo->lenOfOneRow); + while(1) { + readLen = tgetline(&line, &n, fp); + if (-1 == readLen) { + if(0 != fseek(fp, 0, SEEK_SET)) { + errorPrint("Failed to fseek file: %s, reason:%s\n", + stbInfo->sampleFile, strerror(errno)); + fclose(fp); + return -1; + } + continue; + } + + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } + + if (readLen == 0) { + continue; + } + + if (readLen > stbInfo->lenOfOneRow) { + printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", + (int32_t)readLen, stbInfo->lenOfOneRow); + continue; + } + + memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, + line, readLen); + getRows++; + + if (getRows == MAX_SAMPLES) { + break; + } + } + + fclose(fp); + tmfree(line); + return 0; +} + +static bool getColumnAndTagTypeFromInsertJsonFile( + cJSON* stbInfo, SSuperTable* superTbls) { + bool ret = false; + + // columns + cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); + if (columns && columns->type != cJSON_Array) { + errorPrint("%s", "failed to read json, columns not found\n"); + goto PARSE_OVER; + } else if (NULL == columns) { + superTbls->columnCount = 0; + superTbls->tagCount = 0; + return true; + } + + int columnSize = cJSON_GetArraySize(columns); + if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) { + errorPrint("failed to read json, column size overflow, max column size is %d\n", + TSDB_MAX_COLUMNS); + goto PARSE_OVER; + } + + int count = 1; + int index = 0; + StrColumn columnCase; + + //superTbls->columnCount = columnSize; + for (int k = 0; k < columnSize; ++k) { + cJSON* column = cJSON_GetArrayItem(columns, k); + if (column == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(column, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + errorPrint("%s", "failed to read json, column count not found\n"); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(column, "type"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s", "failed to read json, column type not found\n"); + goto PARSE_OVER; + } + //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN); + tstrncpy(columnCase.dataType, dataType->valuestring, + min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); + + cJSON* dataLen = cJSON_GetObjectItem(column, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + debugPrint("%s() LN%d: failed to read json, column len not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + columnCase.dataLen = SMALL_BUFF_LEN; + } + + for (int n = 0; n < count; ++n) { + tstrncpy(superTbls->columns[index].dataType, + columnCase.dataType, + min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); + + 
superTbls->columns[index].dataLen = columnCase.dataLen; + index++; + } + } + + if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { + errorPrint("failed to read json, column size overflow, allowed max column size is %d\n", + MAX_NUM_COLUMNS); + goto PARSE_OVER; + } + + superTbls->columnCount = index; + + for (int c = 0; c < superTbls->columnCount; c++) { + if (0 == strncasecmp(superTbls->columns[c].dataType, + "INT", strlen("INT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "UTINYINT", strlen("UTINYINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_UTINYINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "USMALLINT", strlen("USMALLINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_USMALLINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "UINT", strlen("UINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_UINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "UBIGINT", strlen("UBIGINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_UBIGINT; + } else { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL; + } + } + + count = 1; + index = 0; + // tags + cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); + if (!tags || tags->type != cJSON_Array) { + errorPrint("%s", "failed to read json, tags not found\n"); + goto PARSE_OVER; + } + + int tagSize = cJSON_GetArraySize(tags); + if (tagSize > TSDB_MAX_TAGS) { + errorPrint("failed to read json, tags size overflow, max tag size is %d\n", + TSDB_MAX_TAGS); + goto PARSE_OVER; + } + + //superTbls->tagCount = tagSize; + for (int k = 0; k < tagSize; ++k) { + cJSON* tag = cJSON_GetArrayItem(tags, k); + if (tag == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(tag, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + errorPrint("%s", "failed to read json, column count not found\n"); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON 
*dataType = cJSON_GetObjectItem(tag, "type"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s", "failed to read json, tag type not found\n"); + goto PARSE_OVER; + } + tstrncpy(columnCase.dataType, dataType->valuestring, + min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); + + cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + errorPrint("%s", "failed to read json, column len not found\n"); + goto PARSE_OVER; + } else { + columnCase.dataLen = 0; + } + + for (int n = 0; n < count; ++n) { + tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, + min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); + superTbls->tags[index].dataLen = columnCase.dataLen; + index++; + } + } + + if (index > TSDB_MAX_TAGS) { + errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n", + TSDB_MAX_TAGS); + goto PARSE_OVER; + } + + superTbls->tagCount = index; + + for (int t = 0; t < superTbls->tagCount; t++) { + if (0 == strncasecmp(superTbls->tags[t].dataType, + "INT", strlen("INT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "UTINYINT", strlen("UTINYINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_UTINYINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "USMALLINT", strlen("USMALLINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_USMALLINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "UINT", strlen("UINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_UINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "UBIGINT", strlen("UBIGINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_UBIGINT; + } else { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL; + } + } + + if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { + errorPrint("columns + tags is more than allowed max columns count: %d\n", + TSDB_MAX_COLUMNS); + goto PARSE_OVER; + } + ret = true; + +PARSE_OVER: + return ret; +} 
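+
+/*
+ * getMetaFromInsertJsonFile():
+ * parses the insert-test JSON configuration (connection settings, thread counts,
+ * insert_interval, interlace_rows, num_of_records_per_req, the databases array,
+ * etc.) into the global g_Dbs and g_args structures.
+ */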
+ +static bool getMetaFromInsertJsonFile(cJSON* root) { + bool ret = false; + + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); + } else if (!host) { + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); + } else { + errorPrint("%s", "failed to read json, host not found\n"); + goto PARSE_OVER; + } + + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_Dbs.port = port->valueint; + } else if (!port) { + g_Dbs.port = 6030; + } + + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); + } else if (!user) { + tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE); + } + + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); + } else if (!password) { + tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN); + } + + cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); + if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { + tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); + } else if (!resultfile) { + tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); + } + + cJSON* threads = cJSON_GetObjectItem(root, "thread_count"); + if (threads && threads->type == cJSON_Number) { + g_Dbs.threadCount = threads->valueint; + } else if (!threads) { + g_Dbs.threadCount = 1; + } else { + errorPrint("%s", "failed to read json, threads not found\n"); + goto PARSE_OVER; + } + + cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); + if (threads2 && threads2->type == cJSON_Number) { + g_Dbs.threadCountForCreateTbl = threads2->valueint; + } else if (!threads2) { + g_Dbs.threadCountForCreateTbl = 1; + } else { + errorPrint("%s", "failed to read json, threads2 not found\n"); + goto PARSE_OVER; + } + + cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); + if (gInsertInterval && gInsertInterval->type == cJSON_Number) { + if (gInsertInterval->valueint <0) { + errorPrint("%s", "failed to read json, insert interval input mistake\n"); + goto PARSE_OVER; + } + g_args.insert_interval = gInsertInterval->valueint; + } else if (!gInsertInterval) { + g_args.insert_interval = 0; + } else { + errorPrint("%s", "failed to read json, insert_interval input mistake\n"); + goto PARSE_OVER; + } + + cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); + if (interlaceRows && interlaceRows->type == cJSON_Number) { + if (interlaceRows->valueint < 0) { + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); + goto PARSE_OVER; + + } + g_args.interlaceRows = interlaceRows->valueint; + } else if (!interlaceRows) { + g_args.interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req + } else { + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); + goto PARSE_OVER; + } + + cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); + if (maxSqlLen && maxSqlLen->type == cJSON_Number) { + if (maxSqlLen->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_args.max_sql_len = maxSqlLen->valueint; + } else if (!maxSqlLen) { + g_args.max_sql_len = (1024*1024); + } else { + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); + if (numRecPerReq && numRecPerReq->type == cJSON_Number) { + if (numRecPerReq->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { + printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n", + numRecPerReq->valueint, MAX_RECORDS_PER_REQ); + printf(" number of records per request value will be set to %d\n\n", + MAX_RECORDS_PER_REQ); + prompt(); + numRecPerReq->valueint = MAX_RECORDS_PER_REQ; + } + g_args.reqPerReq = numRecPerReq->valueint; + } else if (!numRecPerReq) { + g_args.reqPerReq = MAX_RECORDS_PER_REQ; + } else { + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* prepareRand = cJSON_GetObjectItem(root, "prepared_rand"); + if (prepareRand && prepareRand->type == cJSON_Number) { + if (prepareRand->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, prepared_rand input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_args.prepared_rand = prepareRand->valueint; + } else if (!prepareRand) { + g_args.prepared_rand = 10000; + } else { + errorPrint("%s() LN%d, failed to read json, prepared_rand not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt + && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = true; // default is no, mean answer_yes. 
+ } else { + errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n"); + goto PARSE_OVER; + } + + // rows per table need be less than insert batch + if (g_args.interlaceRows > g_args.reqPerReq) { + printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", + g_args.interlaceRows, g_args.reqPerReq); + printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", + g_args.reqPerReq); + prompt(); + g_args.interlaceRows = g_args.reqPerReq; + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (!dbs || dbs->type != cJSON_Array) { + errorPrint("%s", "failed to read json, databases not found\n"); + goto PARSE_OVER; + } + + int dbSize = cJSON_GetArraySize(dbs); + if (dbSize > MAX_DB_COUNT) { + errorPrint( + "failed to read json, databases size overflow, max database is %d\n", + MAX_DB_COUNT); + goto PARSE_OVER; + } + g_Dbs.db = calloc(1, sizeof(SDataBase)*dbSize); + assert(g_Dbs.db); + g_Dbs.dbCount = dbSize; + for (int i = 0; i < dbSize; ++i) { + cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); + if (dbinfos == NULL) continue; + + // dbinfo + cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); + if (!dbinfo || dbinfo->type != cJSON_Object) { + errorPrint("%s", "failed to read json, dbinfo not found\n"); + goto PARSE_OVER; + } + + cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); + if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { + errorPrint("%s", "failed to read json, db name not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); + + cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); + if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { + if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { + g_Dbs.db[i].drop = true; + } else { + g_Dbs.db[i].drop = false; + } + } else if (!drop) { + g_Dbs.db[i].drop = g_args.drop_database; + } else { + errorPrint("%s", "failed to read json, drop input mistake\n"); + goto PARSE_OVER; + } + + cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); + if (precision && precision->type == cJSON_String + && precision->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, + SMALL_BUFF_LEN); + } else if (!precision) { + memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); + } else { + errorPrint("%s", "failed to read json, precision not found\n"); + goto PARSE_OVER; + } + + cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); + if (update && update->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.update = update->valueint; + } else if (!update) { + g_Dbs.db[i].dbCfg.update = -1; + } else { + errorPrint("%s", "failed to read json, update not found\n"); + goto PARSE_OVER; + } + + cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); + if (replica && replica->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.replica = replica->valueint; + } else if (!replica) { + g_Dbs.db[i].dbCfg.replica = -1; + } else { + errorPrint("%s", "failed to read json, replica not found\n"); + goto PARSE_OVER; + } + + cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); + if (keep && keep->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.keep = keep->valueint; + } else if (!keep) { + g_Dbs.db[i].dbCfg.keep = -1; + } else { + errorPrint("%s", "failed to read json, keep not found\n"); + goto PARSE_OVER; + } + + cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); + if (days && days->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.days = days->valueint; + } else if (!days) { + 
g_Dbs.db[i].dbCfg.days = -1; + } else { + errorPrint("%s", "failed to read json, days not found\n"); + goto PARSE_OVER; + } + + cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); + if (cache && cache->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.cache = cache->valueint; + } else if (!cache) { + g_Dbs.db[i].dbCfg.cache = -1; + } else { + errorPrint("%s", "failed to read json, cache not found\n"); + goto PARSE_OVER; + } + + cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); + if (blocks && blocks->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.blocks = blocks->valueint; + } else if (!blocks) { + g_Dbs.db[i].dbCfg.blocks = -1; + } else { + errorPrint("%s", "failed to read json, block not found\n"); + goto PARSE_OVER; + } + + //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); + //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) { + // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; + //} else if (!maxtablesPerVnode) { + // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; + //} else { + // printf("failed to read json, maxtablesPerVnode not found"); + // goto PARSE_OVER; + //} + + cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows"); + if (minRows && minRows->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.minRows = minRows->valueint; + } else if (!minRows) { + g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default + } else { + errorPrint("%s", "failed to read json, minRows not found\n"); + goto PARSE_OVER; + } + + cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); + if (maxRows && maxRows->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; + } else if (!maxRows) { + g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default + } else { + errorPrint("%s", "failed to read json, maxRows not found\n"); + goto PARSE_OVER; + } + + cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); + if (comp && comp->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.comp = comp->valueint; + } else if (!comp) { + g_Dbs.db[i].dbCfg.comp = -1; + } else { + errorPrint("%s", "failed to read json, comp not found\n"); + goto PARSE_OVER; + } + + cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel"); + if (walLevel && walLevel->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint; + } else if (!walLevel) { + g_Dbs.db[i].dbCfg.walLevel = -1; + } else { + errorPrint("%s", "failed to read json, walLevel not found\n"); + goto PARSE_OVER; + } + + cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast"); + if (cacheLast && cacheLast->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.cacheLast = cacheLast->valueint; + } else if (!cacheLast) { + g_Dbs.db[i].dbCfg.cacheLast = -1; + } else { + errorPrint("%s", "failed to read json, cacheLast not found\n"); + goto PARSE_OVER; + } + + cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum"); + if (quorum && quorum->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.quorum = quorum->valueint; + } else if (!quorum) { + g_Dbs.db[i].dbCfg.quorum = 1; + } else { + printf("failed to read json, quorum input mistake"); + goto PARSE_OVER; + } + + cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync"); + if (fsync && fsync->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.fsync = fsync->valueint; + } else if (!fsync) { + g_Dbs.db[i].dbCfg.fsync = -1; + } else { + errorPrint("%s", "failed to read json, fsync input mistake\n"); + goto PARSE_OVER; + } + + // super_tables + cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); + if (!stables || stables->type != cJSON_Array) { + errorPrint("%s", "failed to read json, 
super_tables not found\n"); + goto PARSE_OVER; + } + + int stbSize = cJSON_GetArraySize(stables); + if (stbSize > MAX_SUPER_TABLE_COUNT) { + errorPrint( + "failed to read json, supertable size overflow, max supertable is %d\n", + MAX_SUPER_TABLE_COUNT); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls = calloc(1, stbSize * sizeof(SSuperTable)); + assert(g_Dbs.db[i].superTbls); + g_Dbs.db[i].superTblCount = stbSize; + for (int j = 0; j < stbSize; ++j) { + cJSON* stbInfo = cJSON_GetArrayItem(stables, j); + if (stbInfo == NULL) continue; + + // dbinfo + cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); + if (!stbName || stbName->type != cJSON_String + || stbName->valuestring == NULL) { + errorPrint("%s", "failed to read json, stb name not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring, + TSDB_TABLE_NAME_LEN); + + cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); + if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { + errorPrint("%s", "failed to read json, childtable_prefix not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, + TBNAME_PREFIX_LEN); + + cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); + if (autoCreateTbl + && autoCreateTbl->type == cJSON_String + && autoCreateTbl->valuestring != NULL) { + if ((0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) + && (TBL_ALREADY_EXISTS != g_Dbs.db[i].superTbls[j].childTblExists)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; + } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + } else if (!autoCreateTbl) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + errorPrint("%s", "failed to read json, auto_create_table not found\n"); + goto PARSE_OVER; + } + + cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); + if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; + } else if (!batchCreateTbl) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = 10; + } else { + errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n"); + goto PARSE_OVER; + } + + cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no + if (childTblExists + && childTblExists->type == cJSON_String + && childTblExists->valuestring != NULL) { + if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) + && (g_Dbs.db[i].drop == false)) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; + } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) + || (g_Dbs.db[i].drop == true))) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } + } else if (!childTblExists) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + errorPrint("%s", + "failed to read json, child_table_exists not found\n"); + goto PARSE_OVER; + } + + if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + + cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); + if (!count || count->type != cJSON_Number || 0 >= count->valueint) { + errorPrint("%s", + "failed to read 
json, childtable_count input mistake\n"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; + + cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); + if (dataSource && dataSource->type == cJSON_String + && dataSource->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, + dataSource->valuestring, + min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1)); + } else if (!dataSource) { + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", + min(SMALL_BUFF_LEN, strlen("rand") + 1)); + } else { + errorPrint("%s", "failed to read json, data_source not found\n"); + goto PARSE_OVER; + } + + cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt + if (stbIface && stbIface->type == cJSON_String + && stbIface->valuestring != NULL) { + if (0 == strcasecmp(stbIface->valuestring, "taosc")) { + g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { + g_Dbs.db[i].superTbls[j].iface= REST_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { + g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; + } else { + errorPrint("failed to read json, insert_mode %s not recognized\n", + stbIface->valuestring); + goto PARSE_OVER; + } + } else if (!stbIface) { + g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; + } else { + errorPrint("%s", "failed to read json, insert_mode not found\n"); + goto PARSE_OVER; + } + + cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); + if ((childTbl_limit) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { + if (childTbl_limit->type != cJSON_Number) { + errorPrint("%s", "failed to read json, childtable_limit\n"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; + } else { + g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. 
+ } + + cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); + if ((childTbl_offset) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { + if ((childTbl_offset->type != cJSON_Number) + || (0 > childTbl_offset->valueint)) { + errorPrint("%s", "failed to read json, childtable_offset\n"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; + } else { + g_Dbs.db[i].superTbls[j].childTblOffset = 0; + } + + cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); + if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, + ts->valuestring, TSDB_DB_NAME_LEN); + } else if (!ts) { + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, + "now", TSDB_DB_NAME_LEN); + } else { + errorPrint("%s", "failed to read json, start_timestamp not found\n"); + goto PARSE_OVER; + } + + cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); + if (timestampStep && timestampStep->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; + } else if (!timestampStep) { + g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; + } else { + errorPrint("%s", "failed to read json, timestamp_step not found\n"); + goto PARSE_OVER; + } + + cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); + if (sampleFormat && sampleFormat->type + == cJSON_String && sampleFormat->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, + sampleFormat->valuestring, + min(SMALL_BUFF_LEN, + strlen(sampleFormat->valuestring) + 1)); + } else if (!sampleFormat) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", + SMALL_BUFF_LEN); + } else { + errorPrint("%s", "failed to read json, sample_format not found\n"); + goto PARSE_OVER; + } + + cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); + if (sampleFile && sampleFile->type == cJSON_String + && sampleFile->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, + sampleFile->valuestring, + min(MAX_FILE_NAME_LEN, + strlen(sampleFile->valuestring) + 1)); + } else if (!sampleFile) { + memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, + MAX_FILE_NAME_LEN); + } else { + errorPrint("%s", "failed to read json, sample_file not found\n"); + goto PARSE_OVER; + } + + cJSON *useSampleTs = cJSON_GetObjectItem(stbInfo, "use_sample_ts"); + if (useSampleTs && useSampleTs->type == cJSON_String + && useSampleTs->valuestring != NULL) { + if (0 == strncasecmp(useSampleTs->valuestring, "yes", 3)) { + g_Dbs.db[i].superTbls[j].useSampleTs = true; + } else if (0 == strncasecmp(useSampleTs->valuestring, "no", 2)){ + g_Dbs.db[i].superTbls[j].useSampleTs = false; + } else { + g_Dbs.db[i].superTbls[j].useSampleTs = false; + } + } else if (!useSampleTs) { + g_Dbs.db[i].superTbls[j].useSampleTs = false; + } else { + errorPrint("%s", "failed to read json, use_sample_ts not found\n"); + goto PARSE_OVER; + } + + cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); + if ((tagsFile && tagsFile->type == cJSON_String) + && (tagsFile->valuestring != NULL)) { + tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, + tagsFile->valuestring, MAX_FILE_NAME_LEN); + if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { + g_Dbs.db[i].superTbls[j].tagSource = 0; + } else { + g_Dbs.db[i].superTbls[j].tagSource = 1; + } + } else if (!tagsFile) { + memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); + g_Dbs.db[i].superTbls[j].tagSource = 0; + } 
else {
+                errorPrint("%s", "failed to read json, tags_file not found\n");
+                goto PARSE_OVER;
+            }
+
+            cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
+            if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) {
+                int32_t len = stbMaxSqlLen->valueint;
+                if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
+                    len = TSDB_MAX_ALLOWED_SQL_LEN;
+                } else if (len < 5) {
+                    len = 5;
+                }
+                g_Dbs.db[i].superTbls[j].maxSqlLen = len;
+            } else if (!stbMaxSqlLen) {
+                // no per-super-table max_sql_len: fall back to the global value
+                g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
+            } else {
+                errorPrint("%s", "failed to read json, stb max_sql_len input mistake\n");
+                goto PARSE_OVER;
+            }
+            /*
+            cJSON *multiThreadWriteOneTbl =
+                cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
+            if (multiThreadWriteOneTbl
+                    && multiThreadWriteOneTbl->type == cJSON_String
+                    && multiThreadWriteOneTbl->valuestring != NULL) {
+                if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) {
+                    g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1;
+                } else {
+                    g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+                }
+            } else if (!multiThreadWriteOneTbl) {
+                g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+            } else {
+                errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n");
+                goto PARSE_OVER;
+            }
+            */
+            cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
+            if (insertRows && insertRows->type == cJSON_Number) {
+                if (insertRows->valueint < 0) {
+                    errorPrint("%s", "failed to read json, insert_rows input mistake\n");
+                    goto PARSE_OVER;
+                }
+                g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
+            } else if (!insertRows) {
+                g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
+            } else {
+                errorPrint("%s", "failed to read json, insert_rows input mistake\n");
+                goto PARSE_OVER;
+            }
+
+            cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
+            if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
+                if (stbInterlaceRows->valueint < 0) {
+                    errorPrint("%s", "failed to read json, interlace rows input mistake\n");
+                    goto PARSE_OVER;
+                }
+                g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
+
+                if (g_Dbs.db[i].superTbls[j].interlaceRows > g_Dbs.db[i].superTbls[j].insertRows) {
+                    printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %u > insert_rows %"PRId64"\n\n",
+                            i, j, g_Dbs.db[i].superTbls[j].interlaceRows,
+                            g_Dbs.db[i].superTbls[j].insertRows);
+                    printf("        interlace rows value will be set to insert_rows %"PRId64"\n\n",
+                            g_Dbs.db[i].superTbls[j].insertRows);
+                    prompt();
+                    g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows;
+                }
+            } else if (!stbInterlaceRows) {
+                // 0 means progressive mode, > 0 means interlace mode; the max value must be
+                // less than or equal to num_of_records_per_req
+                g_Dbs.db[i].superTbls[j].interlaceRows = g_args.interlaceRows;
+            } else {
+                errorPrint(
+                        "%s", "failed to read json, interlace rows input mistake\n");
+                goto PARSE_OVER;
+            }
+
+            cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio");
+            if (disorderRatio && disorderRatio->type == cJSON_Number) {
+                if (disorderRatio->valueint > 50)
+                    disorderRatio->valueint = 50;
+
+                if (disorderRatio->valueint < 0)
+                    disorderRatio->valueint = 0;
+
+                g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint;
+            } else if (!disorderRatio) {
+                g_Dbs.db[i].superTbls[j].disorderRatio = 0;
+            } else {
+                errorPrint("%s", "failed to read json, disorder_ratio input mistake\n");
+                goto PARSE_OVER;
+            }
+
+            cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range");
+            if (disorderRange && disorderRange->type == cJSON_Number) {
+                g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint;
+            } else if (!disorderRange) {
+                g_Dbs.db[i].superTbls[j].disorderRange = 1000;
+            } else {
+                errorPrint("%s", "failed to read json, disorder_range input mistake\n");
+                goto PARSE_OVER;
+            }
+
+            cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
+            if (insertInterval && insertInterval->type == cJSON_Number) {
+                g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
+                if (insertInterval->valueint < 0) {
+                    errorPrint("%s", "failed to read json, insert_interval input mistake\n");
+                    goto PARSE_OVER;
+                }
+            } else if (!insertInterval) {
+                verbosePrint("%s() LN%d: stable insert interval is overridden by the global %"PRIu64".\n",
+                        __func__, __LINE__, g_args.insert_interval);
+                g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
+            } else {
+                errorPrint("%s", "failed to read json, insert_interval input mistake\n");
+                goto PARSE_OVER;
+            }
+
+            int retVal = getColumnAndTagTypeFromInsertJsonFile(
+                    stbInfo, &g_Dbs.db[i].superTbls[j]);
+            if (false == retVal) {
+                goto PARSE_OVER;
+            }
+        }
+    }
+
+    ret = true;
+
+PARSE_OVER:
+    return ret;
+}
+
+static bool getMetaFromQueryJsonFile(cJSON* root) {
+    bool ret = false;
+
+    cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
+    if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
+        tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
+    }
+
+    cJSON* host = cJSON_GetObjectItem(root, "host");
+    if (host && host->type == cJSON_String && host->valuestring != NULL) {
+        tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE);
+    } else if (!host) {
+        tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+    } else {
+        errorPrint("%s", "failed to read json, host not found\n");
+        goto PARSE_OVER;
+    }
+
+    cJSON* port = cJSON_GetObjectItem(root, "port");
+    if (port && port->type == cJSON_Number) {
+        g_queryInfo.port = port->valueint;
+    } else if (!port) {
+        g_queryInfo.port = 6030;
+    }
+
+    cJSON* user = cJSON_GetObjectItem(root, "user");
+    if (user && user->type == cJSON_String && user->valuestring != NULL) {
+        tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE);
+    } else if (!user) {
+        tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE);
+    }
+
+    cJSON* password = cJSON_GetObjectItem(root, "password");
+    if (password && password->type == cJSON_String && password->valuestring != NULL) {
+        tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
+    } else if (!password) {
+        tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);
+    }
+
+    cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+    if (answerPrompt && answerPrompt->type ==
cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; + } else { + errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n"); + goto PARSE_OVER; + } + + cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); + if (gQueryTimes && gQueryTimes->type == cJSON_Number) { + if (gQueryTimes->valueint <= 0) { + errorPrint("%s()", "failed to read json, query_times input mistake\n"); + goto PARSE_OVER; + } + g_args.query_times = gQueryTimes->valueint; + } else if (!gQueryTimes) { + g_args.query_times = 1; + } else { + errorPrint("%s", "failed to read json, query_times input mistake\n"); + goto PARSE_OVER; + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { + tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); + } else if (!dbs) { + errorPrint("%s", "failed to read json, databases not found\n"); + goto PARSE_OVER; + } + + cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); + if (queryMode + && queryMode->type == cJSON_String + && queryMode->valuestring != NULL) { + tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, + min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1)); + } else if (!queryMode) { + tstrncpy(g_queryInfo.queryMode, "taosc", + min(SMALL_BUFF_LEN, strlen("taosc") + 1)); + } else { + errorPrint("%s", "failed to read json, query_mode not found\n"); + goto PARSE_OVER; + } + + // specified_table_query + cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); + if (!specifiedQuery) { + g_queryInfo.specifiedQueryInfo.concurrent = 1; + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedQuery->type != cJSON_Object) { + errorPrint("%s", "failed to read json, super_table_query not found\n"); + goto PARSE_OVER; + } else { + cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); + if (queryInterval && queryInterval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; + } else if (!queryInterval) { + g_queryInfo.specifiedQueryInfo.queryInterval = 0; + } + + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, + "query_times"); + if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { + if (specifiedQueryTimes->valueint <= 0) { + errorPrint( + "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + specifiedQueryTimes->valueint); + goto PARSE_OVER; + + } + g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; + } else if (!specifiedQueryTimes) { + g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); + if (concurrent && concurrent->type == cJSON_Number) { + if (concurrent->valueint <= 0) { + errorPrint( + "query sqlCount %d or concurrent %d is not correct.\n", + g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); + goto PARSE_OVER; + } + g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + } else if (!concurrent) { + 
g_queryInfo.specifiedQueryInfo.concurrent = 1; + } + + cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String + && specifiedAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; + } else { + errorPrint("%s", "failed to read json, async mode input error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } + + cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); + if (interval && interval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; + } else if (!interval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; + } + + cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); + if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { + if (0 == strcmp("yes", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } else if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = false; + } else { + errorPrint("%s", "failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } + + cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); + if (keepProgress + && keepProgress->type == cJSON_String + && keepProgress->valuestring != NULL) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } else { + errorPrint("%s", "failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); + if (!specifiedSqls) { + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedSqls->type != cJSON_Array) { + errorPrint("%s", "failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(specifiedSqls); + if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent + > MAX_QUERY_SQL_COUNT) { + errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", + superSqlSize, + g_queryInfo.specifiedQueryInfo.concurrent, + MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + errorPrint("%s", "failed to read json, sql not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], + sqlStr->valuestring, BUFFER_SIZE); + + // default value is -1, which mean infinite loop + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + cJSON* endAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); + if (endAfterConsume + && 
endAfterConsume->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] + = endAfterConsume->valueint; + } + if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + cJSON* resubAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); + if ((resubAfterConsume) + && (resubAfterConsume->type == cJSON_Number) + && (resubAfterConsume->valueint >= 0)) { + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] + = resubAfterConsume->valueint; + } + + if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if ((NULL != result) && (result->type == cJSON_String) + && (result->valuestring != NULL)) { + tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.specifiedQueryInfo.result[j], + 0, MAX_FILE_NAME_LEN); + } else { + errorPrint("%s", + "failed to read json, super query result file not found\n"); + goto PARSE_OVER; + } + } + } + } + + // super_table_query + cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); + if (!superQuery) { + g_queryInfo.superQueryInfo.threadCnt = 1; + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superQuery->type != cJSON_Object) { + errorPrint("%s", "failed to read json, sub_table_query not found\n"); + ret = true; + goto PARSE_OVER; + } else { + cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); + if (subrate && subrate->type == cJSON_Number) { + g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; + } else if (!subrate) { + g_queryInfo.superQueryInfo.queryInterval = 0; + } + + cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); + if (superQueryTimes && superQueryTimes->type == cJSON_Number) { + if (superQueryTimes->valueint <= 0) { + errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + superQueryTimes->valueint); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; + } else if (!superQueryTimes) { + g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s", "failed to read json, query_times input mistake\n"); + goto PARSE_OVER; + } + + cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); + if (threads && threads->type == cJSON_Number) { + if (threads->valueint <= 0) { + errorPrint("%s", "failed to read json, threads input mistake\n"); + goto PARSE_OVER; + + } + g_queryInfo.superQueryInfo.threadCnt = threads->valueint; + } else if (!threads) { + g_queryInfo.superQueryInfo.threadCnt = 1; + } + + //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); + //if (subTblCnt && subTblCnt->type == cJSON_Number) { + // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; + //} else if (!subTblCnt) { + // g_queryInfo.superQueryInfo.childTblCount = 0; + //} + + cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); + if (stblname && stblname->type == cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring, + TSDB_TABLE_NAME_LEN); + } else { + errorPrint("%s", "failed to read json, super table name input error\n"); + goto PARSE_OVER; + } + + cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); + if (superAsyncMode && 
superAsyncMode->type == cJSON_String + && superAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; + } else { + errorPrint("%s", "failed to read json, async mode input error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } + + cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); + if (superInterval && superInterval->type == cJSON_Number) { + if (superInterval->valueint < 0) { + errorPrint("%s", "failed to read json, interval input mistake\n"); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; + } else if (!superInterval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.superQueryInfo.subscribeInterval = 10000; + } + + cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); + if (subrestart && subrestart->type == cJSON_String + && subrestart->valuestring != NULL) { + if (0 == strcmp("yes", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } else if (0 == strcmp("no", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = false; + } else { + errorPrint("%s", "failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } + + cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + if (superkeepProgress && + superkeepProgress->type == cJSON_String + && superkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } else { + errorPrint("%s", + "failed to read json, subscribe super table keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } + + // default value is -1, which mean do not resub + g_queryInfo.superQueryInfo.endAfterConsume = -1; + cJSON* superEndAfterConsume = + cJSON_GetObjectItem(superQuery, "endAfterConsume"); + if (superEndAfterConsume + && superEndAfterConsume->type == cJSON_Number) { + g_queryInfo.superQueryInfo.endAfterConsume = + superEndAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.endAfterConsume < -1) + g_queryInfo.superQueryInfo.endAfterConsume = -1; + + // default value is -1, which mean do not resub + g_queryInfo.superQueryInfo.resubAfterConsume = -1; + cJSON* superResubAfterConsume = + cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + if ((superResubAfterConsume) + && (superResubAfterConsume->type == cJSON_Number) + && (superResubAfterConsume->valueint >= 0)) { + g_queryInfo.superQueryInfo.resubAfterConsume = + superResubAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) + g_queryInfo.superQueryInfo.resubAfterConsume = -1; + + // supert table sqls + cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); + if (!superSqls) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superSqls->type != cJSON_Array) { + errorPrint("%s", "failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + 
errorPrint("failed to read json, query sql size overflow, max is %d\n", + MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.superQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String + || sqlStr->valuestring == NULL) { + errorPrint("%s", "failed to read json, sql not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, + BUFFER_SIZE); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (result != NULL && result->type == cJSON_String + && result->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + errorPrint("%s", "failed to read json, sub query result file not found\n"); + goto PARSE_OVER; + } + } + } + } + + ret = true; + +PARSE_OVER: + return ret; +} + +static bool getInfoFromJsonFile(char* file) { + debugPrint("%s %d %s\n", __func__, __LINE__, file); + + FILE *fp = fopen(file, "r"); + if (!fp) { + errorPrint("failed to read %s, reason:%s\n", file, strerror(errno)); + return false; + } + + bool ret = false; + int maxLen = 6400000; + char *content = calloc(1, maxLen + 1); + int len = fread(content, 1, maxLen, fp); + if (len <= 0) { + free(content); + fclose(fp); + errorPrint("failed to read %s, content is null", file); + return false; + } + + content[len] = 0; + cJSON* root = cJSON_Parse(content); + if (root == NULL) { + errorPrint("failed to cjson parse %s, invalid json format\n", file); + goto PARSE_OVER; + } + + cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); + if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { + if (0 == strcasecmp("insert", filetype->valuestring)) { + g_args.test_mode = INSERT_TEST; + } else if (0 == strcasecmp("query", filetype->valuestring)) { + g_args.test_mode = QUERY_TEST; + } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { + g_args.test_mode = SUBSCRIBE_TEST; + } else { + errorPrint("%s", "failed to read json, filetype not support\n"); + goto PARSE_OVER; + } + } else if (!filetype) { + g_args.test_mode = INSERT_TEST; + } else { + errorPrint("%s", "failed to read json, filetype not found\n"); + goto PARSE_OVER; + } + + if (INSERT_TEST == g_args.test_mode) { + memset(&g_Dbs, 0, sizeof(SDbs)); + g_Dbs.use_metric = g_args.use_metric; + ret = getMetaFromInsertJsonFile(root); + } else if ((QUERY_TEST == g_args.test_mode) + || (SUBSCRIBE_TEST == g_args.test_mode)) { + memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); + ret = getMetaFromQueryJsonFile(root); + } else { + errorPrint("%s", + "input json file type error! 
please input correct file type: insert or query or subscribe\n"); + goto PARSE_OVER; + } + +PARSE_OVER: + free(content); + cJSON_Delete(root); + fclose(fp); + return ret; +} + +static int prepareSampleData() { + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { + if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { + return -1; + } + } + } + } + + return 0; +} + +static void postFreeResource() { + tmfclose(g_fpOfInsertResult); + + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { + tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { + tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf); + g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; + } + +#if STMT_BIND_PARAM_BATCH == 1 + for (int c = 0; + c < g_Dbs.db[i].superTbls[j].columnCount; c ++) { + + if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) { + + tmfree((char *)((uintptr_t)*(uintptr_t*)( + g_Dbs.db[i].superTbls[j].sampleBindBatchArray + + sizeof(char*) * c))); + } + } + tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray); +#endif + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { + tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf); + g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].childTblName) { + tmfree(g_Dbs.db[i].superTbls[j].childTblName); + g_Dbs.db[i].superTbls[j].childTblName = NULL; + } + } + tmfree(g_Dbs.db[i].superTbls); + } + tmfree(g_Dbs.db); + tmfree(g_randbool_buff); + tmfree(g_randint_buff); + tmfree(g_rand_voltage_buff); + tmfree(g_randbigint_buff); + tmfree(g_randsmallint_buff); + tmfree(g_randtinyint_buff); + tmfree(g_randfloat_buff); + tmfree(g_rand_current_buff); + tmfree(g_rand_phase_buff); + tmfree(g_randdouble_buff); + tmfree(g_randuint_buff); + tmfree(g_randutinyint_buff); + tmfree(g_randusmallint_buff); + tmfree(g_randubigint_buff); + tmfree(g_randint); + tmfree(g_randuint); + tmfree(g_randbigint); + tmfree(g_randubigint); + tmfree(g_randfloat); + tmfree(g_randdouble); + + tmfree(g_sampleDataBuf); + +#if STMT_BIND_PARAM_BATCH == 1 + for (int l = 0; + l < g_args.columnCount; l ++) { + if (g_sampleBindBatchArray) { + tmfree((char *)((uintptr_t)*(uintptr_t*)( + g_sampleBindBatchArray + + sizeof(char*) * l))); + } + } + tmfree(g_sampleBindBatchArray); + +#endif +} + +static int getRowDataFromSample( + char* dataBuf, int64_t maxLen, int64_t timestamp, + SSuperTable* stbInfo, int64_t* sampleUsePos) +{ + if ((*sampleUsePos) == MAX_SAMPLES) { + *sampleUsePos = 0; + } + + int dataLen = 0; + if(stbInfo->useSampleTs) { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + "(%s", + stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * (*sampleUsePos)); + } else { + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + "(%" PRId64 ", ", timestamp); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + "%s", + stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * (*sampleUsePos)); + } + + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + + (*sampleUsePos)++; + + return dataLen; +} + +static int64_t generateStbRowData( + SSuperTable* stbInfo, + char* recBuf, + int64_t remainderBufLen, + int64_t timestamp) +{ + int64_t dataLen = 0; + char *pstr = recBuf; + int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; + + dataLen += snprintf(pstr + 
dataLen, maxLen - dataLen, + "(%" PRId64 "", timestamp); + + for (int i = 0; i < stbInfo->columnCount; i++) { + tstrncpy(pstr + dataLen, ",", 2); + dataLen += 1; + + if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY) + || (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) { + if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary or nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } + char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); + if (NULL == buf) { + errorPrint2("calloc failed! size:%d\n", stbInfo->columns[i].dataLen); + return -1; + } + rand_string(buf, stbInfo->columns[i].dataLen); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf); + tmfree(buf); + + } else { + char *tmp = NULL; + switch(stbInfo->columns[i].data_type) { + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (i == 1)) { + tmp = demo_voltage_int_str(); + } else { + tmp = rand_int_str(); + } + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_UINT: + tmp = rand_uint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_BIGINT: + tmp = rand_bigint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_UBIGINT: + tmp = rand_ubigint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (i == 0) { + tmp = demo_current_float_str(); + } else { + tmp = demo_phase_float_str(); + } + } else { + tmp = rand_float_str(); + } + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, FLOAT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmp = rand_double_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, DOUBLE_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + tmp = rand_smallint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_USMALLINT: + tmp = rand_usmallint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_TINYINT: + tmp = rand_tinyint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_UTINYINT: + tmp = rand_utinyint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_BOOL: + tmp = rand_bool_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BOOL_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmp = rand_bigint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("Not support data type: %s\n", + stbInfo->columns[i].dataType); + exit(EXIT_FAILURE); + } + if (tmp) { + dataLen += tmpLen; + } + } + + if (dataLen > (remainderBufLen - (128))) + return 0; + } + + dataLen += snprintf(pstr + dataLen, 2, ")"); + + verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen); + verbosePrint("%s() LN%d, 
recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + + return strlen(recBuf); +} + +static int64_t generateData(char *recBuf, char *data_type, + int64_t timestamp, int lenOfBinary) { + memset(recBuf, 0, MAX_DATA_SIZE); + char *pstr = recBuf; + pstr += sprintf(pstr, "(%"PRId64"", timestamp); + + int columnCount = g_args.columnCount; + + bool b; + char *s; + for (int i = 0; i < columnCount; i++) { + switch (data_type[i]) { + case TSDB_DATA_TYPE_TINYINT: + pstr += sprintf(pstr, ",%d", rand_tinyint() ); + break; + + case TSDB_DATA_TYPE_SMALLINT: + pstr += sprintf(pstr, ",%d", rand_smallint()); + break; + + case TSDB_DATA_TYPE_INT: + pstr += sprintf(pstr, ",%d", rand_int()); + break; + + case TSDB_DATA_TYPE_BIGINT: + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); + break; + + case TSDB_DATA_TYPE_FLOAT: + pstr += sprintf(pstr, ",%10.4f", rand_float()); + break; + + case TSDB_DATA_TYPE_DOUBLE: + pstr += sprintf(pstr, ",%20.8f", rand_double()); + break; + + case TSDB_DATA_TYPE_BOOL: + b = rand_bool() & 1; + pstr += sprintf(pstr, ",%s", b ? "true" : "false"); + break; + + case TSDB_DATA_TYPE_BINARY: + s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(EXIT_FAILURE); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + break; + + case TSDB_DATA_TYPE_NCHAR: + s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(EXIT_FAILURE); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + break; + + case TSDB_DATA_TYPE_UTINYINT: + pstr += sprintf(pstr, ",%d", rand_utinyint() ); + break; + + case TSDB_DATA_TYPE_USMALLINT: + pstr += sprintf(pstr, ",%d", rand_usmallint()); + break; + + case TSDB_DATA_TYPE_UINT: + pstr += sprintf(pstr, ",%d", rand_uint()); + break; + + case TSDB_DATA_TYPE_UBIGINT: + pstr += sprintf(pstr, ",%"PRId64"", rand_ubigint()); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("%s() LN%d, Unknown data type %d\n", + __func__, __LINE__, + data_type[i]); + exit(EXIT_FAILURE); + } + + if (strlen(recBuf) > MAX_DATA_SIZE) { + ERROR_EXIT("column length too long, abort"); + } + } + + pstr += sprintf(pstr, ")"); + + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + + return (int32_t)strlen(recBuf); +} + +static int generateSampleFromRand( + char *sampleDataBuf, + uint64_t lenOfOneRow, + int columnCount, + StrColumn *columns + ) +{ + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + + char *buff = malloc(lenOfOneRow); + if (NULL == buff) { + errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n", + __func__, __LINE__, lenOfOneRow); + exit(EXIT_FAILURE); + } + + for (int i=0; i < MAX_SAMPLES; i++) { + uint64_t pos = 0; + memset(buff, 0, lenOfOneRow); + + for (int c = 0; c < columnCount; c++) { + char *tmp = NULL; + + uint32_t dataLen; + char data_type = (columns)?(columns[c].data_type):g_args.data_type[c]; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + dataLen = (columns)?columns[c].dataLen:g_args.binwidth; + rand_string(data, dataLen); + pos += sprintf(buff + pos, "%s,", data); + break; + + case TSDB_DATA_TYPE_NCHAR: + dataLen = (columns)?columns[c].dataLen:g_args.binwidth; + rand_string(data, dataLen - 1); + pos += sprintf(buff + pos, "%s,", 
data); + break; + + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (c == 1)) { + tmp = demo_voltage_int_str(); + } else { + tmp = rand_int_str(); + } + pos += sprintf(buff + pos, "%s,", tmp); + break; + + case TSDB_DATA_TYPE_UINT: + pos += sprintf(buff + pos, "%s,", rand_uint_str()); + break; + + case TSDB_DATA_TYPE_BIGINT: + pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + break; + + case TSDB_DATA_TYPE_UBIGINT: + pos += sprintf(buff + pos, "%s,", rand_ubigint_str()); + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (c == 0) { + tmp = demo_current_float_str(); + } else { + tmp = demo_phase_float_str(); + } + } else { + tmp = rand_float_str(); + } + pos += sprintf(buff + pos, "%s,", tmp); + break; + + case TSDB_DATA_TYPE_DOUBLE: + pos += sprintf(buff + pos, "%s,", rand_double_str()); + break; + + case TSDB_DATA_TYPE_SMALLINT: + pos += sprintf(buff + pos, "%s,", rand_smallint_str()); + break; + + case TSDB_DATA_TYPE_USMALLINT: + pos += sprintf(buff + pos, "%s,", rand_usmallint_str()); + break; + + case TSDB_DATA_TYPE_TINYINT: + pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); + break; + + case TSDB_DATA_TYPE_UTINYINT: + pos += sprintf(buff + pos, "%s,", rand_utinyint_str()); + break; + + case TSDB_DATA_TYPE_BOOL: + pos += sprintf(buff + pos, "%s,", rand_bool_str()); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("%s() LN%d, Unknown data type %s\n", + __func__, __LINE__, + (columns)?(columns[c].dataType):g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } + + *(buff + pos - 1) = 0; + memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos); + } + + free(buff); + return 0; +} + +static int generateSampleFromRandForNtb() +{ + return generateSampleFromRand( + g_sampleDataBuf, + g_args.lenOfOneRow, + g_args.columnCount, + NULL); +} + +static int generateSampleFromRandForStb(SSuperTable *stbInfo) +{ + return generateSampleFromRand( + stbInfo->sampleDataBuf, + stbInfo->lenOfOneRow, + stbInfo->columnCount, + stbInfo->columns); +} + +static int prepareSampleForNtb() { + g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1); + if (NULL == g_sampleDataBuf) { + errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, + g_args.lenOfOneRow * MAX_SAMPLES, + strerror(errno)); + return -1; + } + + return generateSampleFromRandForNtb(); +} + +static int prepareSampleForStb(SSuperTable *stbInfo) { + + stbInfo->sampleDataBuf = calloc( + stbInfo->lenOfOneRow * MAX_SAMPLES, 1); + if (NULL == stbInfo->sampleDataBuf) { + errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, + stbInfo->lenOfOneRow * MAX_SAMPLES, + strerror(errno)); + return -1; + } + + int ret; + if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) { + if(stbInfo->useSampleTs) { + getAndSetRowsFromCsvFile(stbInfo); + } + ret = generateSampleFromCsvForStb(stbInfo); + } else { + ret = generateSampleFromRandForStb(stbInfo); + } + + if (0 != ret) { + errorPrint2("%s() LN%d, read sample from csv file failed.\n", + __func__, __LINE__); + tmfree(stbInfo->sampleDataBuf); + stbInfo->sampleDataBuf = NULL; + return -1; + } + + return 0; +} + +static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) +{ + int32_t affectedRows; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + uint16_t iface; + if (stbInfo) + iface = stbInfo->iface; + else { + if (g_args.iface == INTERFACE_BUT) + iface = 
TAOSC_IFACE; + else + iface = g_args.iface; + } + + debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, + (iface==TAOSC_IFACE)? + "taosc":(iface==REST_IFACE)?"rest":"stmt"); + + switch(iface) { + case TAOSC_IFACE: + verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, pThreadInfo->buffer); + + affectedRows = queryDbExec( + pThreadInfo->taos, + pThreadInfo->buffer, INSERT_TYPE, false); + break; + + case REST_IFACE: + verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, pThreadInfo->buffer); + + if (0 != postProceSql(g_Dbs.host, g_Dbs.port, + pThreadInfo->buffer, pThreadInfo)) { + affectedRows = -1; + printf("========restful return fail, threadID[%d]\n", + pThreadInfo->threadID); + } else { + affectedRows = k; + } + break; + + case STMT_IFACE: + debugPrint("%s() LN%d, stmt=%p", + __func__, __LINE__, pThreadInfo->stmt); + if (0 != taos_stmt_execute(pThreadInfo->stmt)) { + errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); + + fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n"); + exit(EXIT_FAILURE); + } + affectedRows = k; + break; + + default: + errorPrint2("%s() LN%d: unknown insert mode: %d\n", + __func__, __LINE__, stbInfo->iface); + affectedRows = 0; + } + + return affectedRows; +} + +static void getTableName(char *pTblName, + threadInfo* pThreadInfo, uint64_t tableSeq) +{ + SSuperTable* stbInfo = pThreadInfo->stbInfo; + if (stbInfo) { + if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { + if (stbInfo->childTblLimit > 0) { + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", + stbInfo->childTblName + + (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + } else { + verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, tableSeq); + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", + stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + } + } else { + snprintf(pTblName, TSDB_TABLE_NAME_LEN, + "%s%"PRIu64"", stbInfo->childTblPrefix, tableSeq); + } + } else { + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", g_args.tb_prefix, tableSeq); + } +} + +static int32_t generateDataTailWithoutStb( + uint32_t batch, char* buffer, + int64_t remainderBufLen, int64_t insertRows, + uint64_t recordFrom, int64_t startTime, + /* int64_t *pSamplePos, */int64_t *dataLen) { + + uint64_t len = 0; + char *pstr = buffer; + + verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); + + int32_t k = 0; + for (k = 0; k < batch;) { + char *data = pstr; + memset(data, 0, MAX_DATA_SIZE); + + int64_t retLen = 0; + + char *data_type = g_args.data_type; + int lenOfBinary = g_args.binwidth; + + if (g_args.disorderRatio) { + retLen = generateData(data, data_type, + startTime + getTSRandTail( + g_args.timestamp_step, k, + g_args.disorderRatio, + g_args.disorderRange), + lenOfBinary); + } else { + retLen = generateData(data, data_type, + startTime + g_args.timestamp_step * k, + lenOfBinary); + } + + if (len > remainderBufLen) + break; + + pstr += retLen; + k++; + len += retLen; + remainderBufLen -= retLen; + + verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); + + recordFrom ++; + + if (recordFrom >= insertRows) { + break; + } + } + + *dataLen = len; + return k; +} + +static int64_t getTSRandTail(int64_t timeStampStep, 
int32_t seq, + int disorderRatio, int disorderRange) +{ + int64_t randTail = timeStampStep * seq; + if (disorderRatio > 0) { + int rand_num = taosRandom() % 100; + if(rand_num < disorderRatio) { + randTail = (randTail + + (taosRandom() % disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %"PRId64"\n", randTail); + } + } + + return randTail; +} + +static int32_t generateStbDataTail( + SSuperTable* stbInfo, + uint32_t batch, char* buffer, + int64_t remainderBufLen, int64_t insertRows, + uint64_t recordFrom, int64_t startTime, + int64_t *pSamplePos, int64_t *dataLen) { + uint64_t len = 0; + + char *pstr = buffer; + + bool tsRand; + if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { + tsRand = true; + } else { + tsRand = false; + } + verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n", + __func__, __LINE__, batch, remainderBufLen); + + int32_t k; + for (k = 0; k < batch;) { + char *data = pstr; + + int64_t lenOfRow = 0; + + if (tsRand) { + if (stbInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(stbInfo, data, + remainderBufLen, + startTime + getTSRandTail( + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange) + ); + } else { + lenOfRow = generateStbRowData(stbInfo, data, + remainderBufLen, + startTime + stbInfo->timeStampStep * k + ); + } + } else { + lenOfRow = getRowDataFromSample( + data, + (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, + startTime + stbInfo->timeStampStep * k, + stbInfo, + pSamplePos); + } + + if (lenOfRow == 0) { + data[0] = '\0'; + break; + } + if ((lenOfRow + 1) > remainderBufLen) { + break; + } + + pstr += lenOfRow; + k++; + len += lenOfRow; + remainderBufLen -= lenOfRow; + + verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); + + recordFrom ++; + + if (recordFrom >= insertRows) { + break; + } + } + + *dataLen = len; + return k; +} + + +static int generateSQLHeadWithoutStb(char *tableName, + char *dbName, + char *buffer, int remainderBufLen) +{ + int len; + + char headBuf[HEAD_BUFF_LEN]; + + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); + + if (len > remainderBufLen) + return -1; + + tstrncpy(buffer, headBuf, len + 1); + + return len; +} + +static int generateStbSQLHead( + SSuperTable* stbInfo, + char *tableName, int64_t tableSeq, + char *dbName, + char *buffer, int remainderBufLen) +{ + int len; + + char headBuf[HEAD_BUFF_LEN]; + + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s using %s.%s TAGS%s values", + dbName, + tableName, + dbName, + stbInfo->stbName, + tagsValBuf); + tmfree(tagsValBuf); + } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); + } else { + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); + } + + if (len > remainderBufLen) + return -1; + + tstrncpy(buffer, headBuf, len + 1); + + return len; +} + +static int32_t generateStbInterlaceData( + threadInfo *pThreadInfo, + char *tableName, uint32_t batchPerTbl, + uint64_t i, + uint32_t 
batchPerTblTimes, + uint64_t tableSeq, + char *buffer, + int64_t insertRows, + int64_t startTime, + uint64_t *pRemainderBufLen) +{ + assert(buffer); + char *pstr = buffer; + + SSuperTable *stbInfo = pThreadInfo->stbInfo; + int headLen = generateStbSQLHead( + stbInfo, + tableName, tableSeq, pThreadInfo->db_name, + pstr, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + // generate data buffer + verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, i, buffer); + + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen = 0; + + verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", + pThreadInfo->threadID, __func__, __LINE__, + i, batchPerTblTimes, batchPerTbl); + + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(pThreadInfo->time_precision); + } + + int32_t k = generateStbDataTail( + stbInfo, + batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &(pThreadInfo->samplePos), &dataLen); + + if (k == batchPerTbl) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", + __func__, __LINE__, k, batchPerTbl); + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } + + return k; +} + +static int64_t generateInterlaceDataWithoutStb( + char *tableName, uint32_t batch, + uint64_t tableSeq, + char *dbName, char *buffer, + int64_t insertRows, + int64_t startTime, + uint64_t *pRemainderBufLen) +{ + assert(buffer); + char *pstr = buffer; + + int headLen = generateSQLHeadWithoutStb( + tableName, dbName, + pstr, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen = 0; + + int32_t k = generateDataTailWithoutStb( + batch, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &dataLen); + + if (k == batch) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n", + __func__, __LINE__, k, batch); + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } + + return k; +} + +static int32_t prepareStmtBindArrayByType( + TAOS_BIND *bind, + char data_type, int32_t dataLen, + int32_t timePrec, + char *value) +{ + int32_t *bind_int; + uint32_t *bind_uint; + int64_t *bind_bigint; + uint64_t *bind_ubigint; + float *bind_float; + double *bind_double; + int8_t *bind_bool; + int64_t *bind_ts2; + int16_t *bind_smallint; + uint16_t *bind_usmallint; + int8_t *bind_tinyint; + uint8_t *bind_utinyint; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_NCHAR: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 
1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } + + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_INT: + bind_int = malloc(sizeof(int32_t)); + assert(bind_int); + + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_UINT: + bind_uint = malloc(sizeof(uint32_t)); + assert(bind_uint); + + if (value) { + *bind_uint = atoi(value); + } else { + *bind_uint = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_UINT; + bind->buffer_length = sizeof(uint32_t); + bind->buffer = bind_uint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_BIGINT: + bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); + + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_UBIGINT: + bind_ubigint = malloc(sizeof(uint64_t)); + assert(bind_ubigint); + + if (value) { + *bind_ubigint = atoll(value); + } else { + *bind_ubigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_UBIGINT; + bind->buffer_length = sizeof(uint64_t); + bind->buffer = bind_ubigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_FLOAT: + bind_float = malloc(sizeof(float)); + assert(bind_float); + + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_DOUBLE: + bind_double = malloc(sizeof(double)); + assert(bind_double); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_SMALLINT: + bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_USMALLINT: + bind_usmallint = malloc(sizeof(uint16_t)); + assert(bind_usmallint); + + if (value) { + *bind_usmallint = (uint16_t)atoi(value); + } else { + *bind_usmallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(uint16_t); + bind->buffer = bind_usmallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_TINYINT: + bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + 
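+ // same pattern as the other fixed-width cases above: heap-allocate a single value, fill it from the supplied string or a random generator, then point the TAOS_BIND at it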
bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_UTINYINT: + bind_utinyint = malloc(sizeof(uint8_t)); + assert(bind_utinyint); + + if (value) { + *bind_utinyint = (int8_t)atoi(value); + } else { + *bind_utinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_UTINYINT; + bind->buffer_length = sizeof(uint8_t); + bind->buffer = bind_utinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_BOOL: + bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + free(bind_ts2); + errorPrint2("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } + } else { + *bind_ts2 = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("Not support data type: %d\n", data_type); + exit(EXIT_FAILURE); + } + + return 0; +} + +static int32_t prepareStmtBindArrayByTypeForRand( + TAOS_BIND *bind, + char data_type, int32_t dataLen, + int32_t timePrec, + char **ptr, + char *value) +{ + int32_t *bind_int; + uint32_t *bind_uint; + int64_t *bind_bigint; + uint64_t *bind_ubigint; + float *bind_float; + double *bind_double; + int16_t *bind_smallint; + uint16_t *bind_usmallint; + int8_t *bind_tinyint; + uint8_t *bind_utinyint; + int8_t *bind_bool; + int64_t *bind_ts2; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary = (char *)*ptr; + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_NCHAR: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("nchar length overflow, max size: %u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar = (char *)*ptr; + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + strncpy(bind_nchar, value, strlen(value)); + } else { + rand_string(bind_nchar, dataLen); + } + + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + 
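+ // unlike prepareStmtBindArrayByType() above, this *ForRand variant writes into the caller-provided scratch buffer and advances *ptr, instead of calloc'ing a buffer per value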
break; + + case TSDB_DATA_TYPE_INT: + bind_int = (int32_t *)*ptr; + + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_UINT: + bind_uint = (uint32_t *)*ptr; + + if (value) { + *bind_uint = atoi(value); + } else { + *bind_uint = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_UINT; + bind->buffer_length = sizeof(uint32_t); + bind->buffer = bind_uint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_BIGINT: + bind_bigint = (int64_t *)*ptr; + + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_UBIGINT: + bind_ubigint = (uint64_t *)*ptr; + + if (value) { + *bind_ubigint = atoll(value); + } else { + *bind_ubigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_UBIGINT; + bind->buffer_length = sizeof(uint64_t); + bind->buffer = bind_ubigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_FLOAT: + bind_float = (float *)*ptr; + + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_DOUBLE: + bind_double = (double *)*ptr; + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_SMALLINT: + bind_smallint = (int16_t *)*ptr; + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_USMALLINT: + bind_usmallint = (uint16_t *)*ptr; + + if (value) { + *bind_usmallint = (uint16_t)atoi(value); + } else { + *bind_usmallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_USMALLINT; + bind->buffer_length = sizeof(uint16_t); + bind->buffer = bind_usmallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_TINYINT: + bind_tinyint = (int8_t *)*ptr; + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_UTINYINT: + bind_utinyint = (uint8_t *)*ptr; + + if (value) { + 
*bind_utinyint = (uint8_t)atoi(value); + } else { + *bind_utinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_UTINYINT; + bind->buffer_length = sizeof(uint8_t); + bind->buffer = bind_utinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_BOOL: + bind_bool = (int8_t *)*ptr; + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + bind_ts2 = (int64_t *)*ptr; + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint2("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } + } else { + *bind_ts2 = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + default: + errorPrint2("No support data type: %d\n", data_type); + return -1; + } + + return 0; +} + +static int32_t prepareStmtWithoutStb( + threadInfo *pThreadInfo, + char *tableName, + uint32_t batch, + int64_t insertRows, + int64_t recordFrom, + int64_t startTime) +{ + TAOS_STMT *stmt = pThreadInfo->stmt; + int ret = taos_stmt_set_tbname(stmt, tableName); + if (ret != 0) { + errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", + tableName, ret, taos_stmt_errstr(stmt)); + return ret; + } + + char *data_type = g_args.data_type; + + char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1)); + if (bindArray == NULL) { + errorPrint2("Failed to allocate %d bind params\n", + (g_args.columnCount + 1)); + return -1; + } + + int32_t k = 0; + for (k = 0; k < batch;) { + /* columnCount + 1 (ts) */ + + TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); + + int64_t *bind_ts = pThreadInfo->bind_ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + + if (g_args.disorderRatio) { + *bind_ts = startTime + getTSRandTail( + g_args.timestamp_step, k, + g_args.disorderRatio, + g_args.disorderRange); + } else { + *bind_ts = startTime + g_args.timestamp_step * k; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + for (int i = 0; i < g_args.columnCount; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + + (sizeof(TAOS_BIND) * (i + 1))); + if ( -1 == prepareStmtBindArrayByType( + bind, + data_type[i], + g_args.binwidth, + pThreadInfo->time_precision, + NULL)) { + free(bindArray); + return -1; + } + } + if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + break; + } + // if msg > 3MB, break + if (0 != taos_stmt_add_batch(stmt)) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + break; + } + + k++; + recordFrom ++; + if (recordFrom >= insertRows) { + break; + } + } + + free(bindArray); + return k; +} + +static int32_t prepareStbStmtBindTag( + char *bindArray, SSuperTable *stbInfo, + char *tagsVal, + int32_t timePrec) +{ + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].data_type, + stbInfo->tags[t].dataLen, + timePrec, + NULL)) { + return -1; + } + } + + return 0; +} + +static int32_t prepareStbStmtBindRand( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq, + int32_t timePrec) +{ + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + char *ptr = data; + + TAOS_BIND *bind; + + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + + if (i == 0) { + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + ptr += bind->buffer_length; + } else if ( -1 == prepareStmtBindArrayByTypeForRand( + bind, + stbInfo->columns[i-1].data_type, + stbInfo->columns[i-1].dataLen, + timePrec, + &ptr, + NULL)) { + return -1; + } + } + + return 0; +} + +UNUSED_FUNC static int32_t prepareStbStmtRand( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + + char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); + return -1; + } + + uint32_t k; + for (k = 0; k < batch;) { + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindRand( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision + /* is column */)) { + free(bindArray); + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + free(bindArray); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + free(bindArray); + return -1; + } + + k++; + recordFrom ++; + + if (recordFrom >= insertRows) { + break; + } + } + + free(bindArray); + return k; +} + +#if STMT_BIND_PARAM_BATCH == 1 +static int execStbBindParamBatch( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + TAOS_STMT *stmt = pThreadInfo->stmt; + + SSuperTable *stbInfo = pThreadInfo->stbInfo; + assert(stbInfo); + + uint32_t columnCount = pThreadInfo->stbInfo->columnCount; + + uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos); + + if (thisBatch > batch) { + thisBatch = batch; + } + verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n", + __func__, __LINE__, batch, *pSamplePos, thisBatch); + + memset(pThreadInfo->bindParams, 0, + (sizeof(TAOS_MULTI_BIND) * (columnCount + 1))); + memset(pThreadInfo->is_null, 0, thisBatch); + + for (int c = 0; c < columnCount + 1; c ++) { + TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); + + char data_type; + + if (c == 0) { + data_type = TSDB_DATA_TYPE_TIMESTAMP; + param->buffer_length = sizeof(int64_t); + param->buffer = pThreadInfo->bind_ts_array; + + } else { + data_type = stbInfo->columns[c-1].data_type; + + char *tmpP; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + param->buffer_length = + stbInfo->columns[c-1].dataLen; + + tmpP = + (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1))); + + verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%"PRIxPTR" position=%"PRId64"\n", + __func__, __LINE__, tmpP, *pSamplePos, param->buffer_length, + (*pSamplePos) * param->buffer_length); + + param->buffer = (void *)(tmpP + *pSamplePos * param->buffer_length); + break; + + case TSDB_DATA_TYPE_NCHAR: + param->buffer_length = + stbInfo->columns[c-1].dataLen; + + tmpP = + (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1))); + + verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%"PRIxPTR" position=%"PRId64"\n", + __func__, __LINE__, tmpP, *pSamplePos, param->buffer_length, + (*pSamplePos) * param->buffer_length); + + param->buffer = (void *)(tmpP + *pSamplePos * param->buffer_length); + break; + + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + param->buffer_length = sizeof(int32_t); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + 
break; + + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + param->buffer_length = sizeof(int8_t); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)( + stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + param->buffer_length = sizeof(int16_t); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + param->buffer_length = sizeof(int64_t); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + break; + + case TSDB_DATA_TYPE_BOOL: + param->buffer_length = sizeof(int8_t); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + break; + + case TSDB_DATA_TYPE_FLOAT: + param->buffer_length = sizeof(float); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + param->buffer_length = sizeof(double); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + param->buffer_length = sizeof(int64_t); + param->buffer = + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)); + break; + + default: + errorPrint("%s() LN%d, wrong data type: %d\n", + __func__, + __LINE__, + data_type); + exit(EXIT_FAILURE); + + } + } + + param->buffer_type = data_type; + param->length = malloc(sizeof(int32_t) * thisBatch); + assert(param->length); + + for (int b = 0; b < thisBatch; b++) { + if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) { + param->length[b] = strlen( + (char *)param->buffer + b * + stbInfo->columns[c].dataLen + ); + } else { + param->length[b] = param->buffer_length; + } + } + param->is_null = pThreadInfo->is_null; + param->num = thisBatch; + } + + uint32_t k; + for (k = 0; k < thisBatch;) { + /* columnCount + 1 (ts) */ + if (stbInfo->disorderRatio) { + *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail( + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k; + } + + debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n", + __func__, __LINE__, + k, *(pThreadInfo->bind_ts_array +k)); + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + for (int c = 0; c < stbInfo->columnCount + 1; c ++) { + TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); + free(param->length); + } + + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + return k; +} + +static int parseSamplefileToStmtBatch( + SSuperTable* stbInfo) +{ + // char *sampleDataBuf = (stbInfo)? + // stbInfo->sampleDataBuf:g_sampleDataBuf; + int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + char *sampleBindBatchArray = NULL; + + if (stbInfo) { + stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); + sampleBindBatchArray = stbInfo->sampleBindBatchArray; + } else { + g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); + sampleBindBatchArray = g_sampleBindBatchArray; + } + assert(sampleBindBatchArray); + + for (int c = 0; c < columnCount; c++) { + char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c]; + + char *tmpP = NULL; + + switch(data_type) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + tmpP = calloc(1, sizeof(int) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BOOL: + tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_FLOAT: + tmpP = calloc(1, sizeof(float) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmpP = calloc(1, sizeof(double) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = calloc(1, MAX_SAMPLES * + (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + default: + errorPrint("Unknown data type: %s\n", + (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } + + char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; + int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; + + for (int i=0; i < MAX_SAMPLES; i++) { + int cursor = 0; + + for (int c = 0; c < columnCount; c++) { + char data_type = (stbInfo)? 
+ stbInfo->columns[c].data_type: + g_args.data_type[c]; + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *tmpStr = calloc(1, index + 1); + if (NULL == tmpStr) { + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + + strncpy(tmpStr, restStr, index); + cursor += index + 1; // skip ',' too + char *tmpP; + + switch(data_type) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int32_t)*i)) = + atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_FLOAT: + *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(float)*i)) = + (float)atof(tmpStr); + break; + + case TSDB_DATA_TYPE_DOUBLE: + *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(double)*i)) = + atof(tmpStr); + break; + + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int8_t)*i)) = + (int8_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int16_t)*i)) = + (int16_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int64_t)*i)) = + (int64_t)atol(tmpStr); + break; + + case TSDB_DATA_TYPE_BOOL: + *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int8_t)*i)) = + (int8_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int64_t)*i)) = + (int64_t)atol(tmpStr); + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)); + strcpy(tmpP + i* + (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)) + , tmpStr); + break; + + default: + break; + } + + free(tmpStr); + } + } + + return 0; +} + +static int parseSampleToStmtBatchForThread( + threadInfo *pThreadInfo, SSuperTable *stbInfo, + uint32_t timePrec, + uint32_t batch) +{ + uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + + pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch); + assert(pThreadInfo->bind_ts_array); + + pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1)); + assert(pThreadInfo->bindParams); + + pThreadInfo->is_null = malloc(batch); + assert(pThreadInfo->is_null); + + return 0; +} + +static int parseStbSampleToStmtBatchForThread( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, + uint32_t timePrec, + uint32_t batch) +{ + return parseSampleToStmtBatchForThread( + pThreadInfo, stbInfo, timePrec, batch); +} + +static int parseNtbSampleToStmtBatchForThread( + threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch) +{ + return parseSampleToStmtBatchForThread( + pThreadInfo, NULL, timePrec, batch); +} + +#else +static int parseSampleToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) +{ + pThreadInfo->sampleBindArray = + (char *)calloc(1, sizeof(char *) * MAX_SAMPLES); + if (pThreadInfo->sampleBindArray == NULL) { + errorPrint2("%s() LN%d, Failed to 
allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, + (uint64_t)sizeof(char *) * MAX_SAMPLES); + return -1; + } + + int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; + int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; + + for (int i=0; i < MAX_SAMPLES; i++) { + char *bindArray = + calloc(1, sizeof(TAOS_BIND) * (columnCount + 1)); + if (bindArray == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (columnCount + 1)); + return -1; + } + + TAOS_BIND *bind; + int cursor = 0; + + for (int c = 0; c < columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char data_type = (stbInfo)? + stbInfo->columns[c-1].data_type: + g_args.data_type[c-1]; + int32_t dataLen = (stbInfo)? + stbInfo->columns[c-1].dataLen: + g_args.binwidth; + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + data_type, + dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + free(bindArray); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) = + (uintptr_t)bindArray; + } + + return 0; +} + +static int parseStbSampleToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) +{ + return parseSampleToStmt( + pThreadInfo, + stbInfo, timePrec); +} + +static int parseNtbSampleToStmt( + threadInfo *pThreadInfo, + uint32_t timePrec) +{ + return parseSampleToStmt( + pThreadInfo, + NULL, + timePrec); +} + +static int32_t prepareStbStmtBindStartTime( + char *tableName, + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + + verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", + __func__, __LINE__, tableName, *bind_ts); + + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static uint32_t execBindParam( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 
(ts) */ + if (-1 == prepareStbStmtBindStartTime( + tableName, + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} +#endif + +static int32_t prepareStbStmt( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + +#if STMT_BIND_PARAM_BATCH == 1 + return execStbBindParamBatch( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, + recordFrom, + startTime, + pSamplePos); +#else + return execBindParam( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, + recordFrom, + startTime, + pSamplePos); +#endif +} + +static int32_t generateStbProgressiveData( + SSuperTable *stbInfo, + char *tableName, + int64_t tableSeq, + char *dbName, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(pstr, 0, *pRemainderBufLen); + + int64_t headLen = generateStbSQLHead( + stbInfo, + tableName, tableSeq, dbName, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen; + + return generateStbDataTail(stbInfo, + g_args.reqPerReq, pstr, *pRemainderBufLen, + insertRows, recordFrom, + startTime, + pSamplePos, &dataLen); +} + +static int32_t generateProgressiveDataWithoutStb( + char *tableName, + /* int64_t tableSeq, */ + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(buffer, 0, *pRemainderBufLen); + + int64_t headLen = generateSQLHeadWithoutStb( + tableName, pThreadInfo->db_name, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen; + + return generateDataTailWithoutStb( + g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom, + startTime, + /*pSamplePos, */&dataLen); +} + +static void printStatPerThread(threadInfo *pThreadInfo) +{ + if (0 == pThreadInfo->totalDelay) + pThreadInfo->totalDelay = 1; + + fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". 
%.2f records/second====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows, + (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)) + ); +} + +#if STMT_BIND_PARAM_BATCH == 1 +// stmt sync write interlace data +static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint64_t timesInterlace = (insertRows / interlaceRows) + 1; + uint32_t precalcBatch = interlaceRows; + + if (precalcBatch > g_args.reqPerReq) + precalcBatch = g_args.reqPerReq; + + if (precalcBatch > MAX_SAMPLES) + precalcBatch = MAX_SAMPLES; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime; + + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + pThreadInfo->samplePos = 0; + + for (int64_t interlace = 0; + interlace < timesInterlace; interlace ++) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + int64_t generated = 0; + int64_t samplePos; + + for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; tableSeq ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + samplePos = pThreadInfo->samplePos; + startTime = pThreadInfo->start_time + + interlace * interlaceRows * timeStampStep; + uint64_t remainRecPerTbl = + insertRows - interlaceRows * interlace; + uint64_t recPerTbl = 0; + + uint64_t remainPerInterlace; + if (remainRecPerTbl > interlaceRows) { + remainPerInterlace = interlaceRows; + } else { + remainPerInterlace = remainRecPerTbl; + } + + while(remainPerInterlace > 0) { + + uint32_t batch; + if (remainPerInterlace > precalcBatch) { + batch = precalcBatch; + } else { + batch = remainPerInterlace; + } + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batch, startTime); + + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, 0, + startTime, + &samplePos); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batch, + insertRows, + interlaceRows * interlace + recPerTbl, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { 
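+ // a negative count signals a bind/allocation failure inside prepareStbStmt()/prepareStmtWithoutStb(), so stop this thread's insert loop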
+ errorPrint2("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + recPerTbl += generated; + remainPerInterlace -= generated; + pThreadInfo->totalInsertRows += generated; + + verbosePrint("[%d] %s() LN%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + int64_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (generated != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert() insert %"PRId64", affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + generated, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + startTime += (generated * timeStampStep); + } + } + pThreadInfo->samplePos = samplePos; + + if (tableSeq == pThreadInfo->start_table_from + + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + + flagSleep = true; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; +} +#else +// stmt sync write interlace data +static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + uint64_t maxSqlLen; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + maxSqlLen = stbInfo->maxSqlLen; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + maxSqlLen = g_args.max_sql_len; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + 
pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.reqPerReq / interlaceRows; + } else { + batchPerTblTimes = 1; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + uint32_t recOfBatch = 0; + + int32_t generated; + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batchPerTbl, + insertRows, i, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint2("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * timeStampStep; + + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + if (recOfBatch == 0) { + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, 
__func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, maxSqlLen / batchPerTbl); + } + goto free_of_interlace_stmt; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; +} + +#endif + +// sync write interlace data +static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + uint64_t maxSqlLen; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + maxSqlLen = stbInfo->maxSqlLen; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + maxSqlLen = g_args.max_sql_len; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); +#if 1 + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; + + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.reqPerReq / interlaceRows; + } else { + batchPerTblTimes = 1; + } +#else + uint32_t batchPerTbl; + if (interlaceRows > g_args.reqPerReq) + batchPerTbl = g_args.reqPerReq; + else + batchPerTbl = interlaceRows; + 
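+ // alternative batching kept for reference (compiled out by the #if 1 above): cap the per-table batch at reqPerReq, then split each interlace round of interlaceRows rows into that many batches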
+ uint32_t batchPerTblTimes; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + interlaceRows / batchPerTbl; + } else { + batchPerTblTimes = 1; + } +#endif + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); + return NULL; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + // generate data + memset(pThreadInfo->buffer, 0, maxSqlLen); + uint64_t remainderBufLen = maxSqlLen; + + char *pstr = pThreadInfo->buffer; + + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); + pstr += len; + remainderBufLen -= len; + + uint32_t recOfBatch = 0; + + int32_t generated; + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } + + uint64_t oldRemainderLen = remainderBufLen; + + if (stbInfo) { + generated = generateStbInterlaceData( + pThreadInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pstr, + insertRows, + startTime, + &remainderBufLen); + } else { + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + startTime, + &remainderBufLen); + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint2("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pstr += (oldRemainderLen - remainderBufLen); + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * timeStampStep; + + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) 
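+                // remaining capacity of this request cannot hold another full per-table batch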
+ break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + verbosePrint("[%d] %s() LN%d, buffer=%s\n", + pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); + + startTs = taosGetTimestampUs(); + + if (recOfBatch == 0) { + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, maxSqlLen / batchPerTbl); + } + errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", + maxSqlLen, batchPerTbl); + goto free_of_interlace; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows, pThreadInfo->buffer); + goto free_of_interlace; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace: + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; +} + +static void* syncWriteProgressiveStmt(threadInfo *pThreadInfo) { + debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__); + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + int64_t timeStampStep = + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; + int64_t insertRows = + (stbInfo)?stbInfo->insertRows:g_args.insertRows; + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", + __func__, __LINE__, insertRows); + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + pThreadInfo->samplePos = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + for (uint64_t tableSeq 
= pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { + int64_t start_time = pThreadInfo->start_time; + + for (uint64_t i = 0; i < insertRows;) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + // measure prepare + insert + startTs = taosGetTimestampUs(); + + int32_t generated; + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + (g_args.reqPerReq>stbInfo->insertRows)? + stbInfo->insertRows: + g_args.reqPerReq, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + g_args.reqPerReq, + insertRows, i, + start_time); + } + + verbosePrint("[%d] %s() LN%d generated=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, generated); + + if (generated > 0) + i += generated; + else + goto free_of_stmt_progressive; + + start_time += generated * timeStampStep; + pThreadInfo->totalInsertRows += generated; + + // only measure insert + // startTs = taosGetTimestampUs(); + + int32_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.f ms\n", + __func__, __LINE__, delay/1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (affectedRows < 0) { + errorPrint2("%s() LN%d, affected rows: %d\n", + __func__, __LINE__, affectedRows); + goto free_of_stmt_progressive; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if (i >= insertRows) + break; + } // insertRows + + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) + && (0 == strncasecmp( + stbInfo->dataSource, + "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", + __func__, __LINE__, pThreadInfo->samplePos); + } + } // tableSeq + + if (percentComplete < 100) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + } + +free_of_stmt_progressive: + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; +} +// sync insertion progressive data +static void* syncWriteProgressive(threadInfo *pThreadInfo) { + debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len; + int64_t timeStampStep = + 
stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; + int64_t insertRows = + (stbInfo)?stbInfo->insertRows:g_args.insertRows; + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", + __func__, __LINE__, insertRows); + + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n", + maxSqlLen, + strerror(errno)); + return NULL; + } + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + pThreadInfo->samplePos = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { + int64_t start_time = pThreadInfo->start_time; + + for (uint64_t i = 0; i < insertRows;) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } + + int64_t remainderBufLen = maxSqlLen - 2000; + char *pstr = pThreadInfo->buffer; + + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); + + pstr += len; + remainderBufLen -= len; + + // measure prepare + insert + startTs = taosGetTimestampUs(); + + int32_t generated; + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + (g_args.reqPerReq>stbInfo->insertRows)? 
+ stbInfo->insertRows: + g_args.reqPerReq, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } else { + generated = generateStbProgressiveData( + stbInfo, + tableName, tableSeq, + pThreadInfo->db_name, pstr, + insertRows, i, start_time, + &(pThreadInfo->samplePos), + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + g_args.reqPerReq, + insertRows, i, + start_time); + } else { + generated = generateProgressiveDataWithoutStb( + tableName, + /* tableSeq, */ + pThreadInfo, pstr, insertRows, + i, start_time, + /* &(pThreadInfo->samplePos), */ + &remainderBufLen); + } + } + + verbosePrint("[%d] %s() LN%d generated=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, generated); + + if (generated > 0) + i += generated; + else + goto free_of_progressive; + + start_time += generated * timeStampStep; + pThreadInfo->totalInsertRows += generated; + + // only measure insert + // startTs = taosGetTimestampUs(); + + int32_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.f ms\n", + __func__, __LINE__, delay/1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (affectedRows < 0) { + errorPrint2("%s() LN%d, affected rows: %d\n", + __func__, __LINE__, affectedRows); + goto free_of_progressive; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if (i >= insertRows) + break; + } // insertRows + + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) + && (0 == strncasecmp( + stbInfo->dataSource, + "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", + __func__, __LINE__, pThreadInfo->samplePos); + } + } // tableSeq + + if (percentComplete < 100) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + } + +free_of_progressive: + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; +} + +static void* syncWrite(void *sarg) { + + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + setThreadName("syncWrite"); + + uint32_t interlaceRows = 0; + + if (stbInfo) { + if (stbInfo->interlaceRows < stbInfo->insertRows) + interlaceRows = stbInfo->interlaceRows; + } else { + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; + } + + if (interlaceRows > 0) { + // interlace mode + if (stbInfo) { + if (STMT_IFACE == stbInfo->iface) { +#if STMT_BIND_PARAM_BATCH == 1 + return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); +#else + return syncWriteInterlaceStmt(pThreadInfo, interlaceRows); +#endif + 
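+            // non-stmt interfaces use the plain SQL interlace writer below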
} else { + return syncWriteInterlace(pThreadInfo, interlaceRows); + } + } + } else { + // progressive mode + if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) + || (STMT_IFACE == g_args.iface)) { + return syncWriteProgressiveStmt(pThreadInfo); + } else { + return syncWriteProgressive(pThreadInfo); + } + } + + return NULL; +} + +static void callBack(void *param, TAOS_RES *res, int code) { + threadInfo* pThreadInfo = (threadInfo*)param; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + int insert_interval = + stbInfo?stbInfo->insertInterval:g_args.insert_interval; + if (insert_interval) { + pThreadInfo->et = taosGetTimestampMs(); + if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { + taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms + } + } + + char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); + char data[MAX_DATA_SIZE]; + char *pstr = buffer; + pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES", + pThreadInfo->db_name, pThreadInfo->tb_prefix, + pThreadInfo->start_table_from); + // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { + if (pThreadInfo->counter >= g_args.reqPerReq) { + pThreadInfo->start_table_from++; + pThreadInfo->counter = 0; + } + if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { + tsem_post(&pThreadInfo->lock_sem); + free(buffer); + taos_free_result(res); + return; + } + + for (int i = 0; i < g_args.reqPerReq; i++) { + int rand_num = taosRandom() % 100; + if (0 != pThreadInfo->stbInfo->disorderRatio + && rand_num < pThreadInfo->stbInfo->disorderRatio) { + int64_t d = pThreadInfo->lastTs + - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); + generateStbRowData(pThreadInfo->stbInfo, data, + MAX_DATA_SIZE, + d); + } else { + generateStbRowData(pThreadInfo->stbInfo, + data, + MAX_DATA_SIZE, + pThreadInfo->lastTs += 1000); + } + pstr += sprintf(pstr, "%s", data); + pThreadInfo->counter++; + + if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { + break; + } + } + + if (insert_interval) { + pThreadInfo->st = taosGetTimestampMs(); + } + taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); + free(buffer); + + taos_free_result(res); +} + +static void *asyncWrite(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + setThreadName("asyncWrite"); + + pThreadInfo->st = 0; + pThreadInfo->et = 0; + pThreadInfo->lastTs = pThreadInfo->start_time; + + int insert_interval = + stbInfo?stbInfo->insertInterval:g_args.insert_interval; + if (insert_interval) { + pThreadInfo->st = taosGetTimestampMs(); + } + taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); + + tsem_wait(&(pThreadInfo->lock_sem)); + + return NULL; +} + +static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr) +{ + uint16_t rest_port = port + TSDB_PORT_HTTP; + struct hostent *server = gethostbyname(host); + if ((server == NULL) || (server->h_addr == NULL)) { + errorPrint2("%s", "no such host"); + return -1; + } + + debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", + server->h_name, + server->h_addr, + (server->h_addrtype == AF_INET)?"ipv4":"ipv6", + server->h_length); + + memset(serv_addr, 0, sizeof(struct sockaddr_in)); + serv_addr->sin_family = AF_INET; + serv_addr->sin_port = htons(rest_port); +#ifdef WINDOWS + serv_addr->sin_addr.s_addr = inet_addr(host); +#else + memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); +#endif + return 0; +} + +static void 
startMultiThreadInsertData(int threads, char* db_name, + char* precision, SSuperTable* stbInfo) { + + int32_t timePrec = TSDB_TIME_PRECISION_MILLI; + if (0 != precision[0]) { + if (0 == strncasecmp(precision, "ms", 2)) { + timePrec = TSDB_TIME_PRECISION_MILLI; + } else if (0 == strncasecmp(precision, "us", 2)) { + timePrec = TSDB_TIME_PRECISION_MICRO; + } else if (0 == strncasecmp(precision, "ns", 2)) { + timePrec = TSDB_TIME_PRECISION_NANO; + } else { + errorPrint2("Not support precision: %s\n", precision); + exit(EXIT_FAILURE); + } + } + + int64_t startTime; + if (stbInfo) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(timePrec); + } else { + if (TSDB_CODE_SUCCESS != taosParseTime( + stbInfo->startTimestamp, + &startTime, + strlen(stbInfo->startTimestamp), + timePrec, 0)) { + ERROR_EXIT("failed to parse time!\n"); + } + } + } else { + startTime = DEFAULT_START_TIME; + } + debugPrint("%s() LN%d, startTime= %"PRId64"\n", + __func__, __LINE__, startTime); + + // read sample data from file first + int ret; + if (stbInfo) { + ret = prepareSampleForStb(stbInfo); + } else { + ret = prepareSampleForNtb(); + } + + if (0 != ret) { + errorPrint2("%s() LN%d, prepare sample data for stable failed!\n", + __func__, __LINE__); + exit(EXIT_FAILURE); + } + + TAOS* taos0 = taos_connect( + g_Dbs.host, g_Dbs.user, + g_Dbs.password, db_name, g_Dbs.port); + if (NULL == taos0) { + errorPrint2("%s() LN%d, connect to server fail , reason: %s\n", + __func__, __LINE__, taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + int64_t ntables = 0; + uint64_t tableFrom = 0; + + if (stbInfo) { + int64_t limit; + uint64_t offset; + + if ((NULL != g_args.sqlFile) + && (stbInfo->childTblExists == TBL_NO_EXISTS) + && ((stbInfo->childTblOffset != 0) + || (stbInfo->childTblLimit >= 0))) { + printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); + } + + if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { + if ((stbInfo->childTblLimit < 0) + || ((stbInfo->childTblOffset + + stbInfo->childTblLimit) + > (stbInfo->childTblCount))) { + + if (stbInfo->childTblCount < stbInfo->childTblOffset) { + printf("WARNING: offset will not be used since the child tables count is less then offset!\n"); + + stbInfo->childTblOffset = 0; + } + stbInfo->childTblLimit = + stbInfo->childTblCount - stbInfo->childTblOffset; + } + + offset = stbInfo->childTblOffset; + limit = stbInfo->childTblLimit; + } else { + limit = stbInfo->childTblCount; + offset = 0; + } + + ntables = limit; + tableFrom = offset; + + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && ((stbInfo->childTblOffset + stbInfo->childTblLimit) + > stbInfo->childTblCount)) { + printf("WARNING: specified offset + limit > child table count!\n"); + prompt(); + } + + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && (0 == stbInfo->childTblLimit)) { + printf("WARNING: specified limit = 0, which cannot find table name to insert or query! 
\n"); + prompt(); + } + + stbInfo->childTblName = (char*)calloc(1, + limit * TSDB_TABLE_NAME_LEN); + if (stbInfo->childTblName == NULL) { + taos_close(taos0); + errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + exit(EXIT_FAILURE); + } + + int64_t childTblCount; + getChildNameOfSuperTableWithLimitAndOffset( + taos0, + db_name, stbInfo->stbName, + &stbInfo->childTblName, &childTblCount, + limit, + offset); + ntables = childTblCount; + } else { + ntables = g_args.ntables; + tableFrom = 0; + } + + taos_close(taos0); + + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int64_t b = 0; + if (threads != 0) { + b = ntables % threads; + } + + if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) { + if (convertHostToServAddr( + g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { + ERROR_EXIT("convert host to server address"); + } + } + + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + assert(pids != NULL); + assert(infos != NULL); + + char *stmtBuffer = calloc(1, BUFFER_SIZE); + assert(stmtBuffer); + +#if STMT_BIND_PARAM_BATCH == 1 + uint32_t interlaceRows = 0; + uint32_t batch; + + if (stbInfo) { + if (stbInfo->interlaceRows < stbInfo->insertRows) + interlaceRows = stbInfo->interlaceRows; + } else { + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; + } + + if (interlaceRows > 0) { + batch = interlaceRows; + } else { + batch = (g_args.reqPerReq>g_args.insertRows)? + g_args.insertRows:g_args.reqPerReq; + } + +#endif + + if ((g_args.iface == STMT_IFACE) + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { + char *pstr = stmtBuffer; + + if ((stbInfo) + && (AUTO_CREATE_SUBTBL + == stbInfo->autoCreateTable)) { + pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", + stbInfo->stbName); + for (int tag = 0; tag < (stbInfo->tagCount - 1); + tag ++ ) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ") VALUES(?"); + } else { + pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); + } + + int columnCount = (stbInfo)? 
+                stbInfo->columnCount:
+                g_args.columnCount;
+
+        for (int col = 0; col < columnCount; col ++) {
+            pstr += sprintf(pstr, ",?");
+        }
+        pstr += sprintf(pstr, ")");
+
+        debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer);
+#if STMT_BIND_PARAM_BATCH == 1
+        parseSamplefileToStmtBatch(stbInfo);
+#endif
+    }
+
+    for (int i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        pThreadInfo->threadID = i;
+
+        tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
+        pThreadInfo->time_precision = timePrec;
+        pThreadInfo->stbInfo = stbInfo;
+
+        pThreadInfo->start_time = startTime;
+        pThreadInfo->minDelay = UINT64_MAX;
+
+        if ((NULL == stbInfo) ||
+                (stbInfo->iface != REST_IFACE)) {
+            //t_info->taos = taos;
+            pThreadInfo->taos = taos_connect(
+                    g_Dbs.host, g_Dbs.user,
+                    g_Dbs.password, db_name, g_Dbs.port);
+            if (NULL == pThreadInfo->taos) {
+                free(infos);
+                errorPrint2(
+                        "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
+                        __func__, __LINE__,
+                        taos_errstr(NULL));
+                exit(EXIT_FAILURE);
+            }
+
+            if ((g_args.iface == STMT_IFACE)
+                    || ((stbInfo)
+                        && (stbInfo->iface == STMT_IFACE))) {
+
+                pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
+                if (NULL == pThreadInfo->stmt) {
+                    free(pids);
+                    free(infos);
+                    errorPrint2(
+                            "%s() LN%d, failed init stmt, reason: %s\n",
+                            __func__, __LINE__,
+                            taos_errstr(NULL));
+                    exit(EXIT_FAILURE);
+                }
+
+                if (0 != taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0)) {
+                    free(pids);
+                    free(infos);
+                    free(stmtBuffer);
+                    errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
+                            ret, taos_stmt_errstr(pThreadInfo->stmt));
+                    exit(EXIT_FAILURE);
+                }
+                pThreadInfo->bind_ts = malloc(sizeof(int64_t));
+
+                if (stbInfo) {
+#if STMT_BIND_PARAM_BATCH == 1
+                    parseStbSampleToStmtBatchForThread(
+                            pThreadInfo, stbInfo, timePrec, batch);
+#else
+                    parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec);
+#endif
+                } else {
+#if STMT_BIND_PARAM_BATCH == 1
+                    parseNtbSampleToStmtBatchForThread(
+                            pThreadInfo, timePrec, batch);
+#else
+                    parseNtbSampleToStmt(pThreadInfo, timePrec);
+#endif
+                }
+            }
+        } else {
+            pThreadInfo->taos = NULL;
+        }
+
+        /* if ((NULL == stbInfo)
+           || (0 == stbInfo->multiThreadWriteOneTbl)) {
+           */
+        pThreadInfo->start_table_from = tableFrom;
+        pThreadInfo->ntables = i < b ? a + 1 : a;
+        pThreadInfo->end_table_to = i < b ?
tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + /* } else { + pThreadInfo->start_table_from = 0; + pThreadInfo->ntables = stbInfo->childTblCount; + pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); + } + */ + + if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) { +#ifdef WINDOWS + WSADATA wsaData; + WSAStartup(MAKEWORD(2, 2), &wsaData); + SOCKET sockfd; +#else + int sockfd; +#endif + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) { +#ifdef WINDOWS + errorPrint( "Could not create socket : %d" , WSAGetLastError()); +#endif + debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); + ERROR_EXIT("opening socket"); + } + + int retConn = connect(sockfd, (struct sockaddr *)&(g_Dbs.serv_addr), sizeof(struct sockaddr)); + debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); + if (retConn < 0) { + ERROR_EXIT("connecting"); + } + pThreadInfo->sockfd = sockfd; + } + + + tsem_init(&(pThreadInfo->lock_sem), 0, 0); + if (ASYNC_MODE == g_Dbs.asyncMode) { + pthread_create(pids + i, NULL, asyncWrite, pThreadInfo); + } else { + pthread_create(pids + i, NULL, syncWrite, pThreadInfo); + } + } + + free(stmtBuffer); + + int64_t start = taosGetTimestampUs(); + + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } + + uint64_t totalDelay = 0; + uint64_t maxDelay = 0; + uint64_t minDelay = UINT64_MAX; + uint64_t cntDelay = 1; + double avgDelay = 0; + + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + + tsem_destroy(&(pThreadInfo->lock_sem)); + taos_close(pThreadInfo->taos); + + if (pThreadInfo->stmt) { + taos_stmt_close(pThreadInfo->stmt); + } + + tmfree((char *)pThreadInfo->bind_ts); +#if STMT_BIND_PARAM_BATCH == 1 + tmfree((char *)pThreadInfo->bind_ts_array); + tmfree(pThreadInfo->bindParams); + tmfree(pThreadInfo->is_null); + if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) { +#ifdef WINDOWS + closesocket(pThreadInfo->sockfd); + WSACleanup(); +#else + close(pThreadInfo->sockfd); +#endif + } +#else + if (pThreadInfo->sampleBindArray) { + for (int k = 0; k < MAX_SAMPLES; k++) { + uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( + pThreadInfo->sampleBindArray + + sizeof(uintptr_t *) * k)); + int columnCount = (pThreadInfo->stbInfo)? 
+ pThreadInfo->stbInfo->columnCount: + g_args.columnCount; + for (int c = 1; c < columnCount + 1; c++) { + TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); + if (bind) + tmfree(bind->buffer); + } + tmfree((char *)tmp); + } + tmfree(pThreadInfo->sampleBindArray); + } +#endif + + debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->threadID, pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + if (stbInfo) { + stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; + stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; + } else { + g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; + g_args.totalInsertRows += pThreadInfo->totalInsertRows; + } + + totalDelay += pThreadInfo->totalDelay; + cntDelay += pThreadInfo->cntDelay; + if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; + if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; + } + + if (cntDelay == 0) cntDelay = 1; + avgDelay = (double)totalDelay / cntDelay; + + int64_t end = taosGetTimestampUs(); + int64_t t = end - start; + if (0 == t) t = 1; + + double tInMs = (double) t / 1000000.0; + + if (stbInfo) { + fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->stbName, + (double)(stbInfo->totalInsertRows/tInMs)); + + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->stbName, + (double)(stbInfo->totalInsertRows/tInMs)); + } + } else { + fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", + tInMs, g_args.totalInsertRows, + g_args.totalAffectedRows, + threads, db_name, + (double)(g_args.totalInsertRows/tInMs)); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", + tInMs, g_args.totalInsertRows, + g_args.totalAffectedRows, + threads, db_name, + (double)(g_args.totalInsertRows/tInMs)); + } + } + + if (minDelay != UINT64_MAX) { + fprintf(stderr, "insert delay, avg: %10.2fms, max: %10.2fms, min: %10.2fms\n\n", + (double)avgDelay/1000.0, + (double)maxDelay/1000.0, + (double)minDelay/1000.0); + + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %10.2fms, min: %10.2fms\n\n", + (double)avgDelay/1000.0, + (double)maxDelay/1000.0, + (double)minDelay/1000.0); + } + } + + //taos_close(taos); + + free(pids); + free(infos); +} + +static void *queryNtableAggrFunc(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; + TAOS *taos = pThreadInfo->taos; + setThreadName("queryNtableAggrFunc"); + char *command = calloc(1, BUFFER_SIZE); + assert(command); + + uint64_t startTime = pThreadInfo->start_time; + char *tb_prefix = pThreadInfo->tb_prefix; + FILE *fp = fopen(pThreadInfo->filePath, "a"); + if (NULL == fp) { + errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); + return NULL; + } + + int64_t insertRows; + /* if (pThreadInfo->stbInfo) { + insertRows = pThreadInfo->stbInfo->insertRows; 
// nrecords_per_table; + } else { + */ + insertRows = g_args.insertRows; + // } + + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; + int64_t totalData = insertRows * ntables; + bool aggr_func = g_Dbs.aggr_func; + + char **aggreFunc; + int n; + + if (g_args.demo_mode) { + aggreFunc = g_aggreFuncDemo; + n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; + } else { + aggreFunc = g_aggreFunc; + n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; + } + + if (!aggr_func) { + printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); + } + printf("%"PRId64" records:\n", totalData); + fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); + + for (int j = 0; j < n; j++) { + double totalT = 0; + uint64_t count = 0; + for (int64_t i = 0; i < ntables; i++) { + sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64, + aggreFunc[j], tb_prefix, i, startTime); + + double t = taosGetTimestampUs(); + debugPrint("%s() LN%d, sql command: %s\n", + __func__, __LINE__, command); + TAOS_RES *pSql = taos_query(taos, command); + int32_t code = taos_errno(pSql); + + if (code != 0) { + errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + fclose(fp); + free(command); + return NULL; + } + + while(taos_fetch_row(pSql) != NULL) { + count++; + } + + t = taosGetTimestampUs() - t; + totalT += t; + + taos_free_result(pSql); + } + + fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", + aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, + (double)(ntables * insertRows) / totalT, totalT / 1000000); + printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000); + } + fprintf(fp, "\n"); + fclose(fp); + free(command); + return NULL; +} + +static void *queryStableAggrFunc(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; + TAOS *taos = pThreadInfo->taos; + setThreadName("queryStableAggrFunc"); + char *command = calloc(1, BUFFER_SIZE); + assert(command); + + FILE *fp = fopen(pThreadInfo->filePath, "a"); + if (NULL == fp) { + printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); + return NULL; + } + + int64_t insertRows = pThreadInfo->stbInfo->insertRows; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; + int64_t totalData = insertRows * ntables; + bool aggr_func = g_Dbs.aggr_func; + + char **aggreFunc; + int n; + + if (g_args.demo_mode) { + aggreFunc = g_aggreFuncDemo; + n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; + } else { + aggreFunc = g_aggreFunc; + n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; + } + + if (!aggr_func) { + printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); + } + + printf("%"PRId64" records:\n", totalData); + fprintf(fp, "Querying On %"PRId64" records:\n", totalData); + + for (int j = 0; j < n; j++) { + char condition[COND_BUF_LEN] = "\0"; + char tempS[64] = "\0"; + + int64_t m = 10 < ntables ? 
10 : ntables; + + for (int64_t i = 1; i <= m; i++) { + if (i == 1) { + if (g_args.demo_mode) { + sprintf(tempS, "groupid = %"PRId64"", i); + } else { + sprintf(tempS, "t0 = %"PRId64"", i); + } + } else { + if (g_args.demo_mode) { + sprintf(tempS, " or groupid = %"PRId64" ", i); + } else { + sprintf(tempS, " or t0 = %"PRId64" ", i); + } + } + strncat(condition, tempS, COND_BUF_LEN - 1); + + sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition); + + printf("Where condition: %s\n", condition); + + debugPrint("%s() LN%d, sql command: %s\n", + __func__, __LINE__, command); + fprintf(fp, "%s\n", command); + + double t = taosGetTimestampUs(); + + TAOS_RES *pSql = taos_query(taos, command); + int32_t code = taos_errno(pSql); + + if (code != 0) { + errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + fclose(fp); + free(command); + return NULL; + } + int count = 0; + while(taos_fetch_row(pSql) != NULL) { + count++; + } + t = taosGetTimestampUs() - t; + + fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", + ntables * insertRows / (t / 1000), t); + printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000); + + taos_free_result(pSql); + } + fprintf(fp, "\n"); + } + fclose(fp); + free(command); + + return NULL; +} + +static void prompt() +{ + if (!g_args.answer_yes) { + printf(" Press enter key to continue or Ctrl-C to stop\n\n"); + (void)getchar(); + } +} + +static int insertTestProcess() { + + setupForAnsiEscape(); + int ret = printfInsertMeta(); + resetAfterAnsiEscape(); + + if (ret == -1) + exit(EXIT_FAILURE); + + debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); + g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); + if (NULL == g_fpOfInsertResult) { + errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile); + return -1; + } + + if (g_fpOfInsertResult) + printfInsertMetaToFile(g_fpOfInsertResult); + + prompt(); + + init_rand_data(); + + // create database and super tables + char *cmdBuffer = calloc(1, BUFFER_SIZE); + assert(cmdBuffer); + + if(createDatabasesAndStables(cmdBuffer) != 0) { + if (g_fpOfInsertResult) + fclose(g_fpOfInsertResult); + free(cmdBuffer); + return -1; + } + free(cmdBuffer); + + // pretreatment + if (prepareSampleData() != 0) { + if (g_fpOfInsertResult) + fclose(g_fpOfInsertResult); + return -1; + } + + double start; + double end; + + if (g_totalChildTables > 0) { + fprintf(stderr, + "creating %"PRId64" table(s) with %d thread(s)\n\n", + g_totalChildTables, g_Dbs.threadCountForCreateTbl); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "creating %"PRId64" table(s) with %d thread(s)\n\n", + g_totalChildTables, g_Dbs.threadCountForCreateTbl); + } + + // create child tables + start = taosGetTimestampMs(); + createChildTables(); + end = taosGetTimestampMs(); + + fprintf(stderr, + "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", + (end - start)/1000.0, g_totalChildTables, + g_Dbs.threadCountForCreateTbl, g_actualChildTables); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", + (end - start)/1000.0, g_totalChildTables, + g_Dbs.threadCountForCreateTbl, g_actualChildTables); + } + } + + // create sub threads for inserting data + //start = taosGetTimestampMs(); + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.use_metric) { + if (g_Dbs.db[i].superTblCount > 0) { + for 
(uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + + SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j]; + + if (stbInfo && (stbInfo->insertRows > 0)) { + startMultiThreadInsertData( + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + stbInfo); + } + } + } + } else { + startMultiThreadInsertData( + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + NULL); + } + } + //end = taosGetTimestampMs(); + + //int64_t totalInsertRows = 0; + //int64_t totalAffectedRows = 0; + //for (int i = 0; i < g_Dbs.dbCount; i++) { + // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows; + // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; + //} + //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalInsertRows, totalAffectedRows, g_Dbs.threadCount); + + return 0; +} + +static void *specifiedTableQuery(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; + + setThreadName("specTableQuery"); + + if (pThreadInfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + pThreadInfo->taos = taos; + } + } + + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); + errorPrint("use database %s failed!\n\n", + g_queryInfo.dbName); + return NULL; + } + + uint64_t st = 0; + uint64_t et = 0; + + uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; + + uint64_t totalQueried = 0; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + + while(queryTimes --) { + if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < + (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { + taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms + } + + st = taosGetTimestampMs(); + + selectAndGetResult(pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); + + et = taosGetTimestampMs(); + printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", + taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0); + + totalQueried ++; + g_queryInfo.specifiedQueryInfo.totalQueried ++; + + uint64_t currentPrintTime = taosGetTimestampMs(); + uint64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n", + __func__, __LINE__, endTs, startTs); + printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", + pThreadInfo->threadID, + totalQueried, + (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; + } + } + return NULL; +} + +static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { + char sourceString[32] = "xxxx"; + char subTblName[TSDB_TABLE_NAME_LEN]; + sprintf(subTblName, "%s.%s", + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.childTblName + 
tblIndex*TSDB_TABLE_NAME_LEN); + + //printf("inSql: %s\n", inSql); + + char* pos = strstr(inSql, sourceString); + if (0 == pos) { + return; + } + + tstrncpy(outSql, inSql, pos - inSql + 1); + //printf("1: %s\n", outSql); + strncat(outSql, subTblName, BUFFER_SIZE - 1); + //printf("2: %s\n", outSql); + strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1); + //printf("3: %s\n", outSql); +} + +static void *superTableQuery(void *sarg) { + char *sqlstr = calloc(1, BUFFER_SIZE); + assert(sqlstr); + + threadInfo *pThreadInfo = (threadInfo *)sarg; + + setThreadName("superTableQuery"); + + if (pThreadInfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + free(sqlstr); + return NULL; + } else { + pThreadInfo->taos = taos; + } + } + + uint64_t st = 0; + uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; + + uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes; + uint64_t totalQueried = 0; + uint64_t startTs = taosGetTimestampMs(); + + uint64_t lastPrintTime = taosGetTimestampMs(); + while(queryTimes --) { + if (g_queryInfo.superQueryInfo.queryInterval + && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { + taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); + } + + st = taosGetTimestampMs(); + for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { + memset(sqlstr, 0, BUFFER_SIZE); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); + if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.superQueryInfo.result[j], + pThreadInfo->threadID); + } + selectAndGetResult(pThreadInfo, sqlstr); + + totalQueried++; + g_queryInfo.superQueryInfo.totalQueried ++; + + int64_t currentPrintTime = taosGetTimestampMs(); + int64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n", + pThreadInfo->threadID, + totalQueried, + (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; + } + } + } + et = taosGetTimestampMs(); + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n", + taosGetSelfPthreadId(), + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, + (double)(et - st)/1000.0); + } + + free(sqlstr); + return NULL; +} + +static int queryTestProcess() { + + setupForAnsiEscape(); + printfQueryMeta(); + resetAfterAnsiEscape(); + + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + if (0 != g_queryInfo.superQueryInfo.sqlCount) { + getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.stbName, + &g_queryInfo.superQueryInfo.childTblName, + &g_queryInfo.superQueryInfo.childTblCount); + } + + prompt(); + + if (g_args.debug_print || 
g_args.verbose_print) {
+        printfQuerySystemInfo(taos);
+    }
+
+    if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+        if (convertHostToServAddr(
+                    g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
+            ERROR_EXIT("convert host to server address");
+    }
+
+    pthread_t *pids = NULL;
+    threadInfo *infos = NULL;
+    //==== create sub threads for query from specify table
+    int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
+    uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
+
+    uint64_t startTs = taosGetTimestampMs();
+
+    if ((nSqlCount > 0) && (nConcurrent > 0)) {
+
+        pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
+        infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
+
+        if ((NULL == pids) || (NULL == infos)) {
+            taos_close(taos);
+            ERROR_EXIT("memory allocation failed for create threads\n");
+        }
+
+        for (uint64_t i = 0; i < nSqlCount; i++) {
+            for (int j = 0; j < nConcurrent; j++) {
+                uint64_t seq = i * nConcurrent + j;
+                threadInfo *pThreadInfo = infos + seq;
+                pThreadInfo->threadID = seq;
+                pThreadInfo->querySeq = i;
+
+                if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+
+                    char sqlStr[TSDB_DB_NAME_LEN + 5];
+                    sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
+                    if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+                        taos_close(taos);
+                        free(infos);
+                        free(pids);
+                        errorPrint2("use database %s failed!\n\n",
+                                g_queryInfo.dbName);
+                        return -1;
+                    }
+                }
+
+                if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) {
+#ifdef WINDOWS
+                    WSADATA wsaData;
+                    WSAStartup(MAKEWORD(2, 2), &wsaData);
+                    SOCKET sockfd;
+#else
+                    int sockfd;
+#endif
+                    sockfd = socket(AF_INET, SOCK_STREAM, 0);
+                    if (sockfd < 0) {
+#ifdef WINDOWS
+                        errorPrint( "Could not create socket : %d" , WSAGetLastError());
+#endif
+                        debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
+                        ERROR_EXIT("opening socket");
+                    }
+
+                    int retConn = connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr),
+                            sizeof(struct sockaddr));
+                    debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
+                    if (retConn < 0) {
+                        ERROR_EXIT("connecting");
+                    }
+                    pThreadInfo->sockfd = sockfd;
+                }
+                pThreadInfo->taos = NULL;// workaround to use separate taos connection;
+
+                pthread_create(pids + seq, NULL, specifiedTableQuery,
+                        pThreadInfo);
+            }
+        }
+    } else {
+        g_queryInfo.specifiedQueryInfo.concurrent = 0;
+    }
+
+    taos_close(taos);
+
+    pthread_t *pidsOfSub = NULL;
+    threadInfo *infosOfSub = NULL;
+    //==== create sub threads for query from all sub table of the super table
+    if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+            && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+        pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+        infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+
+        if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
+            free(infos);
+            free(pids);
+
+            ERROR_EXIT("memory allocation failed for create threads\n");
+        }
+
+        int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+        int threads = g_queryInfo.superQueryInfo.threadCnt;
+
+        int64_t a = ntables / threads;
+        if (a < 1) {
+            threads = ntables;
+            a = 1;
+        }
+
+        int64_t b = 0;
+        if (threads != 0) {
+            b = ntables % threads;
+        }
+
+        uint64_t tableFrom = 0;
+        for (int i = 0; i < threads; i++) {
+            threadInfo *pThreadInfo = infosOfSub + i;
+            pThreadInfo->threadID = i;
+
+            pThreadInfo->start_table_from = tableFrom;
+            pThreadInfo->ntables = i < b ? a + 1 : a;
+            pThreadInfo->end_table_to = i < b ?
tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; + if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { +#ifdef WINDOWS + WSADATA wsaData; + WSAStartup(MAKEWORD(2, 2), &wsaData); + SOCKET sockfd; +#else + int sockfd; +#endif + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) { +#ifdef WINDOWS + errorPrint( "Could not create socket : %d" , WSAGetLastError()); +#endif + debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); + ERROR_EXIT("opening socket"); + } + + int retConn = connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr), + sizeof(struct sockaddr)); + debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); + if (retConn < 0) { + ERROR_EXIT("connecting"); + } + pThreadInfo->sockfd = sockfd; + } + pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); + } + + g_queryInfo.superQueryInfo.threadCnt = threads; + } else { + g_queryInfo.superQueryInfo.threadCnt = 0; + } + + if ((nSqlCount > 0) && (nConcurrent > 0)) { + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + pthread_join(pids[i * nSqlCount + j], NULL); + if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { + threadInfo *pThreadInfo = infos + i * nSqlCount + j; +#ifdef WINDOWS + closesocket(pThreadInfo->sockfd); + WSACleanup(); +#else + close(pThreadInfo->sockfd); +#endif + } + } + } + } + + tmfree((char*)pids); + tmfree((char*)infos); + + for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); + if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { + threadInfo *pThreadInfo = infosOfSub + i; +#ifdef WINDOWS + closesocket(pThreadInfo->sockfd); + WSACleanup(); +#else + close(pThreadInfo->sockfd); +#endif + } + } + + tmfree((char*)pidsOfSub); + tmfree((char*)infosOfSub); + + // taos_close(taos);// workaround to use separate taos connection; + uint64_t endTs = taosGetTimestampMs(); + + uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + + g_queryInfo.superQueryInfo.totalQueried; + + fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n", + totalQueried, + (double)(totalQueried/((endTs-startTs)/1000.0))); + return 0; +} + +static void stable_sub_callback( + TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + if (res == NULL || taos_errno(res) != 0) { + errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + __func__, __LINE__, code, taos_errstr(res)); + return; + } + + if (param) + fetchResult(res, (threadInfo *)param); + // tao_unsubscribe() will free result. +} + +static void specified_sub_callback( + TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { + if (res == NULL || taos_errno(res) != 0) { + errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + __func__, __LINE__, code, taos_errstr(res)); + return; + } + + if (param) + fetchResult(res, (threadInfo *)param); + // tao_unsubscribe() will free result. 
+} + +static TAOS_SUB* subscribeImpl( + QUERY_CLASS class, + threadInfo *pThreadInfo, + char *sql, char* topic, bool restart, uint64_t interval) +{ + TAOS_SUB* tsub = NULL; + + if ((SPECIFIED_CLASS == class) + && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, specified_sub_callback, (void*)pThreadInfo, + g_queryInfo.specifiedQueryInfo.subscribeInterval); + } else if ((STABLE_CLASS == class) + && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, stable_sub_callback, (void*)pThreadInfo, + g_queryInfo.superQueryInfo.subscribeInterval); + } else { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, NULL, NULL, interval); + } + + if (tsub == NULL) { + errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql); + return NULL; + } + + return tsub; +} + +static void *superSubscribe(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; + char *subSqlStr = calloc(1, BUFFER_SIZE); + assert(subSqlStr); + + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; + uint64_t tsubSeq; + + setThreadName("superSub"); + + if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + free(subSqlStr); + errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", + pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); + exit(EXIT_FAILURE); + } + + if (pThreadInfo->taos == NULL) { + pThreadInfo->taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (pThreadInfo->taos == NULL) { + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + free(subSqlStr); + return NULL; + } + } + + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "USE %s", g_queryInfo.dbName); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); + errorPrint2("use database %s failed!\n\n", + g_queryInfo.dbName); + free(subSqlStr); + return NULL; + } + + char topic[32] = {0}; + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + tsubSeq = i - pThreadInfo->start_table_from; + verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->threadID, + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, i); + sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", + i, pThreadInfo->querySeq); + memset(subSqlStr, 0, BUFFER_SIZE); + replaceChildTblName( + g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], + subSqlStr, i); + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + + verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", + __func__, __LINE__, pThreadInfo->threadID, subSqlStr); + tsub[tsubSeq] = subscribeImpl( + STABLE_CLASS, + pThreadInfo, subSqlStr, topic, + g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.superQueryInfo.subscribeInterval); + if (NULL == tsub[tsubSeq]) { + taos_close(pThreadInfo->taos); + free(subSqlStr); + return NULL; + } + } + + // start loop to consume result + int consumed[MAX_QUERY_SQL_COUNT]; + for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { + consumed[i] = 0; + } + TAOS_RES* res = NULL; + + uint64_t st = 0, et = 0; + + while ((g_queryInfo.superQueryInfo.endAfterConsume == 
-1) + || (g_queryInfo.superQueryInfo.endAfterConsume > + consumed[pThreadInfo->end_table_to + - pThreadInfo->start_table_from])) { + + verbosePrint("super endAfterConsume: %d, consumed: %d\n", + g_queryInfo.superQueryInfo.endAfterConsume, + consumed[pThreadInfo->end_table_to + - pThreadInfo->start_table_from]); + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + tsubSeq = i - pThreadInfo->start_table_from; + if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { + continue; + } + + st = taosGetTimestampMs(); + performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et)); + res = taos_consume(tsub[tsubSeq]); + et = taosGetTimestampMs(); + performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st)); + + if (res) { + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + fetchResult(res, pThreadInfo); + } + consumed[tsubSeq] ++; + + if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) + && (consumed[tsubSeq] >= + g_queryInfo.superQueryInfo.resubAfterConsume)) { + verbosePrint("%s() LN%d, keepProgress:%d, resub super table query: %"PRIu64"\n", + __func__, __LINE__, + g_queryInfo.superQueryInfo.subscribeKeepProgress, + pThreadInfo->querySeq); + taos_unsubscribe(tsub[tsubSeq], + g_queryInfo.superQueryInfo.subscribeKeepProgress); + consumed[tsubSeq]= 0; + tsub[tsubSeq] = subscribeImpl( + STABLE_CLASS, + pThreadInfo, subSqlStr, topic, + g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.superQueryInfo.subscribeInterval + ); + if (NULL == tsub[tsubSeq]) { + taos_close(pThreadInfo->taos); + free(subSqlStr); + return NULL; + } + } + } + } + } + verbosePrint("%s() LN%d, super endAfterConsume: %d, consumed: %d\n", + __func__, __LINE__, + g_queryInfo.superQueryInfo.endAfterConsume, + consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]); + taos_free_result(res); + + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + tsubSeq = i - pThreadInfo->start_table_from; + taos_unsubscribe(tsub[tsubSeq], 0); + } + + taos_close(pThreadInfo->taos); + free(subSqlStr); + return NULL; +} + +static void *specifiedSubscribe(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; + // TAOS_SUB* tsub = NULL; + + setThreadName("specSub"); + + if (pThreadInfo->taos == NULL) { + pThreadInfo->taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (pThreadInfo->taos == NULL) { + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + return NULL; + } + } + + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "USE %s", g_queryInfo.dbName); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); + return NULL; + } + + sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + "taosdemo-subscribe-%"PRIu64"-%d", + pThreadInfo->querySeq, + pThreadInfo->threadID); + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( + SPECIFIED_CLASS, pThreadInfo, + 
g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeInterval); + if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { + taos_close(pThreadInfo->taos); + return NULL; + } + + // start loop to consume result + + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; + while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) + || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < + g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { + + printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n", + pThreadInfo->threadID, + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID], + pThreadInfo->querySeq, + g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]); + if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { + continue; + } + + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume( + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); + if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] + != 0) { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + fetchResult( + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], + pThreadInfo); + + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; + if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) + && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= + g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { + printf("keepProgress:%d, resub specified query: %"PRIu64"\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, + pThreadInfo->querySeq); + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; + taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = + subscribeImpl( + SPECIFIED_CLASS, + pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeInterval); + if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { + taos_close(pThreadInfo->taos); + return NULL; + } + } + } + } + taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]); + taos_close(pThreadInfo->taos); + + return NULL; +} + +static int subscribeTestProcess() { + setupForAnsiEscape(); + printfQueryMeta(); + resetAfterAnsiEscape(); + + prompt(); + + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (taos == NULL) { + errorPrint2("Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + if (0 != g_queryInfo.superQueryInfo.sqlCount) { + getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.stbName, + &g_queryInfo.superQueryInfo.childTblName, + &g_queryInfo.superQueryInfo.childTblCount); + } + + taos_close(taos); // workaround to use separate taos connection; + + pthread_t *pids 
= NULL; + threadInfo *infos = NULL; + + pthread_t *pidsOfStable = NULL; + threadInfo *infosOfStable = NULL; + + //==== create threads for query for specified table + if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { + debugPrint("%s() LN%d, specified query sqlCount %d.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount); + } else { + if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { + errorPrint2("%s() LN%d, specified query sqlCount %d.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount); + exit(EXIT_FAILURE); + } + + pids = calloc( + 1, + g_queryInfo.specifiedQueryInfo.sqlCount * + g_queryInfo.specifiedQueryInfo.concurrent * + sizeof(pthread_t)); + infos = calloc( + 1, + g_queryInfo.specifiedQueryInfo.sqlCount * + g_queryInfo.specifiedQueryInfo.concurrent * + sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); + exit(EXIT_FAILURE); + } + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { + uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; + threadInfo *pThreadInfo = infos + seq; + pThreadInfo->threadID = seq; + pThreadInfo->querySeq = i; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; + pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo); + } + } + } + + //==== create threads for super table query + if (g_queryInfo.superQueryInfo.sqlCount <= 0) { + debugPrint("%s() LN%d, super table query sqlCount %d.\n", + __func__, __LINE__, + g_queryInfo.superQueryInfo.sqlCount); + } else { + if ((g_queryInfo.superQueryInfo.sqlCount > 0) + && (g_queryInfo.superQueryInfo.threadCnt > 0)) { + pidsOfStable = calloc( + 1, + g_queryInfo.superQueryInfo.sqlCount * + g_queryInfo.superQueryInfo.threadCnt * + sizeof(pthread_t)); + infosOfStable = calloc( + 1, + g_queryInfo.superQueryInfo.sqlCount * + g_queryInfo.superQueryInfo.threadCnt * + sizeof(threadInfo)); + if ((NULL == pidsOfStable) || (NULL == infosOfStable)) { + errorPrint2("%s() LN%d, malloc failed for create threads\n", + __func__, __LINE__); + // taos_close(taos); + exit(EXIT_FAILURE); + } + + int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; + int threads = g_queryInfo.superQueryInfo.threadCnt; + + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int64_t b = 0; + if (threads != 0) { + b = ntables % threads; + } + + for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + uint64_t tableFrom = 0; + for (int j = 0; j < threads; j++) { + uint64_t seq = i * threads + j; + threadInfo *pThreadInfo = infosOfStable + seq; + pThreadInfo->threadID = seq; + pThreadInfo->querySeq = i; + + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = j < b ? a + 1 : a; + pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; + pthread_create(pidsOfStable + seq, + NULL, superSubscribe, pThreadInfo); + } + } + + g_queryInfo.superQueryInfo.threadCnt = threads; + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + for (int j = 0; j < threads; j++) { + uint64_t seq = i * threads + j; + pthread_join(pidsOfStable[seq], NULL); + } + } + } + } + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { + uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; + pthread_join(pids[seq],
NULL); + } + } + + tmfree((char*)pids); + tmfree((char*)infos); + + tmfree((char*)pidsOfStable); + tmfree((char*)infosOfStable); + // taos_close(taos); + return 0; +} + +static void setParaFromArg() { + char type[20]; + char length[20]; + if (g_args.host) { + tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); + } else { + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); + } + + if (g_args.user) { + tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); + } + + tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN); + + if (g_args.port) { + g_Dbs.port = g_args.port; + } + + g_Dbs.threadCount = g_args.nthreads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; + + g_Dbs.dbCount = 1; + g_Dbs.db[0].drop = true; + + tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); + g_Dbs.db[0].dbCfg.replica = g_args.replica; + tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN); + + tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); + + g_Dbs.use_metric = g_args.use_metric; + g_args.prepared_rand = min(g_args.insertRows, MAX_PREPARED_RAND); + g_Dbs.aggr_func = g_args.aggr_func; + + char dataString[TSDB_MAX_BYTES_PER_ROW]; + char *data_type = g_args.data_type; + char **dataType = g_args.dataType; + + memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); + + if ((data_type[0] == TSDB_DATA_TYPE_BINARY) + || (data_type[0] == TSDB_DATA_TYPE_BOOL) + || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { + g_Dbs.aggr_func = false; + } + + if (g_args.use_metric) { + g_Dbs.db[0].superTblCount = 1; + tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN); + g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables; + g_Dbs.threadCount = g_args.nthreads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; + g_Dbs.asyncMode = g_args.async_mode; + + g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; + g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; + g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; + g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; + tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, + g_args.tb_prefix, TBNAME_PREFIX_LEN); + tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN); + + if (g_args.iface == INTERFACE_BUT) { + g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE; + } else { + g_Dbs.db[0].superTbls[0].iface = g_args.iface; + } + tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, + "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; + + g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows; + g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; + + g_Dbs.db[0].superTbls[0].columnCount = 0; + for (int i = 0; i < MAX_NUM_COLUMNS; i++) { + if (data_type[i] == TSDB_DATA_TYPE_NULL) { + break; + } + + g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i]; + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); + if (1 == regexMatch(dataType[i], "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", REG_ICASE | + REG_EXTENDED)) { + sscanf(dataType[i], "%[^(](%[^)]", type, length); + g_Dbs.db[0].superTbls[0].columns[i].dataLen = atoi(length); + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + type, min(DATATYPE_BUFF_LEN, strlen(type) + 1)); + } else { + g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth; + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); + } + 
g_Dbs.db[0].superTbls[0].columnCount++; + } + + if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) { + g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount; + } else { + for (int i = g_Dbs.db[0].superTbls[0].columnCount; + i < g_args.columnCount; i++) { + g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT; + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); + g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; + g_Dbs.db[0].superTbls[0].columnCount++; + } + } + + tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, + "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); + g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; + + tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, + "BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1)); + g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth; + g_Dbs.db[0].superTbls[0].tagCount = 2; + } else { + g_Dbs.threadCountForCreateTbl = g_args.nthreads; + g_Dbs.db[0].superTbls[0].tagCount = 0; + } +} + +/* Function to do regular expression check */ +static int regexMatch(const char *s, const char *reg, int cflags) { + regex_t regex; + char msgbuf[100] = {0}; + + /* Compile regular expression */ + if (regcomp(&regex, reg, cflags) != 0) { + ERROR_EXIT("Fail to compile regex\n"); + } + + /* Execute regular expression */ + int reti = regexec(&regex, s, 0, NULL, 0); + if (!reti) { + regfree(&regex); + return 1; + } else if (reti == REG_NOMATCH) { + regfree(&regex); + return 0; + } else { + regerror(reti, &regex, msgbuf, sizeof(msgbuf)); + regfree(&regex); + printf("Regex match failed: %s\n", msgbuf); + exit(EXIT_FAILURE); + } + return 0; +} + +static int isCommentLine(char *line) { + if (line == NULL) return 1; + + return regexMatch(line, "^\\s*#.*", REG_EXTENDED); +} + +static void querySqlFile(TAOS* taos, char* sqlFile) +{ + FILE *fp = fopen(sqlFile, "r"); + if (fp == NULL) { + printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); + return; + } + + int read_len = 0; + char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW); + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + double t = taosGetTimestampMs(); + + while((read_len = tgetline(&line, &line_len, fp)) != -1) { + if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue; + line[--read_len] = '\0'; + + if (read_len == 0 || isCommentLine(line)) { // line starts with # + continue; + } + + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { + errorPrint2("%s() LN%d, queryDbExec %s failed!\n", + __func__, __LINE__, cmd); + tmfree(cmd); + tmfree(line); + tmfclose(fp); + return; + } + memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW); + cmd_len = 0; + } + + t = taosGetTimestampMs() - t; + printf("run %s took %.6f second(s)\n\n", sqlFile, t); + + tmfree(cmd); + tmfree(line); + tmfclose(fp); + return; +} + +static void testMetaFile() { + if (INSERT_TEST == g_args.test_mode) { + if (g_Dbs.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); + + insertTestProcess(); + + } else if (QUERY_TEST == g_args.test_mode) { + if (g_queryInfo.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + + queryTestProcess(); + + } else if (SUBSCRIBE_TEST == g_args.test_mode) { + if (g_queryInfo.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + + subscribeTestProcess(); + + } else { + ; + } +} + +static void
queryAggrFunc() { + // query data + + pthread_t read_id; + threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo)); + assert(pThreadInfo); + pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 + pThreadInfo->start_table_from = 0; + + if (g_args.use_metric) { + pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; + pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; + pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; + tstrncpy(pThreadInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); + } else { + pThreadInfo->ntables = g_args.ntables; + pThreadInfo->end_table_to = g_args.ntables -1; + tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); + } + + pThreadInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + if (pThreadInfo->taos == NULL) { + free(pThreadInfo); + errorPrint2("Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); + + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); + } else { + pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); + } + pthread_join(read_id, NULL); + taos_close(pThreadInfo->taos); + free(pThreadInfo); +} + +static void testCmdLine() { + + if (strlen(configDir)) { + wordexp_t full_path; + if (wordexp(configDir, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", configDir); + return; + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); + } + + g_args.test_mode = INSERT_TEST; + insertTestProcess(); + + if (g_Dbs.aggr_func) { + queryAggrFunc(); + } +} + +int main(int argc, char *argv[]) { + parse_args(argc, argv, &g_args); + + debugPrint("meta file: %s\n", g_args.metaFile); + + if (g_args.metaFile) { + g_totalChildTables = 0; + + if (false == getInfoFromJsonFile(g_args.metaFile)) { + printf("Failed to read %s\n", g_args.metaFile); + return 1; + } + + testMetaFile(); + } else { + memset(&g_Dbs, 0, sizeof(SDbs)); + g_Dbs.db = calloc(1, sizeof(SDataBase)); + assert(g_Dbs.db); + g_Dbs.db[0].superTbls = calloc(1, sizeof(SSuperTable)); + assert(g_Dbs.db[0].superTbls); + setParaFromArg(); + + if (NULL != g_args.sqlFile) { + TAOS* qtaos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + querySqlFile(qtaos, g_args.sqlFile); + taos_close(qtaos); + + } else { + testCmdLine(); + } + + if (g_dupstr) + free(g_dupstr); + } + postFreeResource(); + + return 0; +} diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index d552e6123fd6d3e496006a0cb79f662d5c139cc1..85731aca5a5dac1860a21a5a557becf5c7ad19ee 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -280,7 +280,15 @@ SDbInfo **g_dbInfos = NULL; TableInfo *g_tablesList = NULL; const char *argp_program_version = version; +#ifdef _TD_PRO_ +const char *argp_program_bug_address = ""; +#elif (_TD_KH_ == true) +const char *argp_program_bug_address = ""; +#elif (_TD_JH_ == true) +const char *argp_program_bug_address = ""; +#else const char *argp_program_bug_address = ""; +#endif /* Program documentation. */ static char doc[] = ""; @@ -303,6 +311,14 @@ static struct argp_option options[] = { {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, #ifdef _TD_POWER_ {"password", 'p', 0, 0, "User password to connect to server. 
Default is powerdb.", 0}, +#elif (_TD_TQ_ == true) + {"password", 'p', 0, 0, "User password to connect to server. Default is tqueue.", 0}, +#elif (_TD_PRO_ == true) + {"password", 'p', 0, 0, "User password to connect to server. Default is prodb.", 0}, +#elif (_TD_KH_ == true) + {"password", 'p', 0, 0, "User password to connect to server. Default is khroot.", 0}, +#elif (_TD_JH_ == true) + {"password", 'p', 0, 0, "User password to connect to server. Default is jhdata.", 0}, #else {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0}, #endif @@ -313,7 +329,15 @@ static struct argp_option options[] = { {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, #ifdef _TD_POWER_ - {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, + {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/power.cfg.", 1}, +#elif (_TD_TQ_ == true) + {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/tq/tq.cfg.", 1}, +#elif (_TD_PRO_ == true) + {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/ProDB/prodb.cfg.", 1}, +#elif (_TD_KH_ == true) + {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/kinghistorian/kinghistorian.cfg.", 1}, +#elif (_TD_JH_ == true) + {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/jh_taos/taos.cfg.", 1}, #else {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, #endif @@ -403,6 +427,14 @@ struct arguments g_args = { "root", #ifdef _TD_POWER_ "powerdb", +#elif (_TD_TQ_ == true) + "tqueue", +#elif (_TD_PRO_ == true) + "prodb", +#elif (_TD_KH_ == true) + "khroot", +#elif (_TD_JH_ == true) + "jhdata", #else "taosdata", #endif @@ -857,7 +889,7 @@ static int getTableRecordInfo( TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, dbName, g_args.port); if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + errorPrint("Failed to connect to server %s\n", g_args.host); return -1; } @@ -987,7 +1019,7 @@ static int getDumpDbCount() taos = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port); if (NULL == taos) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + errorPrint("Failed to connect to server %s\n", g_args.host); return 0; } @@ -1111,7 +1143,7 @@ static int64_t getNtbCountOfStb(char *dbName, char *stbName) TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, dbName, g_args.port); if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + errorPrint("Failed to connect to server %s\n", g_args.host); return -1; } @@ -1420,7 +1452,7 @@ static int64_t dumpCreateSTableClauseOfDb( g_args.user, g_args.password, dbInfo->name, g_args.port); if (NULL == taos) { errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", + "Failed to connect to server %s by specified database %s\n", g_args.host, dbInfo->name); return 0; } @@ -2157,7 +2189,7 @@ static int dumpInOneAvroFile(char* fcharset, TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, namespace, g_args.port); if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + errorPrint("Failed to connect to server %s\n", g_args.host); return -1; } @@ -2720,7 +2752,7 @@ static int64_t dumpTableData(FILE *fp, char 
*tbName, g_args.user, g_args.password, dbName, g_args.port); if (NULL == taos) { errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", + "Failed to connect to server %s by specified database %s\n", g_args.host, dbName); return -1; } @@ -3274,7 +3306,7 @@ static int dumpInSqlWorkThreads() pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port); if (pThread->taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + errorPrint("Failed to connect to server %s\n", g_args.host); free(infos); free(pids); return -1; @@ -3310,7 +3342,7 @@ static int dumpInDbs() NULL, g_args.port); if (taos == NULL) { - errorPrint("%s() LN%d, failed to connect to TDengine server\n", + errorPrint("%s() LN%d, failed to connect to server\n", __func__, __LINE__); return -1; } @@ -3461,7 +3493,7 @@ static int64_t dumpNtbOfDbByThreads( g_args.port ); if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", + errorPrint("%s() LN%d, Failed to connect to server, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); @@ -3503,7 +3535,7 @@ static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) g_args.user, g_args.password, dbInfo->name, g_args.port); if (NULL == taos) { errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", + "Failed to connect to server %s by specified database %s\n", g_args.host, dbInfo->name); return 0; } @@ -3596,7 +3628,7 @@ static int64_t dumpNtbOfStbByThreads( g_args.port ); if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", + errorPrint("%s() LN%d, Failed to connect to server, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); @@ -3693,7 +3725,7 @@ static int dumpOut() { taos = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port); if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + errorPrint("Failed to connect to server %s\n", g_args.host); goto _exit_failure; } diff --git a/src/mnode/inc/mnodeInt.h b/src/mnode/inc/mnodeInt.h index 7a791d76e6796cfed22657f1bc7ffdb26890ea40..aefdf23bdadaed47e56435ca8e8c0d049bec3ea4 100644 --- a/src/mnode/inc/mnodeInt.h +++ b/src/mnode/inc/mnodeInt.h @@ -41,9 +41,9 @@ extern int32_t sdbDebugFlag; #define sdbDebug(...) { if (sdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("SDB ", sdbDebugFlag, __VA_ARGS__); }} #define sdbTrace(...) { if (sdbDebugFlag & DEBUG_TRACE) { taosPrintLog("SDB ", sdbDebugFlag, __VA_ARGS__); }} -#define mLError(...) { monSaveLog(2, __VA_ARGS__); mError(__VA_ARGS__) } -#define mLWarn(...) { monSaveLog(1, __VA_ARGS__); mWarn(__VA_ARGS__) } -#define mLInfo(...) { monSaveLog(0, __VA_ARGS__); mInfo(__VA_ARGS__) } +#define mLError(...) { monSaveLogs(2, __VA_ARGS__); mError(__VA_ARGS__) } +#define mLWarn(...) { monSaveLogs(1, __VA_ARGS__); mWarn(__VA_ARGS__) } +#define mLInfo(...) 
{ monSaveLogs(0, __VA_ARGS__); mInfo(__VA_ARGS__) } #ifdef __cplusplus } diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 7dd199cca4248e5467017e1b6247f3b534c45711..168995916553dc8b1d02f9cd05563cfb4c5319de 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -1177,8 +1177,8 @@ static int32_t mnodeGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC int32_t cols = 0; SUserObj *pUser = mnodeGetUserFromConn(pConn); if (pUser == NULL) return 0; - - if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) { + + if (strcmp(pUser->pAcct->user, TSDB_DEFAULT_USER) != 0 ) { mnodeDecUserRef(pUser); return TSDB_CODE_MND_NO_RIGHTS; } @@ -1256,10 +1256,10 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo STR_TO_VARSTR(pWrite, syncRole[pVgid->role]); cols++; numOfRows++; - } } if (numOfRows >= rows) { + mnodeDecVgroupRef(pVgroup); break; } diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 15438fc234bb60089e9d0f52fe92eb5bf8b3eeae..70532d2216a9ad4ecb8e3ceca94367a535796a9e 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -166,7 +166,7 @@ static void mnodeCancelGetNextConn(void *pIter) { static int32_t mnodeGetConnsMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SUserObj *pUser = mnodeGetUserFromConn(pConn); if (pUser == NULL) return 0; - if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; + if (strcmp(pUser->pAcct->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; int32_t cols = 0; SSchema *pSchema = pMeta->schema; @@ -322,7 +322,7 @@ int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SHeartBeatMsg *pHBMsg) { static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { SUserObj *pUser = mnodeGetUserFromConn(pConn); if (pUser == NULL) return 0; - if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; + if (strcmp(pUser->pAcct->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS; int32_t cols = 0; SSchema *pSchema = pMeta->schema; diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h index 0320ab0f7fe5f72e70fd6d12d8d2b0a27dec3754..e2408058f068165506c7e6ffbc6e4ec3dee9a93c 100644 --- a/src/os/inc/osSysinfo.h +++ b/src/os/inc/osSysinfo.h @@ -30,10 +30,11 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize); int32_t taosGetCpuCores(); void taosGetSystemInfo(); -bool taosReadProcIO(int64_t* rchars, int64_t* wchars); -bool taosGetProcIO(float *readKB, float *writeKB); +bool taosReadProcIO(int64_t* rchars, int64_t* wchars, int64_t* rbytes, int64_t* wbytes); +bool taosGetProcIO(float *rcharKB, float *wcharKB, float *rbyteKB, float* wbyteKB); bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes); bool taosGetBandSpeed(float *bandSpeedKb); +bool taosGetNetworkIO(float *netInKb, float *netOutKb); void taosGetDisk(); bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ; bool taosGetProcMemory(float *memoryUsedMB) ; diff --git a/src/os/src/darwin/dwSysInfo.c b/src/os/src/darwin/dwSysInfo.c index a87a15a3f211768ecce747c7bc6ff236bad2f3ee..8cfba775688b221a85667df86dea0c46f54e02cc 100644 --- a/src/os/src/darwin/dwSysInfo.c +++ b/src/os/src/darwin/dwSysInfo.c @@ -191,15 +191,19 @@ void taosGetSystemInfo() { taosGetSystemLocale(); } -bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { +bool taosReadProcIO(int64_t *rchars, int64_t *wchars, int64_t *rbytes, int64_t *wbytes) { if (rchars) *rchars = 0; if (wchars) *wchars = 0; + if 
(rbytes) *rbytes = 0; + if (wbytes) *wbytes = 0; return true; } -bool taosGetProcIO(float *readKB, float *writeKB) { - *readKB = 0; - *writeKB = 0; +bool taosGetProcIO(float *rcharKB, float *wcharKB, float *rbyteKB, float *wbyteKB) { + *rcharKB = 0; + *wcharKB = 0; + *rbyteKB = 0; + *wbyteKB = 0; return true; } @@ -215,6 +219,12 @@ bool taosGetBandSpeed(float *bandSpeedKb) { return true; } +bool taosGetNetworkIO(float *netInKb, float *netOutKb) { + *netInKb = 0; + *netOutKb = 0; + return true; +} + bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { *sysCpuUsage = 0; *procCpuUsage = 0; diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index cc12968c72eef5b3970ca68cf660de502b402e1e..039d688526c4cb1bbcc3ad3163bf3d47437ee625 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -28,6 +28,7 @@ void taosClose(FileFd fd) { void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { const char *tdengineTmpFileNamePrefix = "tdengine-"; char tmpPath[PATH_MAX]; + static uint64_t seqId = 0; int32_t len = (int32_t)strlen(tsTempDir); memcpy(tmpPath, tsTempDir, len); @@ -43,8 +44,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) { strcat(tmpPath, "-%d-%s"); } - char rand[8] = {0}; - taosRandStr(rand, tListLen(rand) - 1); + char rand[32] = {0}; + + sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1)); + snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand); } diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index 64d33be40ed09b2783b4c7e08b6c20618a43cf8a..af8f2dcdaf1568ba5e10656aacb7d0958155dde2 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -335,7 +335,9 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { } bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { - *bytes = 0; + if (bytes) *bytes = 0; + if (rbytes) *rbytes = 0; + if (tbytes) *tbytes = 0; FILE *fp = fopen(tsSysNetFile, "r"); if (fp == NULL) { uError("open file:%s failed", tsSysNetFile); @@ -350,7 +352,7 @@ bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { memset(line, 0, len); int64_t o_rbytes = 0; - int64_t rpackts = 0; + int64_t rpackets = 0; int64_t o_tbytes = 0; int64_t tpackets = 0; int64_t nouse1 = 0; @@ -376,10 +378,10 @@ bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { sscanf(line, "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, - nouse0, &o_rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets); + nouse0, &o_rbytes, &rpackets, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets); if (rbytes) *rbytes = o_rbytes; if (tbytes) *tbytes = o_tbytes; - *bytes += (o_rbytes + o_tbytes); + if (bytes) *bytes += (o_rbytes + o_tbytes); } tfree(line); @@ -424,7 +426,46 @@ bool taosGetBandSpeed(float *bandSpeedKb) { return true; } -bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { +bool taosGetNetworkIO(float *netInKb, float *netOutKb) { + static int64_t lastBytesIn = 0, lastBytesOut = 0; + static time_t lastTimeIO = 0; + int64_t curBytesIn = 0, curBytesOut = 0; + time_t curTime = time(NULL); + + if (!taosGetCardInfo(NULL, &curBytesIn, &curBytesOut)) { + return false; + } + + if (lastTimeIO == 0 || lastBytesIn == 0 || lastBytesOut == 0) { + lastTimeIO = curTime; + lastBytesIn = curBytesIn; lastBytesOut = curBytesOut; + *netInKb = 0; + *netOutKb = 0; + return true; + } + + if 
(lastTimeIO >= curTime || lastBytesIn > curBytesIn || lastBytesOut > curBytesOut) { + lastTimeIO = curTime; + lastBytesIn = curBytesIn; lastBytesOut = curBytesOut; + *netInKb = 0; + *netOutKb = 0; + return true; + } + + double totalBytesIn = (double)(curBytesIn - lastBytesIn) / 1024 * 8; // Kb + *netInKb = (float)(totalBytesIn / (double)(curTime - lastTimeIO)); + + double totalBytesOut = (double)(curBytesOut - lastBytesOut) / 1024 * 8; // Kb + *netOutKb = (float)(totalBytesOut / (double)(curTime - lastTimeIO)); + + lastTimeIO = curTime; + lastBytesIn = curBytesIn; + lastBytesOut = curBytesOut; + + return true; +} + +bool taosReadProcIO(int64_t *rchars, int64_t *wchars, int64_t *rbytes, int64_t *wbytes) { FILE *fp = fopen(tsProcIOFile, "r"); if (fp == NULL) { uError("open file:%s failed", tsProcIOFile); @@ -434,7 +475,7 @@ bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { ssize_t _bytes = 0; size_t len; char * line = NULL; - char tmp[10]; + char tmp[15]; int readIndex = 0; while (!feof(fp)) { @@ -450,16 +491,21 @@ bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { } else if (strstr(line, "wchar:") != NULL) { sscanf(line, "%s %" PRId64, tmp, wchars); readIndex++; - } else { + } else if (strstr(line, "read_bytes:") != NULL){ + sscanf(line, "%s %" PRId64, tmp, rbytes); + readIndex++; + } else if (strstr(line, "write_bytes:") != NULL){ + sscanf(line, "%s %" PRId64, tmp, wbytes); + readIndex++; } - if (readIndex >= 2) break; + if (readIndex >= 4) break; } tfree(line); fclose(fp); - if (readIndex < 2) { + if (readIndex < 4) { uError("read file:%s failed", tsProcIOFile); return false; } @@ -467,30 +513,43 @@ bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { return true; } -bool taosGetProcIO(float *readKB, float *writeKB) { - static int64_t lastReadbyte = -1; - static int64_t lastWritebyte = -1; +bool taosGetProcIO(float *rcharKB, float *wcharKB, float *rbyteKB, float *wbyteKB) { + static int64_t lastRchar = -1, lastRbyte = -1; + static int64_t lastWchar = -1, lastWbyte = -1; + static time_t lastTime = 0; + time_t curTime = time(NULL); - int64_t curReadbyte = 0; - int64_t curWritebyte = 0; + int64_t curRchar = 0, curRbyte = 0; + int64_t curWchar = 0, curWbyte = 0; - if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { + if (!taosReadProcIO(&curRchar, &curWchar, &curRbyte, &curWbyte)) { return false; } - if (lastReadbyte == -1 || lastWritebyte == -1) { - lastReadbyte = curReadbyte; - lastWritebyte = curWritebyte; + if (lastTime == 0 || lastRchar == -1 || lastWchar == -1 || lastRbyte == -1 || lastWbyte == -1) { + lastTime = curTime; + lastRchar = curRchar; + lastWchar = curWchar; + lastRbyte = curRbyte; + lastWbyte = curWbyte; return false; } - *readKB = (float)((double)(curReadbyte - lastReadbyte) / 1024); - *writeKB = (float)((double)(curWritebyte - lastWritebyte) / 1024); - if (*readKB < 0) *readKB = 0; - if (*writeKB < 0) *writeKB = 0; + *rcharKB = (float)((double)(curRchar - lastRchar) / 1024 / (double)(curTime - lastTime)); + *wcharKB = (float)((double)(curWchar - lastWchar) / 1024 / (double)(curTime - lastTime)); + if (*rcharKB < 0) *rcharKB = 0; + if (*wcharKB < 0) *wcharKB = 0; + + *rbyteKB = (float)((double)(curRbyte - lastRbyte) / 1024 / (double)(curTime - lastTime)); + *wbyteKB = (float)((double)(curWbyte - lastWbyte) / 1024 / (double)(curTime - lastTime)); + if (*rbyteKB < 0) *rbyteKB = 0; + if (*wbyteKB < 0) *wbyteKB = 0; - lastReadbyte = curReadbyte; - lastWritebyte = curWritebyte; + lastRchar = curRchar; + lastWchar = curWchar; + lastRbyte = curRbyte; + lastWbyte = 
curWbyte; + lastTime = curTime; return true; } @@ -501,13 +560,13 @@ void taosGetSystemInfo() { tsNumOfCores = taosGetCpuCores(); tsTotalMemoryMB = taosGetTotalMemory(); - float tmp1, tmp2; + float tmp1, tmp2, tmp3, tmp4; taosGetSysMemory(&tmp1); taosGetProcMemory(&tmp2); // taosGetDisk(); taosGetBandSpeed(&tmp1); taosGetCpuUsage(&tmp1, &tmp2); - taosGetProcIO(&tmp1, &tmp2); + taosGetProcIO(&tmp1, &tmp2, &tmp3, &tmp4); taosGetSystemTimezone(); taosGetSystemLocale(); diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c index 35ca64d79f8b7a883014fd6ca980300ede22d6e2..84c873202b685e690252890e347632e096a4b39e 100644 --- a/src/os/src/linux/linuxEnv.c +++ b/src/os/src/linux/linuxEnv.c @@ -39,6 +39,20 @@ void osInit() { strcpy(tsDataDir, "/var/lib/ProDB"); strcpy(tsLogDir, "/var/log/ProDB"); strcpy(tsScriptDir, "/etc/ProDB"); +#elif (_TD_KH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "/etc/kinghistorian"); + } + strcpy(tsDataDir, "/var/lib/kinghistorian"); + strcpy(tsLogDir, "/var/log/kinghistorian"); + strcpy(tsScriptDir, "/etc/kinghistorian"); +#elif (_TD_JH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "/etc/jh_taos"); + } + strcpy(tsDataDir, "/var/lib/jh_taos"); + strcpy(tsLogDir, "/var/log/jh_taos"); + strcpy(tsScriptDir, "/etc/jh_taos"); #else if (configDir[0] == 0) { strcpy(configDir, "/etc/taos"); diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c index 6f46bb43c75ff2c9735fc53a11bce585c1c213f6..6e087c9b29d7468b7c5a4e82c0f69b38f2c01223 100644 --- a/src/os/src/windows/wEnv.c +++ b/src/os/src/windows/wEnv.c @@ -33,12 +33,12 @@ void osInit() { strcpy(tsScriptDir, "C:/PowerDB/script"); #elif (_TD_TQ_ == true) if (configDir[0] == 0) { - strcpy(configDir, "C:/TQ/cfg"); + strcpy(configDir, "C:/TQueue/cfg"); } - strcpy(tsVnodeDir, "C:/TQ/data"); - strcpy(tsDataDir, "C:/TQ/data"); - strcpy(tsLogDir, "C:/TQ/log"); - strcpy(tsScriptDir, "C:/TQ/script"); + strcpy(tsVnodeDir, "C:/TQueue/data"); + strcpy(tsDataDir, "C:/TQueue/data"); + strcpy(tsLogDir, "C:/TQueue/log"); + strcpy(tsScriptDir, "C:/TQueue/script"); #elif (_TD_PRO_ == true) if (configDir[0] == 0) { strcpy(configDir, "C:/ProDB/cfg"); @@ -47,6 +47,22 @@ void osInit() { strcpy(tsDataDir, "C:/ProDB/data"); strcpy(tsLogDir, "C:/ProDB/log"); strcpy(tsScriptDir, "C:/ProDB/script"); +#elif (_TD_KH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "C:/KingHistorian/cfg"); + } + strcpy(tsVnodeDir, "C:/KingHistorian/data"); + strcpy(tsDataDir, "C:/KingHistorian/data"); + strcpy(tsLogDir, "C:/KingHistorian/log"); + strcpy(tsScriptDir, "C:/KingHistorian/script"); +#elif (_TD_JH_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "C:/jh_iot/cfg"); + } + strcpy(tsVnodeDir, "C:/jh_iot/data"); + strcpy(tsDataDir, "C:/jh_iot/data"); + strcpy(tsLogDir, "C:/jh_iot/log"); + strcpy(tsScriptDir, "C:/jh_iot/script"); #else if (configDir[0] == 0) { strcpy(configDir, "C:/TDengine/cfg"); diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c index 831a6bdaf09c32e0e1a35bb240200de437b36ae4..193a83d7d73ee904204fa6ce1a5a1b562c92d17a 100644 --- a/src/os/src/windows/wSysinfo.c +++ b/src/os/src/windows/wSysinfo.c @@ -169,40 +169,59 @@ bool taosGetBandSpeed(float *bandSpeedKb) { return true; } -bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { +bool taosGetNetworkIO(float *netInKb, float *netOutKb) { + *netInKb = 0; + *netOutKb = 0; + return true; +} + +bool taosReadProcIO(int64_t *rchars, int64_t *wchars, int64_t *rbytes, int64_t *wbytes) { IO_COUNTERS io_counter; if 
(GetProcessIoCounters(GetCurrentProcess(), &io_counter)) { - if (readbyte) *readbyte = io_counter.ReadTransferCount; - if (writebyte) *writebyte = io_counter.WriteTransferCount; + if (rchars) *rchars = io_counter.ReadTransferCount; + if (wchars) *wchars = io_counter.WriteTransferCount; return true; } return false; } -bool taosGetProcIO(float *readKB, float *writeKB) { - static int64_t lastReadbyte = -1; - static int64_t lastWritebyte = -1; +bool taosGetProcIO(float *rcharKB, float *wcharKB, float *rbyteKB, float *wbyteKB) { + static int64_t lastRchar = -1, lastRbyte = -1; + static int64_t lastWchar = -1, lastWbyte = -1; + static time_t lastTime = 0; + time_t curTime = time(NULL); - int64_t curReadbyte = 0; - int64_t curWritebyte = 0; + int64_t curRchar = 0, curRbyte = 0; + int64_t curWchar = 0, curWbyte = 0; - if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { + if (!taosReadProcIO(&curRchar, &curWchar, &curRbyte, &curWbyte)) { return false; } - if (lastReadbyte == -1 || lastWritebyte == -1) { - lastReadbyte = curReadbyte; - lastWritebyte = curWritebyte; + if (lastTime == 0 || lastRchar == -1 || lastWchar == -1 || lastRbyte == -1 || lastWbyte == -1) { + lastTime = curTime; + lastRchar = curRchar; + lastWchar = curWchar; + lastRbyte = curRbyte; + lastWbyte = curWbyte; return false; } - *readKB = (float)((double)(curReadbyte - lastReadbyte) / 1024); - *writeKB = (float)((double)(curWritebyte - lastWritebyte) / 1024); - if (*readKB < 0) *readKB = 0; - if (*writeKB < 0) *writeKB = 0; + *rcharKB = (float)((double)(curRchar - lastRchar) / 1024 / (double)(curTime - lastTime)); + *wcharKB = (float)((double)(curWchar - lastWchar) / 1024 / (double)(curTime - lastTime)); + if (*rcharKB < 0) *rcharKB = 0; + if (*wcharKB < 0) *wcharKB = 0; + + *rbyteKB = (float)((double)(curRbyte - lastRbyte) / 1024 / (double)(curTime - lastTime)); + *wbyteKB = (float)((double)(curWbyte - lastWbyte) / 1024 / (double)(curTime - lastTime)); + if (*rbyteKB < 0) *rbyteKB = 0; + if (*wbyteKB < 0) *wbyteKB = 0; - lastReadbyte = curReadbyte; - lastWritebyte = curWritebyte; + lastRchar = curRchar; + lastWchar = curWchar; + lastRbyte = curRbyte; + lastWbyte = curWbyte; + lastTime = curTime; return true; } @@ -211,11 +230,11 @@ void taosGetSystemInfo() { tsNumOfCores = taosGetCpuCores(); tsTotalMemoryMB = taosGetTotalMemory(); - float tmp1, tmp2; + float tmp1, tmp2, tmp3, tmp4; // taosGetDisk(); taosGetBandSpeed(&tmp1); taosGetCpuUsage(&tmp1, &tmp2); - taosGetProcIO(&tmp1, &tmp2); + taosGetProcIO(&tmp1, &tmp2, &tmp3, &tmp4); taosGetSystemTimezone(); taosGetSystemLocale(); diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index af0f4c579557d1bd919847ef858af8dc1616bb6f..de595907659ffc84c42afb7802b1a7902b126ea1 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -42,6 +42,7 @@ #define HTTP_WRITE_WAIT_TIME_MS 5 #define HTTP_PASSWORD_LEN TSDB_UNI_LEN #define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + HTTP_PASSWORD_LEN) +#define HTTP_STATUS_CODE_NUM 63 typedef enum HttpReqType { HTTP_REQTYPE_OTHERS = 0, @@ -187,8 +188,9 @@ typedef struct HttpServer { SOCKET fd; int32_t numOfThreads; int32_t methodScannerLen; - int32_t requestNum; + int64_t requestNum; int32_t status; + int32_t statusCodeErrs[HTTP_STATUS_CODE_NUM]; pthread_t thread; HttpThread * pThreads; void * contextCache; diff --git a/src/plugins/http/src/httpMetricsHandle.c b/src/plugins/http/src/httpMetricsHandle.c index dbabd48774aa077b1b4f842b4bc47dfd5de67ce4..2031c2a6db2630b8a944cdb582d07e46c1f9313e 100644 --- 
a/src/plugins/http/src/httpMetricsHandle.c +++ b/src/plugins/http/src/httpMetricsHandle.c @@ -123,9 +123,9 @@ bool metricsProcessRequest(HttpContext* pContext) { } { - int64_t rchars = 0; - int64_t wchars = 0; - bool succeeded = taosReadProcIO(&rchars, &wchars); + int64_t rchars = 0, rbytes = 0; + int64_t wchars = 0, wbytes = 0; + bool succeeded = taosReadProcIO(&rchars, &wchars, &rbytes, &wbytes); if (!succeeded) { httpError("failed to get io info"); } else { @@ -164,7 +164,7 @@ bool metricsProcessRequest(HttpContext* pContext) { } { - SStatisInfo info = dnodeGetStatisInfo(); + SDnodeStatisInfo info = dnodeGetStatisInfo(); { char* keyReqHttp = "req_http"; char* keyReqSelect = "req_select"; @@ -181,4 +181,4 @@ bool metricsProcessRequest(HttpContext* pContext) { pContext->reqType = HTTP_REQTYPE_OTHERS; httpFreeJsonBuf(pContext); return false; -} \ No newline at end of file +} diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index 824ce746704b477ee482d82fc617ce37af307a59..a2452a16b94fea060a370c86518bb36c1da45070 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -21,6 +21,7 @@ #include "httpResp.h" #include "httpJson.h" #include "httpContext.h" +#include "monitor.h" const char *httpKeepAliveStr[] = {"", "Connection: Keep-Alive\r\n", "Connection: Close\r\n"}; @@ -153,6 +154,10 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) { httpCode = pContext->parser->httpCode; } + HttpServer *pServer = &tsHttpServer; + SMonHttpStatus *httpStatus = monGetHttpStatusHashTableEntry(httpCode); + pServer->statusCodeErrs[httpStatus->index] += 1; + pContext->error = true; char *httpCodeStr = httpGetStatusDesc(httpCode); diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 13a0835c3960333c6d12aa443025de5fb95d565e..7f350a6f1599ba857a4249f500ffcb59bd50985e 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -53,7 +53,7 @@ static void httpStopThread(HttpThread *pThread) { break; } } while (0); - if (r) { + if (r && taosCheckPthreadValid(pThread->thread)) { pthread_cancel(pThread->thread); } #else @@ -63,15 +63,21 @@ static void httpStopThread(HttpThread *pThread) { httpError("%s, failed to create eventfd, will call pthread_cancel instead, which may result in data corruption: %s", pThread->label, strerror(errno)); pThread->stop = true; - pthread_cancel(pThread->thread); + if (taosCheckPthreadValid(pThread->thread)) { + pthread_cancel(pThread->thread); + } } else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { httpError("%s, failed to call epoll_ctl, will call pthread_cancel instead, which may result in data corruption: %s", pThread->label, strerror(errno)); - pthread_cancel(pThread->thread); + if (taosCheckPthreadValid(pThread->thread)) { + pthread_cancel(pThread->thread); + } } #endif // __APPLE__ - pthread_join(pThread->thread, NULL); + if (taosCheckPthreadValid(pThread->thread)) { + pthread_join(pThread->thread, NULL); + } #ifdef __APPLE__ if (sv[0] != -1) { @@ -190,7 +196,7 @@ static void httpProcessHttpData(void *param) { } else { if (httpReadData(pContext)) { (*(pThread->processData))(pContext); - atomic_fetch_add_32(&pServer->requestNum, 1); + atomic_fetch_add_64(&pServer->requestNum, 1); } } } @@ -398,9 +404,12 @@ static bool httpReadData(HttpContext *pContext) { return true; } } else if (nread < 0) { - if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { + if (errno == EINTR) { + httpDebug("context:%p, fd:%d, read from socket 
error:%d, continue", pContext, pContext->fd, errno); + continue; + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno); - continue; // later again + return false; } else { httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno); taosCloseSocket(pContext->fd); diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c index 085863f4e410ec7f80e531db6045594fbed37201..1e388541acf825e4852443bd385c08832b03147a 100644 --- a/src/plugins/http/src/httpSystem.c +++ b/src/plugins/http/src/httpSystem.c @@ -120,4 +120,10 @@ void httpCleanUpSystem() { tsHttpServer.status = HTTP_SERVER_CLOSED; } -int32_t httpGetReqCount() { return atomic_exchange_32(&tsHttpServer.requestNum, 0); } +int64_t httpGetReqCount() { return atomic_exchange_64(&tsHttpServer.requestNum, 0); } +int32_t httpGetStatusCodeCount(int index) { + return atomic_load_32(&tsHttpServer.statusCodeErrs[index]); +} +int32_t httpClearStatusCodeCount(int index) { + return atomic_exchange_32(&tsHttpServer.statusCodeErrs[index], 0); +} diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index fea793fa860fd17ff30bcecae1436180bc6b34bf..ed60ba42caa4a882f21b511aa8009d10d52dad52 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -17,12 +17,14 @@ #include "os.h" #include "taosdef.h" #include "taoserror.h" +#include "tfs.h" #include "tlog.h" #include "ttimer.h" #include "tutil.h" #include "tscUtil.h" #include "tsclient.h" #include "dnode.h" +#include "vnode.h" #include "monitor.h" #include "taoserror.h" @@ -33,10 +35,81 @@ #define monDebug(...) { if (monDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON ", monDebugFlag, __VA_ARGS__); }} #define monTrace(...) 
{ if (monDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monDebugFlag, __VA_ARGS__); }} -#define SQL_LENGTH 1030 -#define LOG_LEN_STR 512 -#define IP_LEN_STR TSDB_EP_LEN -#define CHECK_INTERVAL 1000 +#define SQL_LENGTH 4096 +#define LOG_LEN_STR 512 +#define IP_LEN_STR TSDB_EP_LEN +#define VGROUP_STATUS_LEN 512 +#define DNODE_INFO_LEN 128 +#define QUERY_ID_LEN 24 +#define CHECK_INTERVAL 1000 + +#define SQL_STR_FMT "\"%s\"" + +static SMonHttpStatus monHttpStatusTable[] = { + {"HTTP_CODE_CONTINUE", 100}, + {"HTTP_CODE_SWITCHING_PROTOCOL", 101}, + {"HTTP_CODE_PROCESSING", 102}, + {"HTTP_CODE_EARLY_HINTS", 103}, + {"HTTP_CODE_OK", 200}, + {"HTTP_CODE_CREATED", 201}, + {"HTTP_CODE_ACCEPTED", 202}, + {"HTTP_CODE_NON_AUTHORITATIVE_INFO", 203}, + {"HTTP_CODE_NO_CONTENT", 204}, + {"HTTP_CODE_RESET_CONTENT", 205}, + {"HTTP_CODE_PARTIAL_CONTENT", 206}, + {"HTTP_CODE_MULTI_STATUS", 207}, + {"HTTP_CODE_ALREADY_REPORTED", 208}, + {"HTTP_CODE_IM_USED", 226}, + {"HTTP_CODE_MULTIPLE_CHOICE", 300}, + {"HTTP_CODE_MOVED_PERMANENTLY", 301}, + {"HTTP_CODE_FOUND", 302}, + {"HTTP_CODE_SEE_OTHER", 303}, + {"HTTP_CODE_NOT_MODIFIED", 304}, + {"HTTP_CODE_USE_PROXY", 305}, + {"HTTP_CODE_UNUSED", 306}, + {"HTTP_CODE_TEMPORARY_REDIRECT", 307}, + {"HTTP_CODE_PERMANENT_REDIRECT", 308}, + {"HTTP_CODE_BAD_REQUEST", 400}, + {"HTTP_CODE_UNAUTHORIZED", 401}, + {"HTTP_CODE_PAYMENT_REQUIRED", 402}, + {"HTTP_CODE_FORBIDDEN", 403}, + {"HTTP_CODE_NOT_FOUND", 404}, + {"HTTP_CODE_METHOD_NOT_ALLOWED", 405}, + {"HTTP_CODE_NOT_ACCEPTABLE", 406}, + {"HTTP_CODE_PROXY_AUTH_REQUIRED", 407}, + {"HTTP_CODE_REQUEST_TIMEOUT", 408}, + {"HTTP_CODE_CONFLICT", 409}, + {"HTTP_CODE_GONE", 410}, + {"HTTP_CODE_LENGTH_REQUIRED", 411}, + {"HTTP_CODE_PRECONDITION_FAILED", 412}, + {"HTTP_CODE_PAYLOAD_TOO_LARGE", 413}, + {"HTTP_CODE_URI_TOO_LARGE", 414}, + {"HTTP_CODE_UNSUPPORTED_MEDIA_TYPE", 415}, + {"HTTP_CODE_RANGE_NOT_SATISFIABLE", 416}, + {"HTTP_CODE_EXPECTATION_FAILED", 417}, + {"HTTP_CODE_IM_A_TEAPOT", 418}, + {"HTTP_CODE_MISDIRECTED_REQUEST", 421}, + {"HTTP_CODE_UNPROCESSABLE_ENTITY", 422}, + {"HTTP_CODE_LOCKED", 423}, + {"HTTP_CODE_FAILED_DEPENDENCY", 424}, + {"HTTP_CODE_TOO_EARLY", 425}, + {"HTTP_CODE_UPGRADE_REQUIRED", 426}, + {"HTTP_CODE_PRECONDITION_REQUIRED", 428}, + {"HTTP_CODE_TOO_MANY_REQUESTS", 429}, + {"HTTP_CODE_REQ_HDR_FIELDS_TOO_LARGE",431}, + {"HTTP_CODE_UNAVAIL_4_LEGAL_REASONS", 451}, + {"HTTP_CODE_INTERNAL_SERVER_ERROR", 500}, + {"HTTP_CODE_NOT_IMPLEMENTED", 501}, + {"HTTP_CODE_BAD_GATEWAY", 502}, + {"HTTP_CODE_SERVICE_UNAVAILABLE", 503}, + {"HTTP_CODE_GATEWAY_TIMEOUT", 504}, + {"HTTP_CODE_HTTP_VER_NOT_SUPPORTED", 505}, + {"HTTP_CODE_VARIANT_ALSO_NEGOTIATES", 506}, + {"HTTP_CODE_INSUFFICIENT_STORAGE", 507}, + {"HTTP_CODE_LOOP_DETECTED", 508}, + {"HTTP_CODE_NOT_EXTENDED", 510}, + {"HTTP_CODE_NETWORK_AUTH_REQUIRED", 511} +}; typedef enum { MON_CMD_CREATE_DB, @@ -46,6 +119,18 @@ typedef enum { MON_CMD_CREATE_TB_DN, MON_CMD_CREATE_TB_ACCT_ROOT, MON_CMD_CREATE_TB_SLOWQUERY, + //followings are extension for taoskeeper + MON_CMD_CREATE_TB_CLUSTER, + MON_CMD_CREATE_MT_DNODES, + MON_CMD_CREATE_TB_DNODE, + MON_CMD_CREATE_MT_DISKS, + MON_CMD_CREATE_TB_DISKS, + MON_CMD_CREATE_MT_VGROUPS, + MON_CMD_CREATE_MT_LOGS, + MON_CMD_CREATE_TB_DNODE_LOG, + MON_CMD_CREATE_TB_GRANTS, + MON_CMD_CREATE_MT_RESTFUL, + MON_CMD_CREATE_TB_RESTFUL, MON_CMD_MAX } EMonCmd; @@ -61,17 +146,46 @@ typedef struct { int8_t cmdIndex; int8_t state; int8_t start; // enable/disable by mnode - int8_t quiting; // taosd is quiting + int8_t quiting; // taosd is quiting char 
sql[SQL_LENGTH + 1]; } SMonConn; +typedef struct { + SDnodeStatisInfo dInfo; + SVnodeStatisInfo vInfo; + float io_read; + float io_write; + float io_read_disk; + float io_write_disk; + int32_t monQueryReqCnt; + int32_t monSubmitReqCnt; +} SMonStat; + +static void *monHttpStatusHashTable; + static SMonConn tsMonitor = {0}; +static SMonStat tsMonStat = {{0}}; +static int32_t monQueryReqNum = 0, monSubmitReqNum = 0; +static bool monHasMnodeMaster = false; + static void monSaveSystemInfo(); +static void monSaveClusterInfo(); +static void monSaveDnodesInfo(); +static void monSaveVgroupsInfo(); +static void monSaveSlowQueryInfo(); +static void monSaveDisksInfo(); +static void monSaveGrantsInfo(); +static void monSaveHttpReqInfo(); +static void monGetSysStats(); static void *monThreadFunc(void *param); static void monBuildMonitorSql(char *sql, int32_t cmd); +static void monInitHttpStatusHashTable(); +static void monCleanupHttpStatusHashTable(); + extern int32_t (*monStartSystemFp)(); extern void (*monStopSystemFp)(); extern void (*monExecuteSQLFp)(char *sql); +extern char * strptime(const char *buf, const char *fmt, struct tm *tm); //make the compilation pass int32_t monInitSystem() { if (tsMonitor.ep[0] == 0) { @@ -84,6 +198,7 @@ int32_t monInitSystem() { tsMonitor.ep[i] = '_'; } } + monInitHttpStatusHashTable(); pthread_attr_t thAttr; pthread_attr_init(&thAttr); @@ -112,6 +227,31 @@ int32_t monStartSystem() { return 0; } +static void monInitHttpStatusHashTable() { + int32_t numOfEntries = tListLen(monHttpStatusTable); + monHttpStatusHashTable = taosHashInit(numOfEntries, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false); + for (int32_t i = 0; i < numOfEntries; ++i) { + monHttpStatusTable[i].index = i; + SMonHttpStatus* pEntry = &monHttpStatusTable[i]; + taosHashPut(monHttpStatusHashTable, &monHttpStatusTable[i].code, sizeof(int32_t), + &pEntry, POINTER_BYTES); + } +} + +static void monCleanupHttpStatusHashTable() { + void* m = monHttpStatusHashTable; + if (m != NULL && atomic_val_compare_exchange_ptr(&monHttpStatusHashTable, m, 0) == m) { + taosHashCleanup(m); + } +} + +SMonHttpStatus *monGetHttpStatusHashTableEntry(int32_t code) { + if (monHttpStatusHashTable == NULL) { + return NULL; + } + return (SMonHttpStatus*)taosHashGet(monHttpStatusHashTable, &code, sizeof(int32_t)); +} + static void *monThreadFunc(void *param) { monDebug("starting to initialize monitor module ..."); setThreadName("monitor"); @@ -132,7 +272,7 @@ static void *monThreadFunc(void *param) { if (tsMonitor.start == 0) { continue; } - + if (dnodeGetDnodeId() <= 0) { monDebug("dnode not initialized, waiting for 3000 ms to start monitor module"); continue; @@ -173,6 +313,19 @@ static void *monThreadFunc(void *param) { if (tsMonitor.state == MON_STATE_INITED) { if (accessTimes % tsMonitorInterval == 0) { + monGetSysStats(); + //monSaveDnodesInfo has to be the first, as it calculates + //stats using monSubmitReqNum before any insertion from monitor + monSaveDnodesInfo(); + if (monHasMnodeMaster) { + //only mnode master will write cluster info + monSaveClusterInfo(); + } + monSaveVgroupsInfo(); + monSaveSlowQueryInfo(); + monSaveDisksInfo(); + monSaveGrantsInfo(); + monSaveHttpReqInfo(); monSaveSystemInfo(); } } @@ -193,9 +346,9 @@ static void monBuildMonitorSql(char *sql, int32_t cmd) { if (cmd == MON_CMD_CREATE_DB) { snprintf(sql, SQL_LENGTH, - "create database if not exists %s replica 1 days 10 keep %s cache %d " + "create database if not exists %s replica %d days 10 keep %s cache %d " "blocks %d precision 'us'", - 
tsMonitorDbName, keepValue, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS); + tsMonitorDbName, tsMonitorReplica, keepValue, TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MIN_TOTAL_BLOCKS); } else if (cmd == MON_CMD_CREATE_MT_DN) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn(ts timestamp" @@ -204,7 +357,7 @@ static void monBuildMonitorSql(char *sql, int32_t cmd) { ", disk_used float, disk_total int" ", band_speed float" ", io_read float, io_write float" - ", req_http int, req_select int, req_insert int" + ", req_http bigint, req_select bigint, req_insert bigint" ") tags (dnodeid int, fqdn binary(%d))", tsMonitorDbName, TSDB_FQDN_LEN); } else if (cmd == MON_CMD_CREATE_TB_DN) { @@ -231,14 +384,96 @@ static void monBuildMonitorSql(char *sql, int32_t cmd) { tsMonitorDbName, TSDB_DEFAULT_USER); } else if (cmd == MON_CMD_CREATE_TB_SLOWQUERY) { snprintf(sql, SQL_LENGTH, - "create table if not exists %s.slowquery(ts timestamp, username " - "binary(%d), created_time timestamp, time bigint, sql binary(%d))", - tsMonitorDbName, TSDB_TABLE_FNAME_LEN - 1, TSDB_SLOW_QUERY_SQL_LEN); + "create table if not exists %s.slowquery(ts timestamp, query_id " + "binary(%d), username binary(%d), qid binary(%d), created_time timestamp, time bigint, end_point binary(%d), sql binary(%d))", + tsMonitorDbName, QUERY_ID_LEN, TSDB_TABLE_FNAME_LEN - 1, QUERY_ID_LEN, TSDB_EP_LEN, TSDB_SLOW_QUERY_SQL_LEN); } else if (cmd == MON_CMD_CREATE_TB_LOG) { snprintf(sql, SQL_LENGTH, "create table if not exists %s.log(ts timestamp, level tinyint, " "content binary(%d), ipaddr binary(%d))", tsMonitorDbName, LOG_LEN_STR, IP_LEN_STR); + } else if (cmd == MON_CMD_CREATE_TB_CLUSTER) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.cluster_info(ts timestamp" + ", first_ep binary(%d), version binary(%d)" + ", master_uptime float, monitor_interval int" + ", dnodes_total int, dnodes_alive int" + ", mnodes_total int, mnodes_alive int" + ", vgroups_total int, vgroups_alive int" + ", vnodes_total int, vnodes_alive int" + ", connections_total int)", + tsMonitorDbName, TSDB_EP_LEN, TSDB_VERSION_LEN); + } else if (cmd == MON_CMD_CREATE_MT_DNODES) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.dnodes_info(ts timestamp" + ", uptime float" + ", cpu_engine float, cpu_system float, cpu_cores int" + ", mem_engine float, mem_system float, mem_total float" + ", disk_engine float, disk_used float, disk_total float" + ", net_in float, net_out float" + ", io_read float, io_write float" + ", io_read_disk float, io_write_disk float" + ", req_http bigint, req_http_rate float" + ", req_select bigint, req_select_rate float" + ", req_insert bigint, req_insert_success bigint, req_insert_rate float" + ", req_insert_batch bigint, req_insert_batch_success bigint, req_insert_batch_rate float" + ", errors bigint" + ", vnodes_num int" + ", masters int" + ", has_mnode bool" + ") tags (dnode_id int, dnode_ep binary(%d))", + tsMonitorDbName, TSDB_EP_LEN); + } else if (cmd == MON_CMD_CREATE_TB_DNODE) { + snprintf(sql, SQL_LENGTH, "create table if not exists %s.dnode_%d using %s.dnodes_info tags(%d, '%s')", tsMonitorDbName, + dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp); + } else if (cmd == MON_CMD_CREATE_MT_DISKS) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.disks_info(ts timestamp" + ", datadir_l0_used float, datadir_l0_total float" + ", datadir_l1_used float, datadir_l1_total float" + ", datadir_l2_used float, datadir_l2_total float" + ") tags (dnode_id int, dnode_ep binary(%d))", + tsMonitorDbName, 
TSDB_EP_LEN); + } else if (cmd == MON_CMD_CREATE_TB_DISKS) { + snprintf(sql, SQL_LENGTH, "create table if not exists %s.disks_%d using %s.disks_info tags(%d, '%s')", tsMonitorDbName, + dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp); + } else if (cmd == MON_CMD_CREATE_MT_VGROUPS) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.vgroups_info(ts timestamp" + ", database_name binary(%d)" + ", tables_num int, status binary(%d)" + ", online_vnodes tinyint" + ", dnode_ids binary(%d), dnode_roles binary(%d)" + ") tags (vgroup_id int)", + tsMonitorDbName, TSDB_DB_NAME_LEN, VGROUP_STATUS_LEN, + DNODE_INFO_LEN, DNODE_INFO_LEN); + } else if (cmd == MON_CMD_CREATE_MT_LOGS) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.logs(ts timestamp, level tinyint, " + "content binary(%d)) tags (dnode_id int, dnode_ep binary(%d))", + tsMonitorDbName, LOG_LEN_STR, TSDB_EP_LEN); + } else if (cmd == MON_CMD_CREATE_TB_DNODE_LOG) { + snprintf(sql, SQL_LENGTH, "create table if not exists %s.dnode_%d_log using %s.logs tags(%d, '%s')", tsMonitorDbName, + dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp); + } else if (cmd == MON_CMD_CREATE_TB_GRANTS) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.grants_info(ts timestamp" + ", expire_time int, timeseries_used int, timeseries_total int)", + tsMonitorDbName); + } else if (cmd == MON_CMD_CREATE_MT_RESTFUL) { + int pos = snprintf(sql, SQL_LENGTH, + "create table if not exists %s.restful_info(ts timestamp", tsMonitorDbName); + for (int i = 0; i < tListLen(monHttpStatusTable); ++i) { + pos += snprintf(sql + pos, SQL_LENGTH, ", `%s(%d)` int", + monHttpStatusTable[i].name, + monHttpStatusTable[i].code); + } + snprintf(sql + pos, SQL_LENGTH, + ") tags (dnode_id int, dnode_ep binary(%d))", + TSDB_EP_LEN); + } else if (cmd == MON_CMD_CREATE_TB_RESTFUL) { + snprintf(sql, SQL_LENGTH, "create table if not exists %s.restful_%d using %s.restful_info tags(%d, '%s')", tsMonitorDbName, + dnodeGetDnodeId(), tsMonitorDbName, dnodeGetDnodeId(), tsLocalEp); } sql[SQL_LENGTH] = 0; @@ -262,9 +497,25 @@ void monCleanupSystem() { taos_close(tsMonitor.conn); tsMonitor.conn = NULL; } + monCleanupHttpStatusHashTable(); monInfo("monitor module is cleaned up"); } +static void monGetSysStats() { + memset(&tsMonStat, 0, sizeof(SMonStat)); + bool suc = taosGetProcIO(&tsMonStat.io_read, &tsMonStat.io_write, + &tsMonStat.io_read_disk, &tsMonStat.io_write_disk); + if (!suc) { + monDebug("failed to get io info"); + } + + tsMonStat.dInfo = dnodeGetStatisInfo(); + tsMonStat.vInfo = vnodeGetStatisInfo(); + + tsMonStat.monQueryReqCnt = monFetchQueryReqCnt(); + tsMonStat.monSubmitReqCnt = monFetchSubmitReqCnt(); +} + // unit is MB static int32_t monBuildMemorySql(char *sql) { float sysMemoryUsedMB = 0; @@ -279,7 +530,7 @@ static int32_t monBuildMemorySql(char *sql) { monDebug("failed to get proc memory info"); } - return sprintf(sql, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB); + return snprintf(sql, SQL_LENGTH, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB); } // unit is % @@ -294,12 +545,12 @@ static int32_t monBuildCpuSql(char *sql) { sysCpuUsage = procCpuUsage + 0.1f; } - return sprintf(sql, ", %f, %f, %d", procCpuUsage, sysCpuUsage, tsNumOfCores); + return snprintf(sql, SQL_LENGTH, ", %f, %f, %d", procCpuUsage, sysCpuUsage, tsNumOfCores); } // unit is GB static int32_t monBuildDiskSql(char *sql) { - return sprintf(sql, ", %f, %d", tsUsedDataDirGB, (int32_t)tsTotalDataDirGB); + return 
snprintf(sql, SQL_LENGTH, ", %f, %d", tsUsedDataDirGB, (int32_t)tsTotalDataDirGB); } // unit is Kb @@ -310,22 +561,20 @@ static int32_t monBuildBandSql(char *sql) { monDebug("failed to get bandwidth speed"); } - return sprintf(sql, ", %f", bandSpeedKb); + return snprintf(sql, SQL_LENGTH, ", %f", bandSpeedKb); } static int32_t monBuildReqSql(char *sql) { - SStatisInfo info = dnodeGetStatisInfo(); - return sprintf(sql, ", %d, %d, %d)", info.httpReqNum, info.queryReqNum, info.submitReqNum); + SDnodeStatisInfo info = tsMonStat.dInfo; + return snprintf(sql, SQL_LENGTH, ", %"PRId64", %"PRId64", %"PRId64")", info.httpReqNum, info.queryReqNum, info.submitReqNum); } static int32_t monBuildIoSql(char *sql) { float readKB = 0, writeKB = 0; - bool suc = taosGetProcIO(&readKB, &writeKB); - if (!suc) { - monDebug("failed to get io info"); - } + readKB = tsMonStat.io_read; + writeKB = tsMonStat.io_write; - return sprintf(sql, ", %f, %f", readKB, writeKB); + return snprintf(sql, SQL_LENGTH, ", %f, %f", readKB, writeKB); } static void monSaveSystemInfo() { @@ -347,15 +596,783 @@ static void monSaveSystemInfo() { if (code != 0) { monError("failed to save system info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); } else { + monIncSubmitReqCnt(); monDebug("successfully to save system info, sql:%s", tsMonitor.sql); } } +static int32_t monGetRowElemCharLen(TAOS_FIELD field, char *rowElem) { + int32_t charLen = varDataLen(rowElem - VARSTR_HEADER_SIZE); + if (field.type == TSDB_DATA_TYPE_BINARY) { + assert(charLen <= field.bytes && charLen >= 0); + } else { + assert(charLen <= field.bytes * TSDB_NCHAR_SIZE && charLen >= 0); + } + + return charLen; +} + +static int32_t monBuildFirstEpSql(char *sql) { + return snprintf(sql, SQL_LENGTH, ", "SQL_STR_FMT, tsFirst); +} + +static int32_t monBuildVersionSql(char *sql) { + return snprintf(sql, SQL_LENGTH, ", "SQL_STR_FMT, version); +} + +static int32_t monBuildMasterUptimeSql(char *sql) { + int64_t masterUptime = 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show mnodes"); + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + for (int i = 0; i < num_fields; ++i) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strcmp(fields[i].name, "role") == 0 && strncmp((char *)row[i], "master", charLen) == 0) { + if (strcmp(fields[i + 1].name, "role_time") == 0) { + int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI); + //master uptime in seconds + masterUptime = (now - *(int64_t *)row[i + 1]) / 1000; + } + } + } + } + + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %" PRId64, masterUptime); +} + +static int32_t monBuildMonIntervalSql(char *sql) { + return snprintf(sql, SQL_LENGTH, ", %d", tsMonitorInterval); +} + +static int32_t monBuildDnodesTotalSql(char *sql) { + int32_t totalDnodes = 0, totalDnodesAlive = 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show dnodes"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show dnodes, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + totalDnodes++; + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "status") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], "ready", charLen) == 0) { + 
totalDnodesAlive++; + } + } + } + } + + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %d, %d", totalDnodes, totalDnodesAlive); +} + +static int32_t monBuildMnodesTotalSql(char *sql) { + int32_t totalMnodes = 0, totalMnodesAlive= 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show mnodes"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show mnodes, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + totalMnodes++; + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "role") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], "master", charLen) == 0 || + strncmp((char *)row[i], "slave", charLen) == 0) { + totalMnodesAlive += 1; + } + } + } + } + + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %d, %d", totalMnodes, totalMnodesAlive); +} + + +static int32_t monGetVgroupsTotalStats(char *dbName, int32_t *totalVgroups, + int32_t *totalVgroupsAlive) { + char subsql[TSDB_DB_NAME_LEN + 14]; + memset(subsql, 0, sizeof(subsql)); + snprintf(subsql, TSDB_DB_NAME_LEN + 13, "show %s.vgroups", dbName); + TAOS_RES *result = taos_query(tsMonitor.conn, subsql); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show %s.vgroups, reason:%s", dbName, tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + *totalVgroups += 1; + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "status") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], "ready", charLen) == 0) { + *totalVgroupsAlive += 1; + } + } + } + } + taos_free_result(result); + + return 0; +} + +static int32_t monBuildVgroupsTotalSql(char *sql) { + int32_t totalVgroups = 0, totalVgroupsAlive = 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show databases"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show databases, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + for (int i = 0; i < num_fields; ++i) { + //database name + if (strcmp(fields[i].name, "name") == 0) { + monGetVgroupsTotalStats((char *)row[i], &totalVgroups, &totalVgroupsAlive); + } + } + } + + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %d, %d", totalVgroups, totalVgroupsAlive); +} + +static int32_t monGetVnodesTotalStats(char *ep, int32_t *totalVnodes, + int32_t *totalVnodesAlive) { + char subsql[TSDB_EP_LEN + 15]; + memset(subsql, 0, sizeof(subsql)); + snprintf(subsql, TSDB_EP_LEN, "show vnodes "SQL_STR_FMT, ep); + TAOS_RES *result = taos_query(tsMonitor.conn, subsql); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show vnodes "SQL_STR_FMT", reason:%s", ep, tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + *totalVnodes += 1; + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, 
"status") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], "master", charLen) == 0 || + strncmp((char *)row[i], "slave", charLen) == 0) { + *totalVnodesAlive += 1; + } + } + } + } + taos_free_result(result); + + return 0; +} + +static int32_t monBuildVnodesTotalSql(char *sql) { + int32_t totalVnodes = 0, totalVnodesAlive = 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show dnodes"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show dnodes, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + for (int i = 0; i < num_fields; ++i) { + //database name + if (strcmp(fields[i].name, "end_point") == 0) { + monGetVnodesTotalStats((char *)row[i], &totalVnodes, &totalVnodesAlive); + } + } + } + + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %d, %d", totalVnodes, totalVnodesAlive); +} + +static int32_t monBuildConnsTotalSql(char *sql) { + int32_t totalConns = 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show connections"); + TAOS_ROW row; + + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show connections, reason:%s", tstrerror(code)); + } + + while ((row = taos_fetch_row(result))) { + totalConns++; + } + + taos_free_result(result); + return snprintf(sql, SQL_LENGTH, ", %d)", totalConns); +} + +static int32_t monBuildDnodeUptimeSql(char *sql) { + int64_t dnodeUptime = 0; + TAOS_RES *result = taos_query(tsMonitor.conn, "show dnodes"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show dnodes, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + bool is_self_ep = false; + while ((row = taos_fetch_row(result))) { + if (is_self_ep) { + break; + } + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "end_point") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], tsLocalEp, charLen) == 0) { + is_self_ep = true; + } + } + if (strcmp(fields[i].name, "create_time") == 0) { + if (is_self_ep) { + int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI); + //dnodes uptime in seconds + dnodeUptime = (now - *(int64_t *)row[i]) / 1000; + } + } + } + } + + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %" PRId64, dnodeUptime); +} + +static int32_t monBuildDnodeIoSql(char *sql) { + float rcharKB = 0, wcharKB = 0; + float rbyteKB = 0, wbyteKB = 0; + rcharKB = tsMonStat.io_read; + wcharKB = tsMonStat.io_write; + rbyteKB = tsMonStat.io_read_disk; + wbyteKB = tsMonStat.io_write_disk; + + return snprintf(sql, SQL_LENGTH, ", %f, %f, %f, %f", rcharKB / 1024, wcharKB / 1024, + rbyteKB / 1024, wbyteKB / 1024); +} + +static int32_t monBuildNetworkIOSql(char *sql) { + float netInKb = 0, netOutKb = 0; + bool suc = taosGetNetworkIO(&netInKb, &netOutKb); + if (!suc) { + monDebug("failed to get network I/O info"); + } + + return snprintf(sql, SQL_LENGTH, ", %f, %f", netInKb / 1024, + netOutKb / 1024); +} + +static int32_t monBuildDnodeReqSql(char *sql) { + int64_t queryReqNum = tsMonStat.dInfo.queryReqNum - tsMonStat.monQueryReqCnt; + int64_t submitReqNum = tsMonStat.dInfo.submitReqNum; + int64_t submitRowNum = 
tsMonStat.vInfo.submitRowNum; + int64_t submitReqSucNum = tsMonStat.vInfo.submitReqSucNum; + int64_t submitRowSucNum = tsMonStat.vInfo.submitRowSucNum; + + float interval = (float)(tsMonitorInterval * 1.0); + float httpReqRate = tsMonStat.dInfo.httpReqNum / interval; + float queryReqRate = queryReqNum / interval; + float submitReqRate = submitReqNum / interval; + float submitRowRate = submitRowNum / interval; + + return snprintf(sql, SQL_LENGTH, ", %"PRId64", %f, %"PRId64", %f, %"PRId64", %"PRId64", %f, %"PRId64", %"PRId64", %f", + tsMonStat.dInfo.httpReqNum, httpReqRate, + queryReqNum, queryReqRate, + submitRowNum, submitRowSucNum, submitRowRate, + submitReqNum, submitReqSucNum, submitReqRate); +} + +static int32_t monBuildDnodeErrorsSql(char *sql) { + int64_t dnode_err = dnodeGetDnodeError(); + return snprintf(sql, SQL_LENGTH, ", %"PRId64, dnode_err); +} + +static int32_t monBuildDnodeVnodesSql(char *sql) { + int32_t vnodeNum = 0, masterNum = 0; + char sqlStr[TSDB_EP_LEN + 15]; + memset(sqlStr, 0, sizeof(sqlStr)); + snprintf(sqlStr, TSDB_EP_LEN + 14, "show vnodes "SQL_STR_FMT, tsLocalEp); + TAOS_RES *result = taos_query(tsMonitor.conn, sqlStr); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show vnodes "SQL_STR_FMT", reason:%s", tsLocalEp, tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + vnodeNum += 1; + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "status") == 0) { + int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], "master", charLen) == 0) { + masterNum += 1; + } + } + } + } + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %d, %d", vnodeNum, masterNum); +} + +static int32_t monBuildDnodeMnodeSql(char *sql) { + bool has_mnode = false, has_mnode_row; + TAOS_RES *result = taos_query(tsMonitor.conn, "show mnodes"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show mnodes, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t charLen; + while ((row = taos_fetch_row(result))) { + has_mnode_row = false; + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "end_point") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], tsLocalEp, charLen) == 0) { + has_mnode = true; + has_mnode_row = true; + } + } else if (strcmp(fields[i].name, "role") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + if (strncmp((char *)row[i], "master", charLen) == 0) { + if (has_mnode_row) { + monHasMnodeMaster = true; + } + } + } + } + } + taos_free_result(result); + + return snprintf(sql, SQL_LENGTH, ", %s)", has_mnode ? 
"true" : "false"); +} + +static int32_t monBuildDnodeDiskSql(char *sql) { + float taosdDataDirGB = 0; + return snprintf(sql, SQL_LENGTH, ", %f, %f, %f", taosdDataDirGB, tsUsedDataDirGB, tsTotalDataDirGB); +} + +static int32_t monBuildDiskTierSql(char *sql) { + const int8_t numTiers = 3; + const double unit = 1024 * 1024 * 1024; + SFSMeta fsMeta; + STierMeta* tierMetas = calloc(numTiers, sizeof(STierMeta)); + tfsUpdateInfo(&fsMeta, tierMetas, numTiers); + int32_t pos = 0; + + for (int i = 0; i < numTiers; ++i) { + pos += snprintf(sql + pos, SQL_LENGTH, ", %f, %f", (float)(tierMetas[i].used / unit), (float)(tierMetas[i].size / unit)); + } + pos += snprintf(sql + pos, SQL_LENGTH, ")"); + + free(tierMetas); + + return pos; +} + +static void monSaveClusterInfo() { + int64_t ts = taosGetTimestampUs(); + char * sql = tsMonitor.sql; + int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.cluster_info values(%" PRId64, tsMonitorDbName, ts); + + pos += monBuildFirstEpSql(sql + pos); + pos += monBuildVersionSql(sql + pos); + pos += monBuildMasterUptimeSql(sql + pos); + pos += monBuildMonIntervalSql(sql + pos); + pos += monBuildDnodesTotalSql(sql + pos); + pos += monBuildMnodesTotalSql(sql + pos); + pos += monBuildVgroupsTotalSql(sql + pos); + pos += monBuildVnodesTotalSql(sql + pos); + pos += monBuildConnsTotalSql(sql + pos); + + monDebug("save cluster, sql:%s", sql); + + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + int32_t code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monError("failed to save cluster info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save cluster info, sql:%s", tsMonitor.sql); + } +} + +static void monSaveDnodesInfo() { + int64_t ts = taosGetTimestampUs(); + char * sql = tsMonitor.sql; + int64_t intervalUs = tsMonitorInterval * 1000000; + ts = ts / intervalUs * intervalUs; //To align timestamp to integer multiples of monitor interval + int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.dnode_%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts); + + pos += monBuildDnodeUptimeSql(sql + pos); + pos += monBuildCpuSql(sql + pos); + pos += monBuildMemorySql(sql + pos); + pos += monBuildDnodeDiskSql(sql + pos); + pos += monBuildNetworkIOSql(sql + pos); + pos += monBuildDnodeIoSql(sql + pos); + pos += monBuildDnodeReqSql(sql + pos); + pos += monBuildDnodeErrorsSql(sql + pos); + pos += monBuildDnodeVnodesSql(sql + pos); + pos += monBuildDnodeMnodeSql(sql + pos); + + monDebug("save dnodes, sql:%s", sql); + + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + int32_t code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monError("failed to save dnode_%d info, reason:%s, sql:%s", dnodeGetDnodeId(), tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save dnode_%d info, sql:%s", dnodeGetDnodeId(), tsMonitor.sql); + } +} + + +static int32_t checkCreateVgroupTable(int32_t vgId) { + char subsql[256]; + bool create_table = false; + int32_t code = TSDB_CODE_SUCCESS; + + memset(subsql, 0, sizeof(subsql)); + snprintf(subsql, 255, "describe %s.vgroup_%d", tsMonitorDbName, vgId); + + TAOS_RES *result = taos_query(tsMonitor.conn, subsql); + code = taos_errno(result); + if (code != 0) { + create_table = true; + snprintf(subsql, sizeof(subsql), "create table if not exists %s.vgroup_%d using %s.vgroups_info tags(%d)", + tsMonitorDbName, vgId, tsMonitorDbName, vgId); + monError("table vgroup_%d not exist, create table 
vgroup_%d", vgId, vgId); + } + taos_free_result(result); + + if (create_table == true) { + result = taos_query(tsMonitor.conn, subsql); + code = taos_errno(result); + taos_free_result(result); + } + + return code; +} + +static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { + char v_dnode_ids[256] = {0}, v_dnode_status[1024] = {0}; + int64_t ts = taosGetTimestampUs(); + + memset(sql, 0, SQL_LENGTH + 1); + snprintf(sql, SQL_LENGTH, "show %s.vgroups", dbName); + TAOS_RES *result = taos_query(tsMonitor.conn, sql); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show %s.vgroups, reason:%s", dbName, tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t charLen; + while ((row = taos_fetch_row(result))) { + int32_t vgId; + int32_t pos = 0; + + for (int i = 0; i < num_fields; ++i) { + const char *v_dnode_str = strchr(fields[i].name, '_'); + if (strcmp(fields[i].name, "vgId") == 0) { + vgId = *(int32_t *)row[i]; + if (checkCreateVgroupTable(vgId) == TSDB_CODE_SUCCESS) { + memset(sql, 0, SQL_LENGTH + 1); + pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", "SQL_STR_FMT, + tsMonitorDbName, vgId, ts, dbName); + } else { + return TSDB_CODE_SUCCESS; + } + } else if (strcmp(fields[i].name, "tables") == 0) { + pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); + } else if (strcmp(fields[i].name, "status") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + } else if (strcmp(fields[i].name, "onlines") == 0) { + pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); + } else if (v_dnode_str && strcmp(v_dnode_str, "_dnode") == 0) { + snprintf(v_dnode_ids, sizeof(v_dnode_ids), "%d;", *(int16_t *)row[i]); + } else if (v_dnode_str && strcmp(v_dnode_str, "_status") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + snprintf(v_dnode_status, charLen + 1, "%s;", (char *)row[i]); + } else if (strcmp(fields[i].name, "compacting") == 0) { + //flush dnode_ids and dnode_role in to sql + pos += snprintf(sql + pos, SQL_LENGTH, ", "SQL_STR_FMT", "SQL_STR_FMT")", v_dnode_ids, v_dnode_status); + } + } + monDebug("save vgroups, sql:%s", sql); + TAOS_RES *res = taos_query(tsMonitor.conn, sql); + code = taos_errno(res); + taos_free_result(res); + if (code != 0) { + monError("failed to save vgroup_%d info, reason:%s, sql:%s", vgId, tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save vgroup_%d info, sql:%s", vgId, tsMonitor.sql); + } + } + taos_free_result(result); + + return TSDB_CODE_SUCCESS; +} + +static void monSaveVgroupsInfo() { + char * sql = tsMonitor.sql; + TAOS_RES *result = taos_query(tsMonitor.conn, "show databases"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show databases, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + for (int i = 0; i < num_fields; ++i) { + //database name + if (strcmp(fields[i].name, "name") == 0) { + monBuildVgroupsInfoSql(sql, (char *)row[i]); + } + } + } + + taos_free_result(result); +} + +static void monSaveSlowQueryInfo() { + int64_t ts = taosGetTimestampUs(); + char * sql = 
tsMonitor.sql; + int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.slowquery values(%" PRId64, tsMonitorDbName, ts); + bool has_slowquery = false; + + TAOS_RES *result = taos_query(tsMonitor.conn, "show queries"); + int32_t code = taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + monError("failed to execute cmd: show queries, reason:%s", tstrerror(code)); + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + int32_t charLen; + while ((row = taos_fetch_row(result))) { + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "query_id") == 0) { + has_slowquery = true; + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + } else if (strcmp(fields[i].name, "user") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + } else if (strcmp(fields[i].name, "qid") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + } else if (strcmp(fields[i].name, "created_time") == 0) { + int64_t create_time = *(int64_t *)row[i]; + create_time = convertTimePrecision(create_time, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO); + pos += snprintf(sql + pos, SQL_LENGTH, ", %" PRId64 "", create_time); + } else if (strcmp(fields[i].name, "time") == 0) { + pos += snprintf(sql + pos, SQL_LENGTH, ", %" PRId64 "", *(int64_t *)row[i]); + } else if (strcmp(fields[i].name, "ep") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); + } else if (strcmp(fields[i].name, "sql") == 0) { + charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); + pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 2, ", "SQL_STR_FMT")", (char *)row[i]); + } + } + } + + monDebug("save slow query, sql:%s", sql); + taos_free_result(result); + if (!has_slowquery) { + return; + } + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monError("failed to save slowquery info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save slowquery info, sql:%s", tsMonitor.sql); + } + +} + +static void monSaveDisksInfo() { + int64_t ts = taosGetTimestampUs(); + char * sql = tsMonitor.sql; + int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.disks_%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts); + + monBuildDiskTierSql(sql + pos); + + monDebug("save disk, sql:%s", sql); + + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + int32_t code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monError("failed to save disks_%d info, reason:%s, sql:%s", dnodeGetDnodeId(), tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save disks_%d info, sql:%s", dnodeGetDnodeId(), tsMonitor.sql); + } +} + +static void monSaveGrantsInfo() { + int64_t ts = taosGetTimestampUs(); + char * sql = tsMonitor.sql; + int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.grants_info values(%" PRId64, tsMonitorDbName, ts); + + TAOS_RES *result = taos_query(tsMonitor.conn, "show grants"); + int32_t code = 
taos_errno(result); + if (code != TSDB_CODE_SUCCESS) { + taos_free_result(result); + return; + } + + TAOS_ROW row; + int32_t num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result))) { + for (int i = 0; i < num_fields; ++i) { + if (strcmp(fields[i].name, "expire time") == 0) { + char *expStr = (char *)row[i]; + if (expStr[0] == 'u') { + pos += snprintf(sql + pos, SQL_LENGTH, ", NULL"); + } else { + struct tm expTime = {0}; + strptime((char *)row[i], "%Y-%m-%d %H:%M:%S", &expTime); + int32_t expTimeSec = (int32_t)mktime(&expTime); + pos += snprintf(sql + pos, SQL_LENGTH, ", %d", expTimeSec - taosGetTimestampSec()); + } + } else if (strcmp(fields[i].name, "timeseries") == 0) { + char *timeseries = (char *)row[i]; + if (timeseries[0] == 'u') { + pos += snprintf(sql + pos, SQL_LENGTH, ", NULL, NULL)"); + } else { + int32_t timeseries_used = strtol(timeseries, NULL, 10); + timeseries = strchr(timeseries, '/'); + int32_t timeseries_total = strtol(timeseries + 1, NULL, 10); + pos += snprintf(sql + pos, SQL_LENGTH, ", %d, %d)", timeseries_used, timeseries_total); + } + } + } + } + + monDebug("save grants, sql:%s", sql); + taos_free_result(result); + + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monError("failed to save grants info, reason:%s, sql:%s", tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save grants info, sql:%s", tsMonitor.sql); + } + +} + +static void monSaveHttpReqInfo() { + int64_t ts = taosGetTimestampUs(); + char * sql = tsMonitor.sql; + int32_t pos = snprintf(sql, SQL_LENGTH, "insert into %s.restful_%d values(%" PRId64, tsMonitorDbName, dnodeGetDnodeId(), ts); + + for (int32_t i = 0; i < tListLen(monHttpStatusTable); ++i) { + int32_t status = dnodeGetHttpStatusInfo(i); + pos += snprintf(sql + pos, SQL_LENGTH, ", %d", status); + } + pos += snprintf(sql + pos, SQL_LENGTH, ")"); + dnodeClearHttpStatusInfo(); + + monDebug("save http req, sql:%s", sql); + + void *res = taos_query(tsMonitor.conn, tsMonitor.sql); + int32_t code = taos_errno(res); + taos_free_result(res); + + if (code != 0) { + monError("failed to save restful_%d info, reason:%s, sql:%s", dnodeGetDnodeId(), tstrerror(code), tsMonitor.sql); + } else { + monIncSubmitReqCnt(); + monDebug("successfully to save restful_%d info, sql:%s", dnodeGetDnodeId(), tsMonitor.sql); + } +} + static void monExecSqlCb(void *param, TAOS_RES *result, int32_t code) { int32_t c = taos_errno(result); if (c != TSDB_CODE_SUCCESS) { monError("save %s failed, reason:%s", (char *)param, tstrerror(c)); } else { + monIncSubmitReqCnt(); int32_t rows = taos_affected_rows(result); monDebug("save %s succ, rows:%d", (char *)param, rows); } @@ -367,7 +1384,7 @@ void monSaveAcctLog(SAcctMonitorObj *pMon) { if (tsMonitor.state != MON_STATE_INITED) return; char sql[1024] = {0}; - sprintf(sql, + snprintf(sql, 1023, "insert into %s.acct_%s using %s.acct tags('%s') values(now" ", %" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 @@ -411,13 +1428,34 @@ void monSaveLog(int32_t level, const char *const format, ...) { va_end(argpointer); if (len > max_length) len = max_length; - len += sprintf(sql + len, "', '%s')", tsLocalEp); + len += snprintf(sql + len, SQL_LENGTH, "', '%s')", tsLocalEp); sql[len++] = 0; monDebug("save log, sql: %s", sql); taos_query_a(tsMonitor.conn, sql, monExecSqlCb, "log"); } +void monSaveDnodeLog(int32_t level, const char *const format, ...) 
{ + if (tsMonitor.state != MON_STATE_INITED) return; + + va_list argpointer; + char sql[SQL_LENGTH] = {0}; + int32_t max_length = SQL_LENGTH - 30; + int32_t len = snprintf(sql, (size_t)max_length, "insert into %s.dnode_%d_log values(%" PRId64 ", %d,'", tsMonitorDbName, + dnodeGetDnodeId(), taosGetTimestampUs(), level); + + va_start(argpointer, format); + len += vsnprintf(sql + len, (size_t)(max_length - len), format, argpointer); + va_end(argpointer); + if (len > max_length) len = max_length; + + len += snprintf(sql + len, SQL_LENGTH, "')"); + sql[len++] = 0; + + monDebug("save dnode log, sql: %s", sql); + taos_query_a(tsMonitor.conn, sql, monExecSqlCb, "log"); +} + void monExecuteSQL(char *sql) { if (tsMonitor.state != MON_STATE_INITED) return; @@ -434,3 +1472,19 @@ void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void monDebug("execute sql:%s", sql); taos_query_a(tsMonitor.conn, sql, callback, param); } + +void monIncQueryReqCnt() { + atomic_fetch_add_32(&monQueryReqNum, 1); +} + +void monIncSubmitReqCnt() { + atomic_fetch_add_32(&monSubmitReqNum, 1); +} + +int32_t monFetchQueryReqCnt() { + return atomic_exchange_32(&monQueryReqNum, 0); +} + +int32_t monFetchSubmitReqCnt() { + return atomic_exchange_32(&monSubmitReqNum, 0); +} diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index 4b57843708ac8d1c24c69e68fe406b0edbeeabd2..a815942fbedb4f3b99e3595c3960d931ddde192a 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -11,11 +11,19 @@ SET_SOURCE_FILES_PROPERTIES(src/sql.c PROPERTIES COMPILE_FLAGS -w) TARGET_LINK_LIBRARIES(query tsdb tutil lua) IF (TD_LINUX) - TARGET_LINK_LIBRARIES(query m rt lua) + IF (TD_BUILD_LUA) + TARGET_LINK_LIBRARIES(query m rt lua) + ELSE () + TARGET_LINK_LIBRARIES(query m rt) + ENDIF () ADD_SUBDIRECTORY(tests) ENDIF () IF (TD_DARWIN) - TARGET_LINK_LIBRARIES(query m lua) + IF (TD_BUILD_LUA) + TARGET_LINK_LIBRARIES(query m lua) + ELSE () + TARGET_LINK_LIBRARIES(query m) + ENDIF () ADD_SUBDIRECTORY(tests) ENDIF () diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 4ba6dd2c536d9bbf3b5e2b3430293671a0ecb7b7..c9a022d7a1210b31b81bf3895a9b804a03bd30ae 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -183,7 +183,7 @@ typedef struct SQLFunctionCtx { int16_t inputBytes; int16_t outputType; - int16_t outputBytes; // size of results, determined by function and input column data type + int32_t outputBytes; // size of results, determined by function and input column data type int32_t interBufBytes; // internal buffer size bool hasNull; // null value exist in current block bool requireNull; // require null in some function @@ -227,7 +227,7 @@ typedef struct SAggFunctionInfo { #define GET_RES_INFO(ctx) ((ctx)->resultInfo) int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, - int16_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo); + int32_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo); int32_t isValidFunction(const char* name, int32_t len); #define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0) diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index d4a9ed0cbcef0a52085dcd60569270037fb57908..ccdfd5c05994b71bd911c3a66d02dc1ffa58a474 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -75,8 +75,15 @@ typedef struct tFilePagesItem { tFilePage item; } tFilePagesItem; +typedef struct SSchema1 { + 
uint8_t type; + char name[TSDB_COL_NAME_LEN]; + int16_t colId; + int32_t bytes; +} SSchema1; + typedef struct SSchemaEx { - struct SSchema field; + SSchema1 field; int32_t offset; } SSchemaEx; @@ -178,7 +185,7 @@ bool tExtMemBufferIsAllDataInMem(tExtMemBuffer *pMemBuffer); * @param blockCapacity * @return */ -SColumnModel *createColumnModel(SSchema *fields, int32_t numOfCols, int32_t blockCapacity); +SColumnModel *createColumnModel(SSchema1 *fields, int32_t numOfCols, int32_t blockCapacity); /** * @@ -199,7 +206,7 @@ void destroyColumnModel(SColumnModel *pModel); void tColModelCompact(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity); void tColModelErase(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxCapacity, int32_t s, int32_t e); -SSchema *getColumnModelSchema(SColumnModel *pColumnModel, int32_t index); +SSchema1 *getColumnModelSchema(SColumnModel *pColumnModel, int32_t index); int16_t getColumnModelOffset(SColumnModel *pColumnModel, int32_t index); @@ -229,6 +236,9 @@ typedef int (*__col_compar_fn_t)(tOrderDescriptor *, int32_t numOfRows, int32_t void tColDataQSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType); +void tColDataMergeSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType); + + void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn); int32_t compare_sa(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data); diff --git a/src/query/inc/qScript.h b/src/query/inc/qScript.h index 574bb51368afeaeddef5fbd5c5bd8469fbe0cdef..2dc9b5812bbfa34dcebdde5438516d3be42a51d2 100644 --- a/src/query/inc/qScript.h +++ b/src/query/inc/qScript.h @@ -15,7 +15,7 @@ #ifndef TDENGINE_QSCRIPT_H #define TDENGINE_QSCRIPT_H - +#ifdef LUA_EMBEDDED #include #include #include @@ -78,5 +78,5 @@ void destroyScriptCtx(void *pScriptCtx); int32_t scriptEnvPoolInit(); void scriptEnvPoolCleanup(); bool isValidScript(char *script, int32_t len); - +#endif //LUA_EMBEDDED #endif //TDENGINE_QSCRIPT_H diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index da3df98ca337625916471f331858301a28748d0a..eb3a06e01d7034ebe4ee474574aa077dcbb5e87e 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -281,7 +281,7 @@ typedef struct tSqlExprItem { SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); -SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); +SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder, bool needRmquoteEscape); SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias); void *destroyRelationInfo(SRelationInfo* pFromInfo); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index fedf8fead0dd31fc0d71e2b3b1674af92a30f077..30441334210727a87f8e1a042981be89b8de22ef 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -253,7 +253,7 @@ acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K intitemlist(A) ::= intitemlist(X) COMMA intitem(Y). { A = tVariantListAppend(X, &Y, -1); } intitemlist(A) ::= intitem(X). { A = tVariantListAppend(NULL, &X, -1); } -intitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +intitem(A) ::= INTEGER(X). 
{ toTSDBType(X.type); tVariantCreate(&A, &X, true); } %type keep {SArray*} %destructor keep {taosArrayDestroy($$);} @@ -438,39 +438,39 @@ column(A) ::= ids(X) typename(Y). { tagitemlist(A) ::= tagitemlist(X) COMMA tagitem(Y). { A = tVariantListAppend(X, &Y, -1); } tagitemlist(A) ::= tagitem(X). { A = tVariantListAppend(NULL, &X, -1); } -tagitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= FLOAT(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= STRING(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= BOOL(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= NULL(X). { X.type = 0; tVariantCreate(&A, &X); } -tagitem(A) ::= NOW(X). { X.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&A, &X);} +tagitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } +tagitem(A) ::= FLOAT(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } +tagitem(A) ::= STRING(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } +tagitem(A) ::= BOOL(X). { toTSDBType(X.type); tVariantCreate(&A, &X, true); } +tagitem(A) ::= NULL(X). { X.type = 0; tVariantCreate(&A, &X, true); } +tagitem(A) ::= NOW(X). { X.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&A, &X, true);} tagitem(A) ::= MINUS(X) INTEGER(Y).{ X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X); + tVariantCreate(&A, &X, true); } tagitem(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X); + tVariantCreate(&A, &X, true); } tagitem(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X); + tVariantCreate(&A, &X, true); } tagitem(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = Y.type; toTSDBType(X.type); - tVariantCreate(&A, &X); + tVariantCreate(&A, &X, true); } //////////////////////// The SELECT statement ///////////////////////////////// @@ -609,7 +609,7 @@ fill_opt(N) ::= . { N = 0; } fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { tVariant A = {0}; toTSDBType(Y.type); - tVariantCreate(&A, &Y); + tVariantCreate(&A, &Y, true); tVariantListInsert(X, &A, -1, 0); N = X; @@ -617,7 +617,7 @@ fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { fill_opt(N) ::= FILL LP ID(Y) RP. { toTSDBType(Y.type); - N = tVariantListAppendToken(NULL, &Y, -1); + N = tVariantListAppendToken(NULL, &Y, -1, true); } %type sliding_opt {SStrToken} @@ -649,7 +649,7 @@ item(A) ::= ids(X) cpxName(Y). { toTSDBType(X.type); X.n += Y.n; - tVariantCreate(&A, &X); + tVariantCreate(&A, &X, true); } %type sortorder {int} @@ -796,7 +796,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { X.n += F.n; toTSDBType(A.type); - SArray* K = tVariantListAppendToken(NULL, &A, -1); + SArray* K = tVariantListAppendToken(NULL, &A, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -818,7 +818,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -828,10 +828,10 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). 
{ X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); toTSDBType(Z.type); - A = tVariantListAppendToken(A, &Z, -1); + A = tVariantListAppendToken(A, &Z, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -841,7 +841,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); A = tVariantListAppend(A, &Z, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); @@ -865,7 +865,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). { X.n += F.n; toTSDBType(A.type); - SArray* K = tVariantListAppendToken(NULL, &A, -1); + SArray* K = tVariantListAppendToken(NULL, &A, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -887,7 +887,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(Z) DROP TAG ids(Y). { X.n += Z.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -897,10 +897,10 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); toTSDBType(Z.type); - A = tVariantListAppendToken(A, &Z, -1); + A = tVariantListAppendToken(A, &Z, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); @@ -910,7 +910,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). 
{ X.n += F.n; toTSDBType(Y.type); - SArray* A = tVariantListAppendToken(NULL, &Y, -1); + SArray* A = tVariantListAppendToken(NULL, &Y, -1, true); A = tVariantListAppend(A, &Z, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 3bac631c5466a7d3a1823e6e26882105d983ccc5..f26b3cda1a56df698db0db1465bd6116726ca0ae 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -196,8 +196,16 @@ typedef struct { char *taglists; } SSampleFuncInfo; +typedef struct { + bool valueAssigned; + union { + int64_t i64Prev; + double d64Prev; + }; +} SDiffFuncInfo; + int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, - int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo) { + int32_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo) { if (!isValidDataType(dataType)) { qError("Illegal data type %d or data type length %d", dataType, dataBytes); return TSDB_CODE_TSC_INVALID_OPERATION; @@ -210,21 +218,24 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND) { *type = (int16_t)dataType; - *bytes = (int16_t)dataBytes; + *bytes = dataBytes; if (functionId == TSDB_FUNC_INTERP) { *interBytes = sizeof(SInterpInfoDetail); + } else if (functionId == TSDB_FUNC_DIFF) { + *interBytes = sizeof(SDiffFuncInfo); } else { *interBytes = 0; } + return TSDB_CODE_SUCCESS; } // (uid, tid) + VGID + TAGSIZE + VARSTR_HEADER_SIZE if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct *type = TSDB_DATA_TYPE_BINARY; - *bytes = (int16_t)(dataBytes + sizeof(int16_t) + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t) + VARSTR_HEADER_SIZE); + *bytes = (dataBytes + sizeof(int16_t) + sizeof(int64_t) + sizeof(int32_t) + sizeof(int32_t) + VARSTR_HEADER_SIZE); *interBytes = 0; return TSDB_CODE_SUCCESS; } @@ -302,7 +313,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) { *type = TSDB_DATA_TYPE_BINARY; - *bytes = (int16_t)(dataBytes + DATA_SET_FLAG_SIZE); + *bytes = (dataBytes + DATA_SET_FLAG_SIZE); *interBytes = *bytes; return TSDB_CODE_SUCCESS; @@ -325,13 +336,13 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { *type = TSDB_DATA_TYPE_BINARY; - *bytes = (int16_t)(sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param); + *bytes = (sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param); *interBytes = *bytes; return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_SAMPLE) { *type = TSDB_DATA_TYPE_BINARY; - *bytes = (int16_t)(sizeof(SSampleFuncInfo) + dataBytes*param + sizeof(int64_t)*param + extLength*param); + *bytes = (sizeof(SSampleFuncInfo) + dataBytes*param + sizeof(int64_t)*param + extLength*param); *interBytes = *bytes; return TSDB_CODE_SUCCESS; @@ -344,14 +355,14 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } else if (functionId == TSDB_FUNC_APERCT) { *type = TSDB_DATA_TYPE_BINARY; int16_t bytesHist = sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1) + sizeof(SHistogramInfo) + sizeof(SAPercentileInfo); - int16_t bytesDigest = 
(int16_t)(sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); + int32_t bytesDigest = (int32_t) (sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); *bytes = MAX(bytesHist, bytesDigest); *interBytes = *bytes; return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_LAST_ROW) { *type = TSDB_DATA_TYPE_BINARY; - *bytes = (int16_t)(sizeof(SLastrowInfo) + dataBytes); + *bytes = (sizeof(SLastrowInfo) + dataBytes); *interBytes = *bytes; return TSDB_CODE_SUCCESS; @@ -379,7 +390,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); int16_t bytesHist = sizeof(SAPercentileInfo) + sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1); - int16_t bytesDigest = (int16_t)(sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); + int32_t bytesDigest = (int32_t) (sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); *interBytes = MAX(bytesHist, bytesDigest); return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_TWA) { @@ -416,31 +427,31 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *interBytes = sizeof(SStddevInfo); } else if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) { *type = (int16_t)dataType; - *bytes = (int16_t)dataBytes; + *bytes = dataBytes; *interBytes = dataBytes + DATA_SET_FLAG_SIZE; } else if (functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_LAST) { *type = (int16_t)dataType; - *bytes = (int16_t)dataBytes; - *interBytes = (int16_t)(dataBytes + sizeof(SFirstLastInfo)); + *bytes = dataBytes; + *interBytes = (dataBytes + sizeof(SFirstLastInfo)); } else if (functionId == TSDB_FUNC_SPREAD) { *type = (int16_t)TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); *interBytes = sizeof(SSpreadInfo); } else if (functionId == TSDB_FUNC_PERCT) { *type = (int16_t)TSDB_DATA_TYPE_DOUBLE; - *bytes = (int16_t)sizeof(double); - *interBytes = (int16_t)sizeof(SPercentileInfo); + *bytes = sizeof(double); + *interBytes = sizeof(SPercentileInfo); } else if (functionId == TSDB_FUNC_LEASTSQR) { *type = TSDB_DATA_TYPE_BINARY; *bytes = MAX(TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE, sizeof(SLeastsquaresInfo)); // string *interBytes = *bytes; } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { *type = TSDB_DATA_TYPE_BINARY; - *bytes = (int16_t)(dataBytes + sizeof(SFirstLastInfo)); + *bytes = (dataBytes + sizeof(SFirstLastInfo)); *interBytes = *bytes; } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { *type = (int16_t)dataType; - *bytes = (int16_t)dataBytes; + *bytes = dataBytes; size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param; @@ -448,12 +459,12 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *interBytes = (int32_t)size; } else if (functionId == TSDB_FUNC_SAMPLE) { *type = (int16_t)dataType; - *bytes = (int16_t)dataBytes; + *bytes = dataBytes; size_t size = sizeof(SSampleFuncInfo) + dataBytes*param + sizeof(int64_t)*param + extLength*param; *interBytes = (int32_t)size; } else if (functionId == TSDB_FUNC_LAST_ROW) { *type = (int16_t)dataType; - *bytes = (int16_t)dataBytes; + *bytes = dataBytes; *interBytes = dataBytes; } else if (functionId == TSDB_FUNC_STDDEV_DST) { *type = TSDB_DATA_TYPE_BINARY; @@ -2984,18 +2995,16 @@ static void full_copy_function(SQLFunctionCtx *pCtx) { } } -enum { - INITIAL_VALUE_NOT_ASSIGNED = 0, -}; static bool diff_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { if 
(!function_setup(pCtx, pResInfo)) { return false; } - // diff function require the value is set to -1 - pCtx->param[1].nType = INITIAL_VALUE_NOT_ASSIGNED; - return false; + SDiffFuncInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); + pDiffInfo->valueAssigned = false; + pDiffInfo->i64Prev = 0; + return true; } static bool deriv_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { @@ -3201,22 +3210,14 @@ static void deriv_function(SQLFunctionCtx *pCtx) { GET_RES_INFO(pCtx)->numOfRes += notNullElems; } -#define DIFF_IMPL(ctx, d, type) \ - do { \ - if ((ctx)->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { \ - (ctx)->param[1].nType = (ctx)->inputType; \ - *(type *)&(ctx)->param[1].i64 = *(type *)(d); \ - } else { \ - *(type *)(ctx)->pOutput = *(type *)(d) - (*(type *)(&(ctx)->param[1].i64)); \ - *(type *)(&(ctx)->param[1].i64) = *(type *)(d); \ - *(int64_t *)(ctx)->ptsOutputBuf = GET_TS_DATA(ctx, index); \ - } \ - } while (0); // TODO difference in date column static void diff_function(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SDiffFuncInfo *pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo); + void *data = GET_INPUT_DATA_LIST(pCtx); - bool isFirstBlock = (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED); + bool isFirstBlock = (pDiffInfo->valueAssigned == false); int32_t notNullElems = 0; @@ -3236,15 +3237,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (int32_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null + if (pDiffInfo->valueAssigned) { + *pOutput = (int32_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3258,15 +3259,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = pData[i] - pCtx->param[1].i64; // direct previous may be null + if (pDiffInfo->valueAssigned) { + *pOutput = pData[i] - pDiffInfo->i64Prev; // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3280,15 +3281,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - SET_DOUBLE_VAL(pOutput, pData[i] - pCtx->param[1].dKey); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? 
tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].dKey = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->d64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3302,15 +3303,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (float)(pData[i] - pCtx->param[1].dKey); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + *pOutput = (float)(pData[i] - pDiffInfo->d64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].dKey = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->d64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3324,15 +3325,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (int16_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + *pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3347,15 +3348,15 @@ static void diff_function(SQLFunctionCtx *pCtx) { continue; } - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - *pOutput = (int8_t)(pData[i] - pCtx->param[1].i64); // direct previous may be null + if (pDiffInfo->valueAssigned) { // initial value is not set yet + *pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null *pTimestamp = (tsList != NULL)? tsList[i]:0; pOutput += 1; pTimestamp += 1; } - pCtx->param[1].i64 = pData[i]; - pCtx->param[1].nType = pCtx->inputType; + pDiffInfo->i64Prev = pData[i]; + pDiffInfo->valueAssigned = true; notNullElems++; } break; @@ -3365,7 +3366,7 @@ static void diff_function(SQLFunctionCtx *pCtx) { } // initial value is not set yet - if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED || notNullElems <= 0) { + if (!pDiffInfo->valueAssigned || notNullElems <= 0) { /* * 1. current block and blocks before are full of null * 2. current block may be null value diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 6a89a2f823ad3ff57807561d7ead4b919851b000..7e89b3a766f0c9124417c65b83dfe55853b0f094 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1578,9 +1578,6 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn int32_t num = 0; for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) { char* val = ((char*)pColInfoData->pData) + bytes * j; - if (isNull(val, type)) { - continue; - } // Compare with the previous row of this column, and do not set the output buffer again if they are identical. 
if (pInfo->prevData == NULL) { @@ -2639,6 +2636,14 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) { *ps = ((*ps) << 1u); } + + if (*ps > 5 * 1024 * 1024) { + MIN_ROWS_PER_PAGE = 2; + *ps = DEFAULT_INTERN_BUF_PAGE_SIZE; + while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) { + *ps = ((*ps) << 1u); + } + } } #define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR) @@ -4792,8 +4797,8 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr int32_t ps = DEFAULT_PAGE_SIZE; getIntermediateBufInfo(pRuntimeEnv, &ps, &pQueryAttr->intermediateResultRowSize); - int32_t TENMB = 1024*1024*10; - int32_t code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo->qId); + int32_t TWENTYMB = 1024*1024*20; + int32_t code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TWENTYMB, pQInfo->qId); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -8240,7 +8245,7 @@ void destroyUdfInfo(SUdfInfo* pUdfInfo) { taosCloseDll(pUdfInfo->handle); tfree(pUdfInfo); } - +#ifdef LUA_EMBEDDED static char* getUdfFuncName(char* funcname, char* name, int type) { switch (type) { case TSDB_UDF_FUNC_NORMAL: @@ -8265,8 +8270,9 @@ static char* getUdfFuncName(char* funcname, char* name, int type) { return funcname; } - +#endif int32_t initUdfInfo(SUdfInfo* pUdfInfo) { +#ifdef LUA_EMBEDDED if (pUdfInfo == NULL || pUdfInfo->handle) { return TSDB_CODE_SUCCESS; } @@ -8350,7 +8356,7 @@ int32_t initUdfInfo(SUdfInfo* pUdfInfo) { return (*(udfInitFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_INIT])(&pUdfInfo->init); } } - +#endif //LUA_EMBEDDED return TSDB_CODE_SUCCESS; } diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 5994099a0d1ad6b1a87aa19edb6151680128f6df..ef4b56dc877ca75fe79ac5a345e99e0a3717eff5 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -521,7 +521,7 @@ static void swap(SColumnModel *pColumnModel, int32_t count, int32_t s1, char *da void *first = COLMODEL_GET_VAL(data1, pColumnModel, count, s1, i); void *second = COLMODEL_GET_VAL(data1, pColumnModel, count, s2, i); - SSchema* pSchema = &pColumnModel->pFields[i].field; + SSchema1* pSchema = &pColumnModel->pFields[i].field; tsDataSwap(first, second, pSchema->type, pSchema->bytes, buf); } } @@ -641,6 +641,89 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t printf("\n"); } +static void mergeSortIndicesByOrderColumns(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, + int32_t orderType, __col_compar_fn_t compareFn, int32_t* indices, int32_t* aux) { + if (end <= start) { + return; + } + + + int32_t mid = start + (end-start)/2; + mergeSortIndicesByOrderColumns(pDescriptor, numOfRows, start, mid, data, orderType, compareFn, indices, aux); + mergeSortIndicesByOrderColumns(pDescriptor, numOfRows, mid+1, end, data, orderType, compareFn, indices, aux); + int32_t left = start; + int32_t right = mid + 1; + int32_t k; + for (k = start; k <= end; ++k) { + if (left == mid+1) { + aux[k] = indices[right]; + ++right; + } else if (right == end+1) { + aux[k] = indices[left]; + ++left; + } else { + int32_t ret = compareFn(pDescriptor, numOfRows, indices[left], indices[right], data); + if (ret <= 0) { + aux[k] = indices[left]; + ++left; + } else { + aux[k] = indices[right]; + ++right; + } + } + } + + for (k = start; k <= end; ++k) { + indices[k] = aux[k]; + } +} + +static void 
columnwiseMergeSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char* data, + int32_t orderType, __col_compar_fn_t compareFn) { + int32_t* indices = malloc(numOfRows * sizeof(int32_t)); + int32_t* aux = malloc(numOfRows * sizeof(int32_t)); + + for (int32_t i = 0; i < numOfRows; ++i) { + indices[i] = i; + } + + mergeSortIndicesByOrderColumns(pDescriptor, numOfRows, 0, numOfRows-1, data, orderType, compareFn, indices, aux); + + int32_t numOfCols = pDescriptor->pColumnModel->numOfCols; + + int32_t prevLength = 0; + char* p = NULL; + + for(int32_t i = 0; i < numOfCols; ++i) { + int16_t colOffset = getColumnModelOffset(pDescriptor->pColumnModel, i); + int32_t colBytes = pDescriptor->pColumnModel->pFields[i].field.bytes; + // make sure memory buffer is enough + if (prevLength < colBytes) { + char *tmp = realloc(p, colBytes * numOfRows); + assert(tmp); + + p = tmp; + prevLength = colBytes; + } + + char* colData = data + colOffset * numOfRows; + memcpy(p, colData, colBytes * numOfRows); + + for(int32_t j = 0; j < numOfRows; ++j){ + char* dest = colData + colBytes * j; + + int32_t newPos = indices[j]; + char* src = p + (newPos * colBytes); + memcpy(dest, src, colBytes); + } + + } + + tfree(p); + tfree(aux); + tfree(indices); +} + static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType, __col_compar_fn_t compareFn, void* buf) { #ifdef _DEBUG_VIEW @@ -742,15 +825,15 @@ static void columnwiseQSortImpl(tOrderDescriptor *pDescriptor, int32_t numOfRows } } -void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t order) { +void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType) { // short array sort, incur another sort procedure instead of quick sort process - __col_compar_fn_t compareFn = (order == TSDB_ORDER_ASC) ? compare_sa : compare_sd; + __col_compar_fn_t compareFn = (orderType == TSDB_ORDER_ASC) ? compare_sa : compare_sd; SColumnModel* pModel = pDescriptor->pColumnModel; size_t width = 0; for(int32_t i = 0; i < pModel->numOfCols; ++i) { - SSchema* pSchema = &pModel->pFields[i].field; + SSchema1* pSchema = &pModel->pFields[i].field; if (width < pSchema->bytes) { width = pSchema->bytes; } @@ -762,16 +845,44 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta if (end - start + 1 <= 8) { tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn, buf); } else { - columnwiseQSortImpl(pDescriptor, numOfRows, start, end, data, order, compareFn, buf); + columnwiseQSortImpl(pDescriptor, numOfRows, start, end, data, orderType, compareFn, buf); } free(buf); } +void tColDataMergeSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType) { + // short array sort, incur another sort procedure instead of quick sort process + __col_compar_fn_t compareFn = (orderType == TSDB_ORDER_ASC) ? 
compare_sa : compare_sd; + + SColumnModel* pModel = pDescriptor->pColumnModel; + + size_t width = 0; + for(int32_t i = 0; i < pModel->numOfCols; ++i) { + SSchema1* pSchema = &pModel->pFields[i].field; + if (width < pSchema->bytes) { + width = pSchema->bytes; + } + } + + char* buf = malloc(width); + assert(width > 0 && buf != NULL); + + if (end - start + 1 <= 8) { + tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn, buf); + } else { + columnwiseMergeSortImpl(pDescriptor, numOfRows, start, end, data, orderType, compareFn); + } + + free(buf); +} + + + /* * deep copy of sschema */ -SColumnModel *createColumnModel(SSchema *fields, int32_t numOfCols, int32_t blockCapacity) { +SColumnModel *createColumnModel(SSchema1 *fields, int32_t numOfCols, int32_t blockCapacity) { SColumnModel *pColumnModel = (SColumnModel *)calloc(1, sizeof(SColumnModel) + numOfCols * sizeof(SSchemaEx)); if (pColumnModel == NULL) { return NULL; @@ -1023,7 +1134,7 @@ void tColModelCompact(SColumnModel *pModel, tFilePage *inputBuffer, int32_t maxE } } -SSchema* getColumnModelSchema(SColumnModel *pColumnModel, int32_t index) { +SSchema1* getColumnModelSchema(SColumnModel *pColumnModel, int32_t index) { assert(pColumnModel != NULL && index >= 0 && index < pColumnModel->numOfCols); return &pColumnModel->pFields[index].field; } @@ -1045,7 +1156,7 @@ void tColModelErase(SColumnModel *pModel, tFilePage *inputBuffer, int32_t blockC /* start from the second column */ for (int32_t i = 0; i < pModel->numOfCols; ++i) { int16_t offset = getColumnModelOffset(pModel, i); - SSchema* pSchema = getColumnModelSchema(pModel, i); + SSchema1* pSchema = getColumnModelSchema(pModel, i); char *startPos = inputBuffer->data + offset * blockCapacity + s * pSchema->bytes; char *endPos = startPos + pSchema->bytes * removed; diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c index c43b0b3435b2073d4711bbb8a0ec0d9e347b0d13..a8a6f6732b7eef33cad040c2aadc4b3e1848bde2 100644 --- a/src/query/src/qScript.c +++ b/src/query/src/qScript.c @@ -12,7 +12,7 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - +#ifdef LUA_EMBEDDED #include "os.h" #include "qScript.h" #include "ttype.h" @@ -444,3 +444,4 @@ bool isValidScript(char *script, int32_t len) { return ret; } +#endif diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 21723d6b7190d974c6c519521ebe10949e39c9f8..1f6de550cfe195b15aafe970dc900681939546e4 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -143,14 +143,14 @@ tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optr if (optrType == TK_NULL) { if (pToken){ pToken->type = TSDB_DATA_TYPE_NULL; - tVariantCreate(&pSqlExpr->value, pToken); + tVariantCreate(&pSqlExpr->value, pToken, true); } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; } else if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) { if (pToken) { toTSDBType(pToken->type); - tVariantCreate(&pSqlExpr->value, pToken); + tVariantCreate(&pSqlExpr->value, pToken, true); } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; @@ -203,7 +203,7 @@ tSqlExpr *tSqlExprCreateTimestamp(SStrToken *pToken, int32_t optrType) { if (optrType == TK_INTEGER || optrType == TK_STRING) { if (pToken) { toTSDBType(pToken->type); - tVariantCreate(&pSqlExpr->value, pToken); + tVariantCreate(&pSqlExpr->value, pToken, true); } pSqlExpr->tokenId = optrType; pSqlExpr->type = SQL_NODE_VALUE; @@ -559,14 +559,14 @@ void tSqlExprDestroy(tSqlExpr *pExpr) { doDestroySqlExprNode(pExpr); } -SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order) { +SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order, bool needRmquoteEscape) { if (pList == NULL) { pList = taosArrayInit(4, sizeof(tVariantListItem)); } if (pToken) { tVariantListItem item; - tVariantCreate(&item.pVar, pToken); + tVariantCreate(&item.pVar, pToken, needRmquoteEscape); item.sortOrder = order; taosArrayPush(pList, &item); @@ -680,7 +680,7 @@ void tSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) { // column name is too long, set the it to be invalid. if ((int32_t) pName->n >= maxLen) { - pName->n = -1; + pField->name[0] = 0; } else { strncpy(pField->name, pName->z, pName->n); pField->name[pName->n] = 0; diff --git a/src/query/src/sql.c b/src/query/src/sql.c index eee1880ab92cc938dfd60760ec8e30981d31cf7b..b06e430139339e7fbe335a3dbb3683bc470b2994 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -139,18 +139,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 379 -#define YYNRULE 303 -#define YYNRULE_WITH_ACTION 303 +#define YYNSTATE 378 +#define YYNRULE 302 +#define YYNRULE_WITH_ACTION 302 #define YYNTOKEN 199 -#define YY_MAX_SHIFT 378 -#define YY_MIN_SHIFTREDUCE 594 -#define YY_MAX_SHIFTREDUCE 896 -#define YY_ERROR_ACTION 897 -#define YY_ACCEPT_ACTION 898 -#define YY_NO_ACTION 899 -#define YY_MIN_REDUCE 900 -#define YY_MAX_REDUCE 1202 +#define YY_MAX_SHIFT 377 +#define YY_MIN_SHIFTREDUCE 593 +#define YY_MAX_SHIFTREDUCE 894 +#define YY_ERROR_ACTION 895 +#define YY_ACCEPT_ACTION 896 +#define YY_NO_ACTION 897 +#define YY_MIN_REDUCE 898 +#define YY_MAX_REDUCE 1199 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -217,88 +217,87 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (791) +#define YY_ACTTAB_COUNT (790) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 245, 646, 377, 238, 1056, 23, 214, 730, 1078, 647, - /* 10 */ 682, 898, 378, 59, 60, 251, 63, 64, 1178, 1056, - /* 20 */ 259, 53, 52, 51, 646, 62, 335, 67, 65, 68, - /* 30 */ 66, 159, 647, 337, 175, 58, 57, 355, 354, 56, - /* 40 */ 55, 54, 59, 60, 253, 63, 64, 1055, 1056, 259, - /* 50 */ 53, 52, 51, 297, 62, 335, 67, 65, 68, 66, - /* 60 */ 1026, 1069, 1024, 1025, 58, 57, 1198, 1027, 56, 55, - /* 70 */ 54, 1028, 256, 1029, 1030, 58, 57, 1075, 281, 56, - /* 80 */ 55, 54, 59, 60, 166, 63, 64, 38, 84, 259, - /* 90 */ 53, 52, 51, 90, 62, 335, 67, 65, 68, 66, - /* 100 */ 1069, 288, 287, 646, 58, 57, 333, 29, 56, 55, - /* 110 */ 54, 647, 59, 61, 833, 63, 64, 241, 1042, 259, - /* 120 */ 53, 52, 51, 646, 62, 335, 67, 65, 68, 66, - /* 130 */ 45, 647, 240, 214, 58, 57, 1053, 852, 56, 55, - /* 140 */ 54, 60, 1050, 63, 64, 1179, 282, 259, 53, 52, - /* 150 */ 51, 166, 62, 335, 67, 65, 68, 66, 38, 309, - /* 160 */ 39, 95, 58, 57, 798, 799, 56, 55, 54, 595, - /* 170 */ 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, - /* 180 */ 606, 607, 608, 157, 1069, 239, 63, 64, 770, 252, - /* 190 */ 259, 53, 52, 51, 255, 62, 335, 67, 65, 68, - /* 200 */ 66, 242, 365, 249, 333, 58, 57, 1053, 211, 56, - /* 210 */ 55, 54, 257, 44, 331, 372, 371, 330, 329, 328, - /* 220 */ 370, 327, 326, 325, 369, 324, 368, 367, 1126, 16, - /* 230 */ 307, 15, 166, 24, 6, 1018, 1006, 1007, 1008, 1009, - /* 240 */ 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1019, 1020, - /* 250 */ 217, 166, 258, 848, 212, 214, 837, 225, 840, 839, - /* 260 */ 843, 842, 99, 141, 140, 139, 224, 1179, 258, 848, - /* 270 */ 340, 90, 837, 774, 840, 273, 843, 56, 55, 54, - /* 280 */ 67, 65, 68, 66, 277, 276, 236, 237, 58, 57, - /* 290 */ 336, 767, 56, 55, 54, 1039, 1040, 35, 1043, 260, - /* 300 */ 373, 987, 236, 237, 5, 41, 185, 268, 45, 1125, - /* 310 */ 38, 184, 108, 113, 104, 112, 754, 9, 181, 751, - /* 320 */ 262, 752, 786, 753, 38, 102, 789, 267, 96, 38, - /* 330 */ 320, 280, 838, 82, 841, 69, 125, 119, 130, 218, - /* 340 */ 232, 949, 118, 129, 117, 135, 138, 128, 195, 264, - /* 350 */ 265, 69, 293, 294, 132, 205, 203, 201, 38, 1052, - /* 360 */ 214, 1044, 200, 145, 144, 143, 142, 127, 38, 250, - /* 370 */ 849, 844, 1179, 1053, 344, 38, 38, 845, 1053, 365, - /* 380 */ 846, 44, 38, 372, 371, 83, 849, 844, 370, 376, - /* 390 */ 375, 150, 369, 845, 368, 367, 38, 263, 38, 261, - /* 400 */ 268, 343, 342, 345, 269, 219, 266, 1053, 350, 349, - /* 410 */ 815, 182, 14, 346, 220, 268, 98, 1053, 87, 1041, - /* 420 */ 347, 351, 88, 97, 1053, 1053, 1054, 352, 156, 154, - /* 430 */ 153, 1053, 959, 755, 756, 950, 34, 243, 85, 195, - /* 440 */ 795, 353, 195, 357, 805, 1053, 101, 1053, 806, 1, - /* 450 */ 183, 3, 196, 847, 161, 284, 292, 291, 70, 284, - /* 460 */ 75, 78, 26, 740, 312, 742, 314, 741, 814, 315, - /* 470 */ 871, 850, 835, 645, 18, 81, 17, 39, 39, 70, - /* 480 */ 100, 70, 137, 136, 25, 25, 759, 25, 760, 20, - /* 490 */ 757, 19, 758, 124, 22, 123, 21, 289, 1173, 1172, - /* 500 */ 1171, 234, 79, 76, 235, 215, 216, 729, 290, 1190, - /* 510 */ 836, 221, 213, 222, 223, 1136, 227, 228, 229, 1135, - /* 520 */ 247, 226, 278, 1132, 210, 1131, 248, 356, 48, 1070, - /* 530 */ 158, 1077, 1088, 1067, 155, 1085, 1086, 285, 1118, 1090, - /* 540 */ 160, 165, 1117, 303, 1051, 177, 283, 86, 178, 1049, - /* 550 */ 179, 180, 964, 785, 317, 318, 296, 319, 322, 323, - /* 560 */ 167, 
46, 244, 298, 310, 168, 208, 42, 334, 958, - /* 570 */ 341, 1197, 115, 1196, 1193, 186, 348, 1189, 121, 300, - /* 580 */ 80, 77, 50, 308, 1188, 1185, 169, 306, 187, 304, - /* 590 */ 984, 43, 302, 40, 47, 209, 946, 131, 944, 133, - /* 600 */ 134, 942, 941, 299, 270, 198, 199, 938, 937, 936, - /* 610 */ 935, 934, 933, 932, 202, 204, 929, 927, 925, 923, - /* 620 */ 206, 920, 295, 207, 916, 49, 321, 91, 301, 1119, - /* 630 */ 366, 126, 359, 358, 360, 361, 363, 233, 362, 254, - /* 640 */ 316, 364, 374, 896, 271, 272, 895, 275, 274, 894, - /* 650 */ 876, 230, 963, 231, 109, 962, 110, 877, 279, 284, - /* 660 */ 311, 10, 286, 762, 30, 89, 940, 939, 190, 194, - /* 670 */ 146, 985, 188, 189, 192, 191, 931, 193, 147, 148, - /* 680 */ 930, 4, 149, 1022, 92, 986, 922, 176, 172, 170, - /* 690 */ 173, 171, 174, 33, 921, 794, 2, 73, 792, 791, - /* 700 */ 1032, 788, 787, 74, 164, 796, 162, 246, 807, 163, - /* 710 */ 31, 801, 93, 32, 803, 94, 305, 11, 12, 13, - /* 720 */ 27, 313, 36, 28, 101, 103, 106, 660, 695, 693, - /* 730 */ 692, 105, 691, 689, 688, 37, 107, 687, 684, 650, - /* 740 */ 111, 332, 7, 853, 851, 8, 338, 339, 39, 114, - /* 750 */ 71, 116, 72, 120, 732, 731, 728, 122, 676, 674, - /* 760 */ 666, 672, 668, 670, 664, 662, 698, 697, 696, 694, - /* 770 */ 690, 686, 685, 197, 648, 612, 900, 899, 899, 899, - /* 780 */ 899, 899, 899, 899, 899, 899, 899, 899, 899, 151, - /* 790 */ 152, + /* 0 */ 244, 644, 376, 237, 1053, 23, 213, 728, 1075, 645, + /* 10 */ 680, 896, 377, 59, 60, 250, 63, 64, 1175, 1053, + /* 20 */ 258, 53, 52, 51, 644, 62, 334, 67, 65, 68, + /* 30 */ 66, 158, 645, 336, 174, 58, 57, 354, 353, 56, + /* 40 */ 55, 54, 59, 60, 252, 63, 64, 1052, 1053, 258, + /* 50 */ 53, 52, 51, 296, 62, 334, 67, 65, 68, 66, + /* 60 */ 1023, 1066, 1021, 1022, 58, 57, 1195, 1024, 56, 55, + /* 70 */ 54, 1025, 255, 1026, 1027, 58, 57, 1072, 280, 56, + /* 80 */ 55, 54, 59, 60, 165, 63, 64, 38, 84, 258, + /* 90 */ 53, 52, 51, 90, 62, 334, 67, 65, 68, 66, + /* 100 */ 1066, 287, 286, 644, 58, 57, 332, 29, 56, 55, + /* 110 */ 54, 645, 59, 61, 831, 63, 64, 240, 1039, 258, + /* 120 */ 53, 52, 51, 644, 62, 334, 67, 65, 68, 66, + /* 130 */ 45, 645, 239, 213, 58, 57, 1050, 850, 56, 55, + /* 140 */ 54, 60, 1047, 63, 64, 1176, 281, 258, 53, 52, + /* 150 */ 51, 165, 62, 334, 67, 65, 68, 66, 38, 308, + /* 160 */ 39, 95, 58, 57, 796, 797, 56, 55, 54, 594, + /* 170 */ 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, + /* 180 */ 605, 606, 607, 156, 1066, 238, 63, 64, 768, 251, + /* 190 */ 258, 53, 52, 51, 254, 62, 334, 67, 65, 68, + /* 200 */ 66, 241, 364, 248, 332, 58, 57, 1050, 210, 56, + /* 210 */ 55, 54, 256, 44, 330, 371, 370, 329, 328, 327, + /* 220 */ 369, 326, 325, 324, 368, 323, 367, 366, 1123, 16, + /* 230 */ 306, 15, 165, 24, 6, 1015, 1003, 1004, 1005, 1006, + /* 240 */ 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1016, 1017, + /* 250 */ 216, 165, 257, 846, 211, 213, 835, 224, 838, 837, + /* 260 */ 841, 840, 99, 141, 140, 139, 223, 1176, 257, 846, + /* 270 */ 339, 90, 835, 772, 838, 272, 841, 56, 55, 54, + /* 280 */ 67, 65, 68, 66, 276, 275, 235, 236, 58, 57, + /* 290 */ 335, 765, 56, 55, 54, 1036, 1037, 35, 1040, 259, + /* 300 */ 372, 984, 235, 236, 5, 41, 184, 267, 45, 1122, + /* 310 */ 38, 183, 108, 113, 104, 112, 752, 9, 180, 749, + /* 320 */ 261, 750, 784, 751, 38, 102, 787, 266, 96, 38, + /* 330 */ 319, 279, 836, 82, 839, 69, 125, 119, 130, 217, + /* 340 */ 231, 946, 118, 129, 117, 135, 138, 128, 194, 263, + /* 350 */ 264, 69, 292, 293, 132, 204, 202, 200, 38, 1049, + /* 360 */ 213, 1041, 199, 145, 144, 
143, 142, 127, 38, 249, + /* 370 */ 847, 842, 1176, 1050, 343, 38, 38, 843, 1050, 364, + /* 380 */ 844, 44, 38, 371, 370, 83, 847, 842, 369, 375, + /* 390 */ 374, 621, 368, 843, 367, 366, 38, 262, 38, 260, + /* 400 */ 267, 342, 341, 344, 268, 218, 265, 1050, 349, 348, + /* 410 */ 813, 181, 14, 345, 219, 267, 98, 1050, 87, 1038, + /* 420 */ 346, 350, 88, 97, 1050, 1050, 1051, 351, 155, 153, + /* 430 */ 152, 1050, 956, 753, 754, 947, 34, 242, 85, 194, + /* 440 */ 793, 352, 194, 356, 803, 1050, 101, 1050, 804, 1, + /* 450 */ 182, 3, 195, 845, 160, 283, 291, 290, 70, 283, + /* 460 */ 75, 78, 26, 738, 311, 740, 313, 739, 812, 314, + /* 470 */ 869, 848, 833, 643, 18, 81, 17, 39, 39, 70, + /* 480 */ 100, 70, 137, 136, 25, 25, 757, 25, 758, 20, + /* 490 */ 755, 19, 756, 124, 22, 123, 21, 288, 1170, 1169, + /* 500 */ 1168, 233, 79, 76, 234, 214, 215, 727, 289, 1187, + /* 510 */ 834, 220, 212, 221, 222, 1133, 226, 227, 228, 1132, + /* 520 */ 246, 225, 277, 1129, 209, 1128, 247, 355, 48, 1067, + /* 530 */ 157, 1074, 1085, 1064, 154, 1082, 1083, 284, 1115, 1087, + /* 540 */ 159, 164, 1114, 302, 1048, 176, 282, 86, 177, 1046, + /* 550 */ 178, 179, 961, 783, 316, 317, 295, 318, 321, 322, + /* 560 */ 166, 46, 243, 297, 309, 80, 207, 42, 333, 955, + /* 570 */ 340, 1194, 115, 1193, 1190, 185, 347, 1186, 121, 299, + /* 580 */ 77, 167, 50, 307, 1185, 1182, 168, 305, 186, 303, + /* 590 */ 981, 43, 301, 40, 47, 208, 943, 131, 941, 133, + /* 600 */ 134, 939, 938, 298, 269, 197, 198, 935, 934, 933, + /* 610 */ 932, 931, 930, 929, 201, 203, 925, 923, 921, 205, + /* 620 */ 918, 206, 294, 914, 320, 49, 91, 300, 1116, 365, + /* 630 */ 126, 357, 358, 359, 360, 361, 362, 232, 363, 253, + /* 640 */ 315, 373, 894, 270, 271, 893, 273, 274, 229, 892, + /* 650 */ 875, 230, 109, 960, 959, 874, 110, 146, 278, 283, + /* 660 */ 310, 10, 285, 89, 92, 760, 937, 936, 189, 147, + /* 670 */ 188, 982, 187, 190, 191, 193, 928, 192, 148, 927, + /* 680 */ 4, 149, 1019, 920, 30, 983, 919, 175, 171, 169, + /* 690 */ 172, 170, 173, 33, 2, 792, 1029, 73, 790, 789, + /* 700 */ 786, 785, 74, 163, 794, 161, 245, 805, 162, 11, + /* 710 */ 799, 93, 31, 801, 94, 304, 32, 12, 13, 27, + /* 720 */ 312, 103, 28, 101, 106, 36, 658, 693, 691, 690, + /* 730 */ 689, 105, 687, 686, 37, 107, 685, 682, 648, 111, + /* 740 */ 7, 331, 849, 337, 8, 851, 338, 114, 39, 71, + /* 750 */ 72, 116, 120, 730, 729, 726, 122, 674, 672, 664, + /* 760 */ 670, 666, 668, 662, 660, 696, 695, 694, 692, 688, + /* 770 */ 684, 683, 196, 646, 611, 898, 897, 897, 897, 897, + /* 780 */ 897, 897, 897, 897, 897, 897, 897, 897, 150, 151, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 247, 1, 201, 202, 251, 269, 269, 5, 201, 9, @@ -357,30 +356,30 @@ static const YYCODETYPE yy_lookahead[] = { /* 530 */ 201, 201, 201, 265, 62, 201, 201, 249, 279, 201, /* 540 */ 201, 201, 279, 201, 249, 253, 203, 203, 201, 201, /* 550 */ 201, 201, 201, 126, 201, 201, 273, 201, 201, 201, - /* 560 */ 264, 201, 273, 273, 134, 263, 201, 201, 201, 201, + /* 560 */ 264, 201, 273, 273, 134, 139, 201, 201, 201, 201, /* 570 */ 201, 201, 201, 201, 201, 201, 201, 201, 201, 273, - /* 580 */ 139, 141, 138, 137, 201, 201, 262, 132, 201, 131, + /* 580 */ 141, 263, 138, 137, 201, 201, 262, 132, 201, 131, /* 590 */ 201, 201, 130, 201, 201, 201, 201, 201, 201, 201, /* 600 */ 201, 201, 201, 133, 201, 201, 201, 201, 201, 201, /* 610 */ 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, - /* 620 */ 201, 201, 127, 201, 201, 143, 91, 203, 203, 203, - /* 630 */ 115, 98, 53, 97, 94, 96, 95, 203, 57, 203, - /* 640 */ 203, 
93, 86, 5, 156, 5, 5, 5, 156, 5, - /* 650 */ 101, 203, 213, 203, 209, 213, 209, 102, 145, 122, - /* 660 */ 117, 84, 99, 85, 84, 123, 203, 203, 216, 215, - /* 670 */ 204, 222, 221, 220, 217, 219, 203, 218, 204, 204, - /* 680 */ 203, 205, 204, 240, 99, 224, 203, 254, 259, 261, - /* 690 */ 258, 260, 257, 255, 203, 85, 210, 99, 126, 126, - /* 700 */ 240, 5, 5, 84, 99, 85, 84, 1, 85, 84, - /* 710 */ 99, 85, 84, 99, 85, 84, 84, 135, 135, 84, - /* 720 */ 84, 117, 89, 84, 118, 80, 72, 5, 9, 5, - /* 730 */ 5, 88, 5, 5, 5, 89, 88, 5, 5, 87, - /* 740 */ 80, 15, 84, 119, 85, 84, 26, 61, 99, 150, - /* 750 */ 16, 150, 16, 150, 5, 5, 85, 150, 5, 5, + /* 620 */ 201, 201, 127, 201, 91, 143, 203, 203, 203, 115, + /* 630 */ 98, 97, 53, 94, 96, 57, 95, 203, 93, 203, + /* 640 */ 203, 86, 5, 156, 5, 5, 156, 5, 203, 5, + /* 650 */ 102, 203, 209, 213, 213, 101, 209, 204, 145, 122, + /* 660 */ 117, 84, 99, 123, 99, 85, 203, 203, 216, 204, + /* 670 */ 220, 222, 221, 219, 217, 215, 203, 218, 204, 203, + /* 680 */ 205, 204, 240, 203, 84, 224, 203, 254, 259, 261, + /* 690 */ 258, 260, 257, 255, 210, 85, 240, 99, 126, 126, + /* 700 */ 5, 5, 84, 99, 85, 84, 1, 85, 84, 135, + /* 710 */ 85, 84, 99, 85, 84, 84, 99, 135, 84, 84, + /* 720 */ 117, 80, 84, 118, 72, 89, 5, 9, 5, 5, + /* 730 */ 5, 88, 5, 5, 89, 88, 5, 5, 87, 80, + /* 740 */ 84, 15, 85, 26, 84, 119, 61, 150, 99, 16, + /* 750 */ 16, 150, 150, 5, 5, 85, 150, 5, 5, 5, /* 760 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 770 */ 5, 5, 5, 99, 87, 62, 0, 282, 282, 282, - /* 780 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 21, - /* 790 */ 21, 282, 282, 282, 282, 282, 282, 282, 282, 282, + /* 770 */ 5, 5, 99, 87, 62, 0, 282, 282, 282, 282, + /* 780 */ 282, 282, 282, 282, 282, 282, 282, 282, 21, 21, + /* 790 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, /* 800 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, /* 810 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, /* 820 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, @@ -399,17 +398,17 @@ static const YYCODETYPE yy_lookahead[] = { /* 950 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, /* 960 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, /* 970 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - /* 980 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, + /* 980 */ 282, 282, 282, 282, 282, 282, 282, 282, 282, }; -#define YY_SHIFT_COUNT (378) +#define YY_SHIFT_COUNT (377) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (776) +#define YY_SHIFT_MAX (775) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 187, 113, 113, 281, 281, 20, 251, 267, 267, 23, /* 10 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, /* 20 */ 102, 102, 102, 0, 122, 267, 314, 314, 314, 9, /* 30 */ 9, 102, 102, 36, 102, 118, 102, 102, 102, 102, - /* 40 */ 287, 20, 110, 110, 5, 791, 791, 791, 267, 267, + /* 40 */ 287, 20, 110, 110, 5, 790, 790, 790, 267, 267, /* 50 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, /* 60 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, /* 70 */ 314, 314, 314, 317, 317, 2, 2, 2, 2, 2, @@ -420,33 +419,33 @@ static const unsigned short int yy_shift_ofst[] = { /* 120 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, /* 130 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, /* 140 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - /* 150 */ 102, 102, 102, 102, 102, 102, 102, 102, 472, 472, - /* 160 */ 472, 427, 427, 427, 427, 472, 472, 441, 440, 430, - /* 170 */ 444, 446, 455, 458, 462, 470, 495, 482, 472, 472, - /* 180 */ 472, 535, 535, 515, 20, 20, 472, 472, 533, 536, - 
/* 190 */ 579, 540, 539, 581, 541, 548, 515, 5, 472, 472, - /* 200 */ 556, 556, 472, 556, 472, 556, 472, 472, 791, 791, - /* 210 */ 29, 69, 69, 99, 69, 127, 170, 240, 253, 253, - /* 220 */ 253, 253, 253, 253, 272, 291, 40, 40, 40, 40, - /* 230 */ 250, 257, 130, 328, 238, 238, 254, 327, 322, 364, - /* 240 */ 61, 333, 337, 421, 355, 359, 363, 361, 362, 378, - /* 250 */ 379, 380, 381, 382, 352, 385, 386, 471, 150, 18, - /* 260 */ 388, 81, 194, 326, 481, 485, 341, 345, 391, 346, - /* 270 */ 402, 638, 488, 640, 641, 492, 642, 644, 555, 549, - /* 280 */ 513, 537, 543, 577, 542, 578, 580, 563, 585, 610, - /* 290 */ 598, 572, 573, 696, 697, 619, 620, 622, 623, 625, - /* 300 */ 626, 605, 628, 629, 631, 706, 632, 611, 582, 614, - /* 310 */ 583, 635, 543, 636, 604, 639, 606, 645, 633, 643, - /* 320 */ 654, 722, 646, 648, 719, 724, 725, 727, 728, 729, - /* 330 */ 732, 733, 652, 726, 660, 658, 659, 624, 661, 720, - /* 340 */ 686, 734, 599, 601, 649, 649, 649, 649, 736, 603, - /* 350 */ 607, 649, 649, 649, 749, 750, 671, 649, 753, 754, + /* 150 */ 102, 102, 102, 102, 102, 102, 102, 472, 472, 472, + /* 160 */ 427, 427, 427, 427, 472, 472, 426, 439, 430, 444, + /* 170 */ 446, 455, 458, 462, 470, 495, 482, 472, 472, 472, + /* 180 */ 533, 533, 514, 20, 20, 472, 472, 532, 534, 579, + /* 190 */ 539, 538, 578, 541, 545, 514, 5, 472, 472, 555, + /* 200 */ 555, 472, 555, 472, 555, 472, 472, 790, 790, 29, + /* 210 */ 69, 69, 99, 69, 127, 170, 240, 253, 253, 253, + /* 220 */ 253, 253, 253, 272, 291, 40, 40, 40, 40, 250, + /* 230 */ 257, 130, 328, 238, 238, 254, 327, 322, 364, 61, + /* 240 */ 333, 337, 421, 355, 359, 363, 361, 362, 378, 379, + /* 250 */ 380, 381, 382, 352, 385, 386, 471, 150, 18, 388, + /* 260 */ 81, 194, 326, 481, 485, 341, 345, 391, 346, 402, + /* 270 */ 637, 487, 639, 640, 490, 642, 644, 548, 554, 513, + /* 280 */ 537, 543, 577, 540, 580, 600, 563, 565, 610, 598, + /* 290 */ 572, 573, 695, 696, 618, 619, 621, 622, 624, 625, + /* 300 */ 604, 627, 628, 630, 705, 631, 613, 574, 617, 582, + /* 310 */ 634, 543, 635, 603, 638, 605, 641, 636, 643, 652, + /* 320 */ 721, 645, 647, 718, 723, 724, 725, 727, 728, 731, + /* 330 */ 732, 651, 726, 659, 656, 657, 626, 660, 717, 685, + /* 340 */ 733, 597, 601, 649, 649, 649, 649, 734, 602, 606, + /* 350 */ 649, 649, 649, 748, 749, 670, 649, 752, 753, 754, /* 360 */ 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, - /* 370 */ 765, 766, 767, 674, 687, 768, 769, 713, 776, + /* 370 */ 765, 766, 673, 686, 767, 768, 712, 775, }; -#define YY_REDUCE_COUNT (209) +#define YY_REDUCE_COUNT (208) #define YY_REDUCE_MIN (-264) -#define YY_REDUCE_MAX (491) +#define YY_REDUCE_MAX (484) static const short yy_reduce_ofst[] = { /* 0 */ -188, 10, 10, -165, -165, 53, -136, -14, 91, -170, /* 10 */ -114, -50, -117, -43, 123, 128, 157, 167, 174, 175, @@ -463,52 +462,52 @@ static const short yy_reduce_ofst[] = { /* 120 */ 375, 376, 377, 383, 384, 387, 389, 390, 392, 393, /* 130 */ 394, 395, 396, 397, 398, 399, 400, 401, 403, 404, /* 140 */ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, - /* 150 */ 415, 416, 417, 418, 419, 420, 422, 423, 343, 344, - /* 160 */ 424, 283, 289, 290, 306, 425, 426, 268, 296, 302, - /* 170 */ 324, 428, 431, 429, 432, 435, 438, 433, 434, 436, - /* 180 */ 437, 439, 442, 443, 445, 447, 448, 450, 449, 451, - /* 190 */ 453, 452, 456, 457, 459, 454, 460, 461, 463, 464, - /* 200 */ 466, 474, 473, 475, 477, 478, 483, 491, 486, 476, + /* 150 */ 415, 416, 417, 418, 419, 420, 422, 343, 344, 423, + /* 160 */ 283, 289, 290, 306, 424, 425, 268, 296, 318, 324, + /* 170 */ 
428, 431, 429, 432, 435, 438, 433, 434, 436, 437, + /* 180 */ 440, 441, 442, 443, 447, 445, 448, 449, 451, 450, + /* 190 */ 452, 454, 457, 459, 460, 456, 461, 463, 464, 453, + /* 200 */ 465, 473, 474, 476, 477, 480, 483, 484, 475, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 897, 1021, 960, 1031, 947, 957, 1181, 1181, 1181, 897, - /* 10 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 20 */ 897, 897, 897, 1079, 917, 1181, 897, 897, 897, 897, - /* 30 */ 897, 897, 897, 1103, 897, 957, 897, 897, 897, 897, - /* 40 */ 967, 957, 967, 967, 897, 1074, 1005, 1023, 897, 897, - /* 50 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 60 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 70 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 80 */ 897, 897, 897, 897, 897, 1081, 1087, 1084, 897, 897, - /* 90 */ 897, 1089, 897, 897, 897, 1122, 1122, 1072, 897, 897, - /* 100 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 110 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 120 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 130 */ 897, 945, 897, 943, 897, 897, 897, 897, 897, 897, - /* 140 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 150 */ 928, 897, 897, 897, 897, 897, 897, 915, 919, 919, - /* 160 */ 919, 897, 897, 897, 897, 919, 919, 1129, 1133, 1115, - /* 170 */ 1127, 1123, 1110, 1108, 1106, 1114, 1099, 1137, 919, 919, - /* 180 */ 919, 965, 965, 961, 957, 957, 919, 919, 983, 981, - /* 190 */ 979, 971, 977, 973, 975, 969, 948, 897, 919, 919, - /* 200 */ 955, 955, 919, 955, 919, 955, 919, 919, 1005, 1023, - /* 210 */ 897, 1138, 1128, 897, 1180, 1168, 1167, 897, 1176, 1175, - /* 220 */ 1174, 1166, 1165, 1164, 897, 897, 1160, 1163, 1162, 1161, - /* 230 */ 897, 897, 897, 897, 1170, 1169, 897, 897, 897, 897, - /* 240 */ 897, 897, 897, 1096, 897, 897, 897, 1134, 1130, 897, - /* 250 */ 897, 897, 897, 897, 897, 897, 897, 897, 1140, 897, - /* 260 */ 897, 897, 897, 897, 897, 897, 897, 897, 1033, 897, - /* 270 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 280 */ 897, 1071, 897, 897, 897, 897, 897, 1083, 1082, 897, - /* 290 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 300 */ 897, 897, 897, 897, 897, 897, 897, 1124, 897, 1116, - /* 310 */ 897, 897, 1045, 897, 897, 897, 897, 897, 897, 897, - /* 320 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 330 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 340 */ 897, 897, 897, 897, 1199, 1194, 1195, 1192, 897, 897, - /* 350 */ 897, 1191, 1186, 1187, 897, 897, 897, 1184, 897, 897, - /* 360 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 370 */ 897, 897, 897, 989, 897, 926, 924, 897, 897, + /* 0 */ 895, 1018, 957, 1028, 944, 954, 1178, 1178, 1178, 895, + /* 10 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 20 */ 895, 895, 895, 1076, 915, 1178, 895, 895, 895, 895, + /* 30 */ 895, 895, 895, 1100, 895, 954, 895, 895, 895, 895, + /* 40 */ 964, 954, 964, 964, 895, 1071, 1002, 1020, 895, 895, + /* 50 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 60 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 70 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 80 */ 895, 895, 895, 895, 895, 1078, 1084, 1081, 895, 895, + /* 90 */ 895, 1086, 895, 895, 895, 1119, 1119, 1069, 895, 895, + /* 100 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 110 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 120 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 130 */ 895, 942, 895, 940, 895, 895, 895, 895, 895, 
895, + /* 140 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 150 */ 895, 895, 895, 895, 895, 895, 913, 917, 917, 917, + /* 160 */ 895, 895, 895, 895, 917, 917, 1126, 1130, 1112, 1124, + /* 170 */ 1120, 1107, 1105, 1103, 1111, 1096, 1134, 917, 917, 917, + /* 180 */ 962, 962, 958, 954, 954, 917, 917, 980, 978, 976, + /* 190 */ 968, 974, 970, 972, 966, 945, 895, 917, 917, 952, + /* 200 */ 952, 917, 952, 917, 952, 917, 917, 1002, 1020, 895, + /* 210 */ 1135, 1125, 895, 1177, 1165, 1164, 895, 1173, 1172, 1171, + /* 220 */ 1163, 1162, 1161, 895, 895, 1157, 1160, 1159, 1158, 895, + /* 230 */ 895, 895, 895, 1167, 1166, 895, 895, 895, 895, 895, + /* 240 */ 895, 895, 1093, 895, 895, 895, 1131, 1127, 895, 895, + /* 250 */ 895, 895, 895, 895, 895, 895, 895, 1137, 895, 895, + /* 260 */ 895, 895, 895, 895, 895, 895, 895, 1030, 895, 895, + /* 270 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 280 */ 1068, 895, 895, 895, 895, 895, 1080, 1079, 895, 895, + /* 290 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 300 */ 895, 895, 895, 895, 895, 895, 1121, 895, 1113, 895, + /* 310 */ 895, 1042, 895, 895, 895, 895, 895, 895, 895, 895, + /* 320 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 330 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 340 */ 895, 895, 895, 1196, 1191, 1192, 1189, 895, 895, 895, + /* 350 */ 1188, 1183, 1184, 895, 895, 895, 1181, 895, 895, 895, + /* 360 */ 895, 895, 895, 895, 895, 895, 895, 895, 895, 895, + /* 370 */ 895, 895, 986, 895, 924, 922, 895, 895, }; /********** End of lemon-generated parsing tables *****************************/ @@ -1132,280 +1131,279 @@ static const char *const yyRuleName[] = { /* 26 */ "cmd ::= SHOW dbPrefix STABLES", /* 27 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", /* 28 */ "cmd ::= SHOW dbPrefix VGROUPS", - /* 29 */ "cmd ::= SHOW dbPrefix VGROUPS ids", - /* 30 */ "cmd ::= DROP TABLE ifexists ids cpxName", - /* 31 */ "cmd ::= DROP STABLE ifexists ids cpxName", - /* 32 */ "cmd ::= DROP DATABASE ifexists ids", - /* 33 */ "cmd ::= DROP TOPIC ifexists ids", - /* 34 */ "cmd ::= DROP FUNCTION ids", - /* 35 */ "cmd ::= DROP DNODE ids", - /* 36 */ "cmd ::= DROP USER ids", - /* 37 */ "cmd ::= DROP ACCOUNT ids", - /* 38 */ "cmd ::= USE ids", - /* 39 */ "cmd ::= DESCRIBE ids cpxName", - /* 40 */ "cmd ::= DESC ids cpxName", - /* 41 */ "cmd ::= ALTER USER ids PASS ids", - /* 42 */ "cmd ::= ALTER USER ids PRIVILEGE ids", - /* 43 */ "cmd ::= ALTER DNODE ids ids", - /* 44 */ "cmd ::= ALTER DNODE ids ids ids", - /* 45 */ "cmd ::= ALTER LOCAL ids", - /* 46 */ "cmd ::= ALTER LOCAL ids ids", - /* 47 */ "cmd ::= ALTER DATABASE ids alter_db_optr", - /* 48 */ "cmd ::= ALTER TOPIC ids alter_topic_optr", - /* 49 */ "cmd ::= ALTER ACCOUNT ids acct_optr", - /* 50 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", - /* 51 */ "cmd ::= COMPACT VNODES IN LP exprlist RP", - /* 52 */ "ids ::= ID", - /* 53 */ "ids ::= STRING", - /* 54 */ "ifexists ::= IF EXISTS", - /* 55 */ "ifexists ::=", - /* 56 */ "ifnotexists ::= IF NOT EXISTS", - /* 57 */ "ifnotexists ::=", - /* 58 */ "cmd ::= CREATE DNODE ids", - /* 59 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", - /* 60 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", - /* 61 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr", - /* 62 */ "cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize", - /* 63 */ "cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize", - /* 64 */ "cmd ::= CREATE USER ids PASS ids", - /* 65 */ "bufsize ::=", - /* 66 */ "bufsize ::= 
BUFSIZE INTEGER", - /* 67 */ "pps ::=", - /* 68 */ "pps ::= PPS INTEGER", - /* 69 */ "tseries ::=", - /* 70 */ "tseries ::= TSERIES INTEGER", - /* 71 */ "dbs ::=", - /* 72 */ "dbs ::= DBS INTEGER", - /* 73 */ "streams ::=", - /* 74 */ "streams ::= STREAMS INTEGER", - /* 75 */ "storage ::=", - /* 76 */ "storage ::= STORAGE INTEGER", - /* 77 */ "qtime ::=", - /* 78 */ "qtime ::= QTIME INTEGER", - /* 79 */ "users ::=", - /* 80 */ "users ::= USERS INTEGER", - /* 81 */ "conns ::=", - /* 82 */ "conns ::= CONNS INTEGER", - /* 83 */ "state ::=", - /* 84 */ "state ::= STATE ids", - /* 85 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", - /* 86 */ "intitemlist ::= intitemlist COMMA intitem", - /* 87 */ "intitemlist ::= intitem", - /* 88 */ "intitem ::= INTEGER", - /* 89 */ "keep ::= KEEP intitemlist", - /* 90 */ "cache ::= CACHE INTEGER", - /* 91 */ "replica ::= REPLICA INTEGER", - /* 92 */ "quorum ::= QUORUM INTEGER", - /* 93 */ "days ::= DAYS INTEGER", - /* 94 */ "minrows ::= MINROWS INTEGER", - /* 95 */ "maxrows ::= MAXROWS INTEGER", - /* 96 */ "blocks ::= BLOCKS INTEGER", - /* 97 */ "ctime ::= CTIME INTEGER", - /* 98 */ "wal ::= WAL INTEGER", - /* 99 */ "fsync ::= FSYNC INTEGER", - /* 100 */ "comp ::= COMP INTEGER", - /* 101 */ "prec ::= PRECISION STRING", - /* 102 */ "update ::= UPDATE INTEGER", - /* 103 */ "cachelast ::= CACHELAST INTEGER", - /* 104 */ "partitions ::= PARTITIONS INTEGER", - /* 105 */ "db_optr ::=", - /* 106 */ "db_optr ::= db_optr cache", - /* 107 */ "db_optr ::= db_optr replica", - /* 108 */ "db_optr ::= db_optr quorum", - /* 109 */ "db_optr ::= db_optr days", - /* 110 */ "db_optr ::= db_optr minrows", - /* 111 */ "db_optr ::= db_optr maxrows", - /* 112 */ "db_optr ::= db_optr blocks", - /* 113 */ "db_optr ::= db_optr ctime", - /* 114 */ "db_optr ::= db_optr wal", - /* 115 */ "db_optr ::= db_optr fsync", - /* 116 */ "db_optr ::= db_optr comp", - /* 117 */ "db_optr ::= db_optr prec", - /* 118 */ "db_optr ::= db_optr keep", - /* 119 */ "db_optr ::= db_optr update", - /* 120 */ "db_optr ::= db_optr cachelast", - /* 121 */ "topic_optr ::= db_optr", - /* 122 */ "topic_optr ::= topic_optr partitions", - /* 123 */ "alter_db_optr ::=", - /* 124 */ "alter_db_optr ::= alter_db_optr replica", - /* 125 */ "alter_db_optr ::= alter_db_optr quorum", - /* 126 */ "alter_db_optr ::= alter_db_optr keep", - /* 127 */ "alter_db_optr ::= alter_db_optr blocks", - /* 128 */ "alter_db_optr ::= alter_db_optr comp", - /* 129 */ "alter_db_optr ::= alter_db_optr update", - /* 130 */ "alter_db_optr ::= alter_db_optr cachelast", - /* 131 */ "alter_topic_optr ::= alter_db_optr", - /* 132 */ "alter_topic_optr ::= alter_topic_optr partitions", - /* 133 */ "typename ::= ids", - /* 134 */ "typename ::= ids LP signed RP", - /* 135 */ "typename ::= ids UNSIGNED", - /* 136 */ "signed ::= INTEGER", - /* 137 */ "signed ::= PLUS INTEGER", - /* 138 */ "signed ::= MINUS INTEGER", - /* 139 */ "cmd ::= CREATE TABLE create_table_args", - /* 140 */ "cmd ::= CREATE TABLE create_stable_args", - /* 141 */ "cmd ::= CREATE STABLE create_stable_args", - /* 142 */ "cmd ::= CREATE TABLE create_table_list", - /* 143 */ "create_table_list ::= create_from_stable", - /* 144 */ "create_table_list ::= create_table_list create_from_stable", - /* 145 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", - /* 146 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", - /* 147 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP 
tagitemlist RP", - /* 148 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", - /* 149 */ "tagNamelist ::= tagNamelist COMMA ids", - /* 150 */ "tagNamelist ::= ids", - /* 151 */ "create_table_args ::= ifnotexists ids cpxName AS select", - /* 152 */ "columnlist ::= columnlist COMMA column", - /* 153 */ "columnlist ::= column", - /* 154 */ "column ::= ids typename", - /* 155 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 156 */ "tagitemlist ::= tagitem", - /* 157 */ "tagitem ::= INTEGER", - /* 158 */ "tagitem ::= FLOAT", - /* 159 */ "tagitem ::= STRING", - /* 160 */ "tagitem ::= BOOL", - /* 161 */ "tagitem ::= NULL", - /* 162 */ "tagitem ::= NOW", - /* 163 */ "tagitem ::= MINUS INTEGER", - /* 164 */ "tagitem ::= MINUS FLOAT", - /* 165 */ "tagitem ::= PLUS INTEGER", - /* 166 */ "tagitem ::= PLUS FLOAT", - /* 167 */ "select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt", - /* 168 */ "select ::= LP select RP", - /* 169 */ "union ::= select", - /* 170 */ "union ::= union UNION ALL select", - /* 171 */ "cmd ::= union", - /* 172 */ "select ::= SELECT selcollist", - /* 173 */ "sclp ::= selcollist COMMA", - /* 174 */ "sclp ::=", - /* 175 */ "selcollist ::= sclp distinct expr as", - /* 176 */ "selcollist ::= sclp STAR", - /* 177 */ "as ::= AS ids", - /* 178 */ "as ::= ids", - /* 179 */ "as ::=", - /* 180 */ "distinct ::= DISTINCT", - /* 181 */ "distinct ::=", - /* 182 */ "from ::= FROM tablelist", - /* 183 */ "from ::= FROM sub", - /* 184 */ "sub ::= LP union RP", - /* 185 */ "sub ::= LP union RP ids", - /* 186 */ "sub ::= sub COMMA LP union RP ids", - /* 187 */ "tablelist ::= ids cpxName", - /* 188 */ "tablelist ::= ids cpxName ids", - /* 189 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 190 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 191 */ "tmvar ::= VARIABLE", - /* 192 */ "timestamp ::= INTEGER", - /* 193 */ "timestamp ::= MINUS INTEGER", - /* 194 */ "timestamp ::= PLUS INTEGER", - /* 195 */ "timestamp ::= STRING", - /* 196 */ "timestamp ::= NOW", - /* 197 */ "timestamp ::= NOW PLUS VARIABLE", - /* 198 */ "timestamp ::= NOW MINUS VARIABLE", - /* 199 */ "range_option ::=", - /* 200 */ "range_option ::= RANGE LP timestamp COMMA timestamp RP", - /* 201 */ "interval_option ::= intervalKey LP tmvar RP", - /* 202 */ "interval_option ::= intervalKey LP tmvar COMMA tmvar RP", - /* 203 */ "interval_option ::=", - /* 204 */ "intervalKey ::= INTERVAL", - /* 205 */ "intervalKey ::= EVERY", - /* 206 */ "session_option ::=", - /* 207 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", - /* 208 */ "windowstate_option ::=", - /* 209 */ "windowstate_option ::= STATE_WINDOW LP ids RP", - /* 210 */ "fill_opt ::=", - /* 211 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 212 */ "fill_opt ::= FILL LP ID RP", - /* 213 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 214 */ "sliding_opt ::=", - /* 215 */ "orderby_opt ::=", - /* 216 */ "orderby_opt ::= ORDER BY sortlist", - /* 217 */ "sortlist ::= sortlist COMMA item sortorder", - /* 218 */ "sortlist ::= item sortorder", - /* 219 */ "item ::= ids cpxName", - /* 220 */ "sortorder ::= ASC", - /* 221 */ "sortorder ::= DESC", - /* 222 */ "sortorder ::=", - /* 223 */ "groupby_opt ::=", - /* 224 */ "groupby_opt ::= GROUP BY grouplist", - /* 225 */ "grouplist ::= grouplist COMMA item", - /* 226 */ "grouplist ::= item", - /* 227 */ "having_opt 
::=", - /* 228 */ "having_opt ::= HAVING expr", - /* 229 */ "limit_opt ::=", - /* 230 */ "limit_opt ::= LIMIT signed", - /* 231 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 232 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 233 */ "slimit_opt ::=", - /* 234 */ "slimit_opt ::= SLIMIT signed", - /* 235 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 236 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 237 */ "where_opt ::=", - /* 238 */ "where_opt ::= WHERE expr", - /* 239 */ "expr ::= LP expr RP", - /* 240 */ "expr ::= ID", - /* 241 */ "expr ::= ID DOT ID", - /* 242 */ "expr ::= ID DOT STAR", - /* 243 */ "expr ::= INTEGER", - /* 244 */ "expr ::= MINUS INTEGER", - /* 245 */ "expr ::= PLUS INTEGER", - /* 246 */ "expr ::= FLOAT", - /* 247 */ "expr ::= MINUS FLOAT", - /* 248 */ "expr ::= PLUS FLOAT", - /* 249 */ "expr ::= STRING", - /* 250 */ "expr ::= NOW", - /* 251 */ "expr ::= VARIABLE", - /* 252 */ "expr ::= PLUS VARIABLE", - /* 253 */ "expr ::= MINUS VARIABLE", - /* 254 */ "expr ::= BOOL", - /* 255 */ "expr ::= NULL", - /* 256 */ "expr ::= ID LP exprlist RP", - /* 257 */ "expr ::= ID LP STAR RP", - /* 258 */ "expr ::= expr IS NULL", - /* 259 */ "expr ::= expr IS NOT NULL", - /* 260 */ "expr ::= expr LT expr", - /* 261 */ "expr ::= expr GT expr", - /* 262 */ "expr ::= expr LE expr", - /* 263 */ "expr ::= expr GE expr", - /* 264 */ "expr ::= expr NE expr", - /* 265 */ "expr ::= expr EQ expr", - /* 266 */ "expr ::= expr BETWEEN expr AND expr", - /* 267 */ "expr ::= expr AND expr", - /* 268 */ "expr ::= expr OR expr", - /* 269 */ "expr ::= expr PLUS expr", - /* 270 */ "expr ::= expr MINUS expr", - /* 271 */ "expr ::= expr STAR expr", - /* 272 */ "expr ::= expr SLASH expr", - /* 273 */ "expr ::= expr REM expr", - /* 274 */ "expr ::= expr LIKE expr", - /* 275 */ "expr ::= expr MATCH expr", - /* 276 */ "expr ::= expr NMATCH expr", - /* 277 */ "expr ::= expr IN LP exprlist RP", - /* 278 */ "exprlist ::= exprlist COMMA expritem", - /* 279 */ "exprlist ::= expritem", - /* 280 */ "expritem ::= expr", - /* 281 */ "expritem ::=", - /* 282 */ "cmd ::= RESET QUERY CACHE", - /* 283 */ "cmd ::= SYNCDB ids REPLICA", - /* 284 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 285 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 286 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", - /* 287 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 288 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 289 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 290 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 291 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", - /* 292 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 293 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 294 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", - /* 295 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 296 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 297 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 298 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", - /* 299 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", - /* 300 */ "cmd ::= KILL CONNECTION INTEGER", - /* 301 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 302 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 29 */ "cmd ::= DROP TABLE ifexists ids cpxName", + /* 30 */ "cmd ::= DROP STABLE ifexists ids cpxName", + /* 31 */ "cmd ::= DROP DATABASE ifexists ids", 
+ /* 32 */ "cmd ::= DROP TOPIC ifexists ids", + /* 33 */ "cmd ::= DROP FUNCTION ids", + /* 34 */ "cmd ::= DROP DNODE ids", + /* 35 */ "cmd ::= DROP USER ids", + /* 36 */ "cmd ::= DROP ACCOUNT ids", + /* 37 */ "cmd ::= USE ids", + /* 38 */ "cmd ::= DESCRIBE ids cpxName", + /* 39 */ "cmd ::= DESC ids cpxName", + /* 40 */ "cmd ::= ALTER USER ids PASS ids", + /* 41 */ "cmd ::= ALTER USER ids PRIVILEGE ids", + /* 42 */ "cmd ::= ALTER DNODE ids ids", + /* 43 */ "cmd ::= ALTER DNODE ids ids ids", + /* 44 */ "cmd ::= ALTER LOCAL ids", + /* 45 */ "cmd ::= ALTER LOCAL ids ids", + /* 46 */ "cmd ::= ALTER DATABASE ids alter_db_optr", + /* 47 */ "cmd ::= ALTER TOPIC ids alter_topic_optr", + /* 48 */ "cmd ::= ALTER ACCOUNT ids acct_optr", + /* 49 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", + /* 50 */ "cmd ::= COMPACT VNODES IN LP exprlist RP", + /* 51 */ "ids ::= ID", + /* 52 */ "ids ::= STRING", + /* 53 */ "ifexists ::= IF EXISTS", + /* 54 */ "ifexists ::=", + /* 55 */ "ifnotexists ::= IF NOT EXISTS", + /* 56 */ "ifnotexists ::=", + /* 57 */ "cmd ::= CREATE DNODE ids", + /* 58 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", + /* 59 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", + /* 60 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr", + /* 61 */ "cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize", + /* 62 */ "cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize", + /* 63 */ "cmd ::= CREATE USER ids PASS ids", + /* 64 */ "bufsize ::=", + /* 65 */ "bufsize ::= BUFSIZE INTEGER", + /* 66 */ "pps ::=", + /* 67 */ "pps ::= PPS INTEGER", + /* 68 */ "tseries ::=", + /* 69 */ "tseries ::= TSERIES INTEGER", + /* 70 */ "dbs ::=", + /* 71 */ "dbs ::= DBS INTEGER", + /* 72 */ "streams ::=", + /* 73 */ "streams ::= STREAMS INTEGER", + /* 74 */ "storage ::=", + /* 75 */ "storage ::= STORAGE INTEGER", + /* 76 */ "qtime ::=", + /* 77 */ "qtime ::= QTIME INTEGER", + /* 78 */ "users ::=", + /* 79 */ "users ::= USERS INTEGER", + /* 80 */ "conns ::=", + /* 81 */ "conns ::= CONNS INTEGER", + /* 82 */ "state ::=", + /* 83 */ "state ::= STATE ids", + /* 84 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", + /* 85 */ "intitemlist ::= intitemlist COMMA intitem", + /* 86 */ "intitemlist ::= intitem", + /* 87 */ "intitem ::= INTEGER", + /* 88 */ "keep ::= KEEP intitemlist", + /* 89 */ "cache ::= CACHE INTEGER", + /* 90 */ "replica ::= REPLICA INTEGER", + /* 91 */ "quorum ::= QUORUM INTEGER", + /* 92 */ "days ::= DAYS INTEGER", + /* 93 */ "minrows ::= MINROWS INTEGER", + /* 94 */ "maxrows ::= MAXROWS INTEGER", + /* 95 */ "blocks ::= BLOCKS INTEGER", + /* 96 */ "ctime ::= CTIME INTEGER", + /* 97 */ "wal ::= WAL INTEGER", + /* 98 */ "fsync ::= FSYNC INTEGER", + /* 99 */ "comp ::= COMP INTEGER", + /* 100 */ "prec ::= PRECISION STRING", + /* 101 */ "update ::= UPDATE INTEGER", + /* 102 */ "cachelast ::= CACHELAST INTEGER", + /* 103 */ "partitions ::= PARTITIONS INTEGER", + /* 104 */ "db_optr ::=", + /* 105 */ "db_optr ::= db_optr cache", + /* 106 */ "db_optr ::= db_optr replica", + /* 107 */ "db_optr ::= db_optr quorum", + /* 108 */ "db_optr ::= db_optr days", + /* 109 */ "db_optr ::= db_optr minrows", + /* 110 */ "db_optr ::= db_optr maxrows", + /* 111 */ "db_optr ::= db_optr blocks", + /* 112 */ "db_optr ::= db_optr ctime", + /* 113 */ "db_optr ::= db_optr wal", + /* 114 */ "db_optr ::= db_optr fsync", + /* 115 */ "db_optr ::= db_optr comp", + /* 116 */ "db_optr ::= db_optr prec", + /* 117 */ "db_optr ::= db_optr keep", + /* 118 */ "db_optr ::= db_optr 
update", + /* 119 */ "db_optr ::= db_optr cachelast", + /* 120 */ "topic_optr ::= db_optr", + /* 121 */ "topic_optr ::= topic_optr partitions", + /* 122 */ "alter_db_optr ::=", + /* 123 */ "alter_db_optr ::= alter_db_optr replica", + /* 124 */ "alter_db_optr ::= alter_db_optr quorum", + /* 125 */ "alter_db_optr ::= alter_db_optr keep", + /* 126 */ "alter_db_optr ::= alter_db_optr blocks", + /* 127 */ "alter_db_optr ::= alter_db_optr comp", + /* 128 */ "alter_db_optr ::= alter_db_optr update", + /* 129 */ "alter_db_optr ::= alter_db_optr cachelast", + /* 130 */ "alter_topic_optr ::= alter_db_optr", + /* 131 */ "alter_topic_optr ::= alter_topic_optr partitions", + /* 132 */ "typename ::= ids", + /* 133 */ "typename ::= ids LP signed RP", + /* 134 */ "typename ::= ids UNSIGNED", + /* 135 */ "signed ::= INTEGER", + /* 136 */ "signed ::= PLUS INTEGER", + /* 137 */ "signed ::= MINUS INTEGER", + /* 138 */ "cmd ::= CREATE TABLE create_table_args", + /* 139 */ "cmd ::= CREATE TABLE create_stable_args", + /* 140 */ "cmd ::= CREATE STABLE create_stable_args", + /* 141 */ "cmd ::= CREATE TABLE create_table_list", + /* 142 */ "create_table_list ::= create_from_stable", + /* 143 */ "create_table_list ::= create_table_list create_from_stable", + /* 144 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", + /* 145 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", + /* 146 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", + /* 147 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", + /* 148 */ "tagNamelist ::= tagNamelist COMMA ids", + /* 149 */ "tagNamelist ::= ids", + /* 150 */ "create_table_args ::= ifnotexists ids cpxName AS select", + /* 151 */ "columnlist ::= columnlist COMMA column", + /* 152 */ "columnlist ::= column", + /* 153 */ "column ::= ids typename", + /* 154 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 155 */ "tagitemlist ::= tagitem", + /* 156 */ "tagitem ::= INTEGER", + /* 157 */ "tagitem ::= FLOAT", + /* 158 */ "tagitem ::= STRING", + /* 159 */ "tagitem ::= BOOL", + /* 160 */ "tagitem ::= NULL", + /* 161 */ "tagitem ::= NOW", + /* 162 */ "tagitem ::= MINUS INTEGER", + /* 163 */ "tagitem ::= MINUS FLOAT", + /* 164 */ "tagitem ::= PLUS INTEGER", + /* 165 */ "tagitem ::= PLUS FLOAT", + /* 166 */ "select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt", + /* 167 */ "select ::= LP select RP", + /* 168 */ "union ::= select", + /* 169 */ "union ::= union UNION ALL select", + /* 170 */ "cmd ::= union", + /* 171 */ "select ::= SELECT selcollist", + /* 172 */ "sclp ::= selcollist COMMA", + /* 173 */ "sclp ::=", + /* 174 */ "selcollist ::= sclp distinct expr as", + /* 175 */ "selcollist ::= sclp STAR", + /* 176 */ "as ::= AS ids", + /* 177 */ "as ::= ids", + /* 178 */ "as ::=", + /* 179 */ "distinct ::= DISTINCT", + /* 180 */ "distinct ::=", + /* 181 */ "from ::= FROM tablelist", + /* 182 */ "from ::= FROM sub", + /* 183 */ "sub ::= LP union RP", + /* 184 */ "sub ::= LP union RP ids", + /* 185 */ "sub ::= sub COMMA LP union RP ids", + /* 186 */ "tablelist ::= ids cpxName", + /* 187 */ "tablelist ::= ids cpxName ids", + /* 188 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 189 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 190 */ "tmvar ::= VARIABLE", + /* 191 */ "timestamp ::= INTEGER", + 
/* 192 */ "timestamp ::= MINUS INTEGER", + /* 193 */ "timestamp ::= PLUS INTEGER", + /* 194 */ "timestamp ::= STRING", + /* 195 */ "timestamp ::= NOW", + /* 196 */ "timestamp ::= NOW PLUS VARIABLE", + /* 197 */ "timestamp ::= NOW MINUS VARIABLE", + /* 198 */ "range_option ::=", + /* 199 */ "range_option ::= RANGE LP timestamp COMMA timestamp RP", + /* 200 */ "interval_option ::= intervalKey LP tmvar RP", + /* 201 */ "interval_option ::= intervalKey LP tmvar COMMA tmvar RP", + /* 202 */ "interval_option ::=", + /* 203 */ "intervalKey ::= INTERVAL", + /* 204 */ "intervalKey ::= EVERY", + /* 205 */ "session_option ::=", + /* 206 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", + /* 207 */ "windowstate_option ::=", + /* 208 */ "windowstate_option ::= STATE_WINDOW LP ids RP", + /* 209 */ "fill_opt ::=", + /* 210 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 211 */ "fill_opt ::= FILL LP ID RP", + /* 212 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 213 */ "sliding_opt ::=", + /* 214 */ "orderby_opt ::=", + /* 215 */ "orderby_opt ::= ORDER BY sortlist", + /* 216 */ "sortlist ::= sortlist COMMA item sortorder", + /* 217 */ "sortlist ::= item sortorder", + /* 218 */ "item ::= ids cpxName", + /* 219 */ "sortorder ::= ASC", + /* 220 */ "sortorder ::= DESC", + /* 221 */ "sortorder ::=", + /* 222 */ "groupby_opt ::=", + /* 223 */ "groupby_opt ::= GROUP BY grouplist", + /* 224 */ "grouplist ::= grouplist COMMA item", + /* 225 */ "grouplist ::= item", + /* 226 */ "having_opt ::=", + /* 227 */ "having_opt ::= HAVING expr", + /* 228 */ "limit_opt ::=", + /* 229 */ "limit_opt ::= LIMIT signed", + /* 230 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 231 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 232 */ "slimit_opt ::=", + /* 233 */ "slimit_opt ::= SLIMIT signed", + /* 234 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 235 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 236 */ "where_opt ::=", + /* 237 */ "where_opt ::= WHERE expr", + /* 238 */ "expr ::= LP expr RP", + /* 239 */ "expr ::= ID", + /* 240 */ "expr ::= ID DOT ID", + /* 241 */ "expr ::= ID DOT STAR", + /* 242 */ "expr ::= INTEGER", + /* 243 */ "expr ::= MINUS INTEGER", + /* 244 */ "expr ::= PLUS INTEGER", + /* 245 */ "expr ::= FLOAT", + /* 246 */ "expr ::= MINUS FLOAT", + /* 247 */ "expr ::= PLUS FLOAT", + /* 248 */ "expr ::= STRING", + /* 249 */ "expr ::= NOW", + /* 250 */ "expr ::= VARIABLE", + /* 251 */ "expr ::= PLUS VARIABLE", + /* 252 */ "expr ::= MINUS VARIABLE", + /* 253 */ "expr ::= BOOL", + /* 254 */ "expr ::= NULL", + /* 255 */ "expr ::= ID LP exprlist RP", + /* 256 */ "expr ::= ID LP STAR RP", + /* 257 */ "expr ::= expr IS NULL", + /* 258 */ "expr ::= expr IS NOT NULL", + /* 259 */ "expr ::= expr LT expr", + /* 260 */ "expr ::= expr GT expr", + /* 261 */ "expr ::= expr LE expr", + /* 262 */ "expr ::= expr GE expr", + /* 263 */ "expr ::= expr NE expr", + /* 264 */ "expr ::= expr EQ expr", + /* 265 */ "expr ::= expr BETWEEN expr AND expr", + /* 266 */ "expr ::= expr AND expr", + /* 267 */ "expr ::= expr OR expr", + /* 268 */ "expr ::= expr PLUS expr", + /* 269 */ "expr ::= expr MINUS expr", + /* 270 */ "expr ::= expr STAR expr", + /* 271 */ "expr ::= expr SLASH expr", + /* 272 */ "expr ::= expr REM expr", + /* 273 */ "expr ::= expr LIKE expr", + /* 274 */ "expr ::= expr MATCH expr", + /* 275 */ "expr ::= expr NMATCH expr", + /* 276 */ "expr ::= expr IN LP exprlist RP", + /* 277 */ "exprlist ::= exprlist COMMA expritem", + /* 278 */ "exprlist ::= expritem", + /* 279 */ "expritem ::= 
expr", + /* 280 */ "expritem ::=", + /* 281 */ "cmd ::= RESET QUERY CACHE", + /* 282 */ "cmd ::= SYNCDB ids REPLICA", + /* 283 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 284 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 285 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", + /* 286 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 287 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 288 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 289 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 290 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", + /* 291 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 292 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 293 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 294 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 295 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 296 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 297 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 298 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 299 */ "cmd ::= KILL CONNECTION INTEGER", + /* 300 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 301 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1903,280 +1901,279 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 200, /* (26) cmd ::= SHOW dbPrefix STABLES */ 200, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ 200, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ - 200, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ - 200, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ - 200, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ - 200, /* (32) cmd ::= DROP DATABASE ifexists ids */ - 200, /* (33) cmd ::= DROP TOPIC ifexists ids */ - 200, /* (34) cmd ::= DROP FUNCTION ids */ - 200, /* (35) cmd ::= DROP DNODE ids */ - 200, /* (36) cmd ::= DROP USER ids */ - 200, /* (37) cmd ::= DROP ACCOUNT ids */ - 200, /* (38) cmd ::= USE ids */ - 200, /* (39) cmd ::= DESCRIBE ids cpxName */ - 200, /* (40) cmd ::= DESC ids cpxName */ - 200, /* (41) cmd ::= ALTER USER ids PASS ids */ - 200, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ - 200, /* (43) cmd ::= ALTER DNODE ids ids */ - 200, /* (44) cmd ::= ALTER DNODE ids ids ids */ - 200, /* (45) cmd ::= ALTER LOCAL ids */ - 200, /* (46) cmd ::= ALTER LOCAL ids ids */ - 200, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ - 200, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ - 200, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ - 200, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - 200, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ - 201, /* (52) ids ::= ID */ - 201, /* (53) ids ::= STRING */ - 204, /* (54) ifexists ::= IF EXISTS */ - 204, /* (55) ifexists ::= */ - 209, /* (56) ifnotexists ::= IF NOT EXISTS */ - 209, /* (57) ifnotexists ::= */ - 200, /* (58) cmd ::= CREATE DNODE ids */ - 200, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - 200, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - 200, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - 200, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - 200, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - 200, /* (64) cmd ::= CREATE USER ids PASS ids */ - 213, /* (65) bufsize ::= */ - 213, /* (66) bufsize ::= BUFSIZE INTEGER */ - 214, /* (67) pps ::= */ - 214, /* (68) pps ::= PPS 
INTEGER */ - 215, /* (69) tseries ::= */ - 215, /* (70) tseries ::= TSERIES INTEGER */ - 216, /* (71) dbs ::= */ - 216, /* (72) dbs ::= DBS INTEGER */ - 217, /* (73) streams ::= */ - 217, /* (74) streams ::= STREAMS INTEGER */ - 218, /* (75) storage ::= */ - 218, /* (76) storage ::= STORAGE INTEGER */ - 219, /* (77) qtime ::= */ - 219, /* (78) qtime ::= QTIME INTEGER */ - 220, /* (79) users ::= */ - 220, /* (80) users ::= USERS INTEGER */ - 221, /* (81) conns ::= */ - 221, /* (82) conns ::= CONNS INTEGER */ - 222, /* (83) state ::= */ - 222, /* (84) state ::= STATE ids */ - 207, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - 223, /* (86) intitemlist ::= intitemlist COMMA intitem */ - 223, /* (87) intitemlist ::= intitem */ - 224, /* (88) intitem ::= INTEGER */ - 225, /* (89) keep ::= KEEP intitemlist */ - 226, /* (90) cache ::= CACHE INTEGER */ - 227, /* (91) replica ::= REPLICA INTEGER */ - 228, /* (92) quorum ::= QUORUM INTEGER */ - 229, /* (93) days ::= DAYS INTEGER */ - 230, /* (94) minrows ::= MINROWS INTEGER */ - 231, /* (95) maxrows ::= MAXROWS INTEGER */ - 232, /* (96) blocks ::= BLOCKS INTEGER */ - 233, /* (97) ctime ::= CTIME INTEGER */ - 234, /* (98) wal ::= WAL INTEGER */ - 235, /* (99) fsync ::= FSYNC INTEGER */ - 236, /* (100) comp ::= COMP INTEGER */ - 237, /* (101) prec ::= PRECISION STRING */ - 238, /* (102) update ::= UPDATE INTEGER */ - 239, /* (103) cachelast ::= CACHELAST INTEGER */ - 240, /* (104) partitions ::= PARTITIONS INTEGER */ - 210, /* (105) db_optr ::= */ - 210, /* (106) db_optr ::= db_optr cache */ - 210, /* (107) db_optr ::= db_optr replica */ - 210, /* (108) db_optr ::= db_optr quorum */ - 210, /* (109) db_optr ::= db_optr days */ - 210, /* (110) db_optr ::= db_optr minrows */ - 210, /* (111) db_optr ::= db_optr maxrows */ - 210, /* (112) db_optr ::= db_optr blocks */ - 210, /* (113) db_optr ::= db_optr ctime */ - 210, /* (114) db_optr ::= db_optr wal */ - 210, /* (115) db_optr ::= db_optr fsync */ - 210, /* (116) db_optr ::= db_optr comp */ - 210, /* (117) db_optr ::= db_optr prec */ - 210, /* (118) db_optr ::= db_optr keep */ - 210, /* (119) db_optr ::= db_optr update */ - 210, /* (120) db_optr ::= db_optr cachelast */ - 211, /* (121) topic_optr ::= db_optr */ - 211, /* (122) topic_optr ::= topic_optr partitions */ - 205, /* (123) alter_db_optr ::= */ - 205, /* (124) alter_db_optr ::= alter_db_optr replica */ - 205, /* (125) alter_db_optr ::= alter_db_optr quorum */ - 205, /* (126) alter_db_optr ::= alter_db_optr keep */ - 205, /* (127) alter_db_optr ::= alter_db_optr blocks */ - 205, /* (128) alter_db_optr ::= alter_db_optr comp */ - 205, /* (129) alter_db_optr ::= alter_db_optr update */ - 205, /* (130) alter_db_optr ::= alter_db_optr cachelast */ - 206, /* (131) alter_topic_optr ::= alter_db_optr */ - 206, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ - 212, /* (133) typename ::= ids */ - 212, /* (134) typename ::= ids LP signed RP */ - 212, /* (135) typename ::= ids UNSIGNED */ - 241, /* (136) signed ::= INTEGER */ - 241, /* (137) signed ::= PLUS INTEGER */ - 241, /* (138) signed ::= MINUS INTEGER */ - 200, /* (139) cmd ::= CREATE TABLE create_table_args */ - 200, /* (140) cmd ::= CREATE TABLE create_stable_args */ - 200, /* (141) cmd ::= CREATE STABLE create_stable_args */ - 200, /* (142) cmd ::= CREATE TABLE create_table_list */ - 244, /* (143) create_table_list ::= create_from_stable */ - 244, /* (144) create_table_list ::= create_table_list create_from_stable */ - 242, /* (145) create_table_args 
::= ifnotexists ids cpxName LP columnlist RP */ - 243, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - 245, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - 245, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - 248, /* (149) tagNamelist ::= tagNamelist COMMA ids */ - 248, /* (150) tagNamelist ::= ids */ - 242, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */ - 246, /* (152) columnlist ::= columnlist COMMA column */ - 246, /* (153) columnlist ::= column */ - 250, /* (154) column ::= ids typename */ - 247, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ - 247, /* (156) tagitemlist ::= tagitem */ - 251, /* (157) tagitem ::= INTEGER */ - 251, /* (158) tagitem ::= FLOAT */ - 251, /* (159) tagitem ::= STRING */ - 251, /* (160) tagitem ::= BOOL */ - 251, /* (161) tagitem ::= NULL */ - 251, /* (162) tagitem ::= NOW */ - 251, /* (163) tagitem ::= MINUS INTEGER */ - 251, /* (164) tagitem ::= MINUS FLOAT */ - 251, /* (165) tagitem ::= PLUS INTEGER */ - 251, /* (166) tagitem ::= PLUS FLOAT */ - 249, /* (167) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ - 249, /* (168) select ::= LP select RP */ - 266, /* (169) union ::= select */ - 266, /* (170) union ::= union UNION ALL select */ - 200, /* (171) cmd ::= union */ - 249, /* (172) select ::= SELECT selcollist */ - 267, /* (173) sclp ::= selcollist COMMA */ - 267, /* (174) sclp ::= */ - 252, /* (175) selcollist ::= sclp distinct expr as */ - 252, /* (176) selcollist ::= sclp STAR */ - 270, /* (177) as ::= AS ids */ - 270, /* (178) as ::= ids */ - 270, /* (179) as ::= */ - 268, /* (180) distinct ::= DISTINCT */ - 268, /* (181) distinct ::= */ - 253, /* (182) from ::= FROM tablelist */ - 253, /* (183) from ::= FROM sub */ - 272, /* (184) sub ::= LP union RP */ - 272, /* (185) sub ::= LP union RP ids */ - 272, /* (186) sub ::= sub COMMA LP union RP ids */ - 271, /* (187) tablelist ::= ids cpxName */ - 271, /* (188) tablelist ::= ids cpxName ids */ - 271, /* (189) tablelist ::= tablelist COMMA ids cpxName */ - 271, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ - 273, /* (191) tmvar ::= VARIABLE */ - 274, /* (192) timestamp ::= INTEGER */ - 274, /* (193) timestamp ::= MINUS INTEGER */ - 274, /* (194) timestamp ::= PLUS INTEGER */ - 274, /* (195) timestamp ::= STRING */ - 274, /* (196) timestamp ::= NOW */ - 274, /* (197) timestamp ::= NOW PLUS VARIABLE */ - 274, /* (198) timestamp ::= NOW MINUS VARIABLE */ - 255, /* (199) range_option ::= */ - 255, /* (200) range_option ::= RANGE LP timestamp COMMA timestamp RP */ - 256, /* (201) interval_option ::= intervalKey LP tmvar RP */ - 256, /* (202) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ - 256, /* (203) interval_option ::= */ - 275, /* (204) intervalKey ::= INTERVAL */ - 275, /* (205) intervalKey ::= EVERY */ - 258, /* (206) session_option ::= */ - 258, /* (207) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 259, /* (208) windowstate_option ::= */ - 259, /* (209) windowstate_option ::= STATE_WINDOW LP ids RP */ - 260, /* (210) fill_opt ::= */ - 260, /* (211) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - 260, /* (212) fill_opt ::= FILL LP ID RP */ - 257, /* (213) sliding_opt ::= SLIDING LP tmvar RP */ - 257, /* (214) sliding_opt 
::= */ - 263, /* (215) orderby_opt ::= */ - 263, /* (216) orderby_opt ::= ORDER BY sortlist */ - 276, /* (217) sortlist ::= sortlist COMMA item sortorder */ - 276, /* (218) sortlist ::= item sortorder */ - 278, /* (219) item ::= ids cpxName */ - 279, /* (220) sortorder ::= ASC */ - 279, /* (221) sortorder ::= DESC */ - 279, /* (222) sortorder ::= */ - 261, /* (223) groupby_opt ::= */ - 261, /* (224) groupby_opt ::= GROUP BY grouplist */ - 280, /* (225) grouplist ::= grouplist COMMA item */ - 280, /* (226) grouplist ::= item */ - 262, /* (227) having_opt ::= */ - 262, /* (228) having_opt ::= HAVING expr */ - 265, /* (229) limit_opt ::= */ - 265, /* (230) limit_opt ::= LIMIT signed */ - 265, /* (231) limit_opt ::= LIMIT signed OFFSET signed */ - 265, /* (232) limit_opt ::= LIMIT signed COMMA signed */ - 264, /* (233) slimit_opt ::= */ - 264, /* (234) slimit_opt ::= SLIMIT signed */ - 264, /* (235) slimit_opt ::= SLIMIT signed SOFFSET signed */ - 264, /* (236) slimit_opt ::= SLIMIT signed COMMA signed */ - 254, /* (237) where_opt ::= */ - 254, /* (238) where_opt ::= WHERE expr */ - 269, /* (239) expr ::= LP expr RP */ - 269, /* (240) expr ::= ID */ - 269, /* (241) expr ::= ID DOT ID */ - 269, /* (242) expr ::= ID DOT STAR */ - 269, /* (243) expr ::= INTEGER */ - 269, /* (244) expr ::= MINUS INTEGER */ - 269, /* (245) expr ::= PLUS INTEGER */ - 269, /* (246) expr ::= FLOAT */ - 269, /* (247) expr ::= MINUS FLOAT */ - 269, /* (248) expr ::= PLUS FLOAT */ - 269, /* (249) expr ::= STRING */ - 269, /* (250) expr ::= NOW */ - 269, /* (251) expr ::= VARIABLE */ - 269, /* (252) expr ::= PLUS VARIABLE */ - 269, /* (253) expr ::= MINUS VARIABLE */ - 269, /* (254) expr ::= BOOL */ - 269, /* (255) expr ::= NULL */ - 269, /* (256) expr ::= ID LP exprlist RP */ - 269, /* (257) expr ::= ID LP STAR RP */ - 269, /* (258) expr ::= expr IS NULL */ - 269, /* (259) expr ::= expr IS NOT NULL */ - 269, /* (260) expr ::= expr LT expr */ - 269, /* (261) expr ::= expr GT expr */ - 269, /* (262) expr ::= expr LE expr */ - 269, /* (263) expr ::= expr GE expr */ - 269, /* (264) expr ::= expr NE expr */ - 269, /* (265) expr ::= expr EQ expr */ - 269, /* (266) expr ::= expr BETWEEN expr AND expr */ - 269, /* (267) expr ::= expr AND expr */ - 269, /* (268) expr ::= expr OR expr */ - 269, /* (269) expr ::= expr PLUS expr */ - 269, /* (270) expr ::= expr MINUS expr */ - 269, /* (271) expr ::= expr STAR expr */ - 269, /* (272) expr ::= expr SLASH expr */ - 269, /* (273) expr ::= expr REM expr */ - 269, /* (274) expr ::= expr LIKE expr */ - 269, /* (275) expr ::= expr MATCH expr */ - 269, /* (276) expr ::= expr NMATCH expr */ - 269, /* (277) expr ::= expr IN LP exprlist RP */ - 208, /* (278) exprlist ::= exprlist COMMA expritem */ - 208, /* (279) exprlist ::= expritem */ - 281, /* (280) expritem ::= expr */ - 281, /* (281) expritem ::= */ - 200, /* (282) cmd ::= RESET QUERY CACHE */ - 200, /* (283) cmd ::= SYNCDB ids REPLICA */ - 200, /* (284) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - 200, /* (285) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - 200, /* (286) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - 200, /* (287) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - 200, /* (288) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - 200, /* (289) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - 200, /* (290) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - 200, /* (291) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - 200, /* (292) cmd ::= ALTER STABLE ids 
cpxName ADD COLUMN columnlist */ - 200, /* (293) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 200, /* (294) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - 200, /* (295) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 200, /* (296) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 200, /* (297) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 200, /* (298) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - 200, /* (299) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - 200, /* (300) cmd ::= KILL CONNECTION INTEGER */ - 200, /* (301) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 200, /* (302) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + 200, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ + 200, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ + 200, /* (31) cmd ::= DROP DATABASE ifexists ids */ + 200, /* (32) cmd ::= DROP TOPIC ifexists ids */ + 200, /* (33) cmd ::= DROP FUNCTION ids */ + 200, /* (34) cmd ::= DROP DNODE ids */ + 200, /* (35) cmd ::= DROP USER ids */ + 200, /* (36) cmd ::= DROP ACCOUNT ids */ + 200, /* (37) cmd ::= USE ids */ + 200, /* (38) cmd ::= DESCRIBE ids cpxName */ + 200, /* (39) cmd ::= DESC ids cpxName */ + 200, /* (40) cmd ::= ALTER USER ids PASS ids */ + 200, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */ + 200, /* (42) cmd ::= ALTER DNODE ids ids */ + 200, /* (43) cmd ::= ALTER DNODE ids ids ids */ + 200, /* (44) cmd ::= ALTER LOCAL ids */ + 200, /* (45) cmd ::= ALTER LOCAL ids ids */ + 200, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */ + 200, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */ + 200, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */ + 200, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + 200, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */ + 201, /* (51) ids ::= ID */ + 201, /* (52) ids ::= STRING */ + 204, /* (53) ifexists ::= IF EXISTS */ + 204, /* (54) ifexists ::= */ + 209, /* (55) ifnotexists ::= IF NOT EXISTS */ + 209, /* (56) ifnotexists ::= */ + 200, /* (57) cmd ::= CREATE DNODE ids */ + 200, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + 200, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + 200, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + 200, /* (61) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 200, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 200, /* (63) cmd ::= CREATE USER ids PASS ids */ + 213, /* (64) bufsize ::= */ + 213, /* (65) bufsize ::= BUFSIZE INTEGER */ + 214, /* (66) pps ::= */ + 214, /* (67) pps ::= PPS INTEGER */ + 215, /* (68) tseries ::= */ + 215, /* (69) tseries ::= TSERIES INTEGER */ + 216, /* (70) dbs ::= */ + 216, /* (71) dbs ::= DBS INTEGER */ + 217, /* (72) streams ::= */ + 217, /* (73) streams ::= STREAMS INTEGER */ + 218, /* (74) storage ::= */ + 218, /* (75) storage ::= STORAGE INTEGER */ + 219, /* (76) qtime ::= */ + 219, /* (77) qtime ::= QTIME INTEGER */ + 220, /* (78) users ::= */ + 220, /* (79) users ::= USERS INTEGER */ + 221, /* (80) conns ::= */ + 221, /* (81) conns ::= CONNS INTEGER */ + 222, /* (82) state ::= */ + 222, /* (83) state ::= STATE ids */ + 207, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + 223, /* (85) intitemlist ::= intitemlist COMMA intitem */ + 223, /* (86) intitemlist ::= intitem */ + 224, /* (87) intitem ::= INTEGER */ + 225, /* (88) keep ::= KEEP intitemlist */ + 226, /* (89) cache ::= CACHE INTEGER */ + 227, /* (90) replica ::= REPLICA INTEGER */ + 228, 
/* (91) quorum ::= QUORUM INTEGER */ + 229, /* (92) days ::= DAYS INTEGER */ + 230, /* (93) minrows ::= MINROWS INTEGER */ + 231, /* (94) maxrows ::= MAXROWS INTEGER */ + 232, /* (95) blocks ::= BLOCKS INTEGER */ + 233, /* (96) ctime ::= CTIME INTEGER */ + 234, /* (97) wal ::= WAL INTEGER */ + 235, /* (98) fsync ::= FSYNC INTEGER */ + 236, /* (99) comp ::= COMP INTEGER */ + 237, /* (100) prec ::= PRECISION STRING */ + 238, /* (101) update ::= UPDATE INTEGER */ + 239, /* (102) cachelast ::= CACHELAST INTEGER */ + 240, /* (103) partitions ::= PARTITIONS INTEGER */ + 210, /* (104) db_optr ::= */ + 210, /* (105) db_optr ::= db_optr cache */ + 210, /* (106) db_optr ::= db_optr replica */ + 210, /* (107) db_optr ::= db_optr quorum */ + 210, /* (108) db_optr ::= db_optr days */ + 210, /* (109) db_optr ::= db_optr minrows */ + 210, /* (110) db_optr ::= db_optr maxrows */ + 210, /* (111) db_optr ::= db_optr blocks */ + 210, /* (112) db_optr ::= db_optr ctime */ + 210, /* (113) db_optr ::= db_optr wal */ + 210, /* (114) db_optr ::= db_optr fsync */ + 210, /* (115) db_optr ::= db_optr comp */ + 210, /* (116) db_optr ::= db_optr prec */ + 210, /* (117) db_optr ::= db_optr keep */ + 210, /* (118) db_optr ::= db_optr update */ + 210, /* (119) db_optr ::= db_optr cachelast */ + 211, /* (120) topic_optr ::= db_optr */ + 211, /* (121) topic_optr ::= topic_optr partitions */ + 205, /* (122) alter_db_optr ::= */ + 205, /* (123) alter_db_optr ::= alter_db_optr replica */ + 205, /* (124) alter_db_optr ::= alter_db_optr quorum */ + 205, /* (125) alter_db_optr ::= alter_db_optr keep */ + 205, /* (126) alter_db_optr ::= alter_db_optr blocks */ + 205, /* (127) alter_db_optr ::= alter_db_optr comp */ + 205, /* (128) alter_db_optr ::= alter_db_optr update */ + 205, /* (129) alter_db_optr ::= alter_db_optr cachelast */ + 206, /* (130) alter_topic_optr ::= alter_db_optr */ + 206, /* (131) alter_topic_optr ::= alter_topic_optr partitions */ + 212, /* (132) typename ::= ids */ + 212, /* (133) typename ::= ids LP signed RP */ + 212, /* (134) typename ::= ids UNSIGNED */ + 241, /* (135) signed ::= INTEGER */ + 241, /* (136) signed ::= PLUS INTEGER */ + 241, /* (137) signed ::= MINUS INTEGER */ + 200, /* (138) cmd ::= CREATE TABLE create_table_args */ + 200, /* (139) cmd ::= CREATE TABLE create_stable_args */ + 200, /* (140) cmd ::= CREATE STABLE create_stable_args */ + 200, /* (141) cmd ::= CREATE TABLE create_table_list */ + 244, /* (142) create_table_list ::= create_from_stable */ + 244, /* (143) create_table_list ::= create_table_list create_from_stable */ + 242, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + 243, /* (145) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + 245, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + 245, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + 248, /* (148) tagNamelist ::= tagNamelist COMMA ids */ + 248, /* (149) tagNamelist ::= ids */ + 242, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */ + 246, /* (151) columnlist ::= columnlist COMMA column */ + 246, /* (152) columnlist ::= column */ + 250, /* (153) column ::= ids typename */ + 247, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */ + 247, /* (155) tagitemlist ::= tagitem */ + 251, /* (156) tagitem ::= INTEGER */ + 251, /* (157) tagitem ::= FLOAT */ + 251, /* (158) tagitem ::= STRING */ + 251, /* (159) tagitem ::= 
BOOL */ + 251, /* (160) tagitem ::= NULL */ + 251, /* (161) tagitem ::= NOW */ + 251, /* (162) tagitem ::= MINUS INTEGER */ + 251, /* (163) tagitem ::= MINUS FLOAT */ + 251, /* (164) tagitem ::= PLUS INTEGER */ + 251, /* (165) tagitem ::= PLUS FLOAT */ + 249, /* (166) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + 249, /* (167) select ::= LP select RP */ + 266, /* (168) union ::= select */ + 266, /* (169) union ::= union UNION ALL select */ + 200, /* (170) cmd ::= union */ + 249, /* (171) select ::= SELECT selcollist */ + 267, /* (172) sclp ::= selcollist COMMA */ + 267, /* (173) sclp ::= */ + 252, /* (174) selcollist ::= sclp distinct expr as */ + 252, /* (175) selcollist ::= sclp STAR */ + 270, /* (176) as ::= AS ids */ + 270, /* (177) as ::= ids */ + 270, /* (178) as ::= */ + 268, /* (179) distinct ::= DISTINCT */ + 268, /* (180) distinct ::= */ + 253, /* (181) from ::= FROM tablelist */ + 253, /* (182) from ::= FROM sub */ + 272, /* (183) sub ::= LP union RP */ + 272, /* (184) sub ::= LP union RP ids */ + 272, /* (185) sub ::= sub COMMA LP union RP ids */ + 271, /* (186) tablelist ::= ids cpxName */ + 271, /* (187) tablelist ::= ids cpxName ids */ + 271, /* (188) tablelist ::= tablelist COMMA ids cpxName */ + 271, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */ + 273, /* (190) tmvar ::= VARIABLE */ + 274, /* (191) timestamp ::= INTEGER */ + 274, /* (192) timestamp ::= MINUS INTEGER */ + 274, /* (193) timestamp ::= PLUS INTEGER */ + 274, /* (194) timestamp ::= STRING */ + 274, /* (195) timestamp ::= NOW */ + 274, /* (196) timestamp ::= NOW PLUS VARIABLE */ + 274, /* (197) timestamp ::= NOW MINUS VARIABLE */ + 255, /* (198) range_option ::= */ + 255, /* (199) range_option ::= RANGE LP timestamp COMMA timestamp RP */ + 256, /* (200) interval_option ::= intervalKey LP tmvar RP */ + 256, /* (201) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 256, /* (202) interval_option ::= */ + 275, /* (203) intervalKey ::= INTERVAL */ + 275, /* (204) intervalKey ::= EVERY */ + 258, /* (205) session_option ::= */ + 258, /* (206) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 259, /* (207) windowstate_option ::= */ + 259, /* (208) windowstate_option ::= STATE_WINDOW LP ids RP */ + 260, /* (209) fill_opt ::= */ + 260, /* (210) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + 260, /* (211) fill_opt ::= FILL LP ID RP */ + 257, /* (212) sliding_opt ::= SLIDING LP tmvar RP */ + 257, /* (213) sliding_opt ::= */ + 263, /* (214) orderby_opt ::= */ + 263, /* (215) orderby_opt ::= ORDER BY sortlist */ + 276, /* (216) sortlist ::= sortlist COMMA item sortorder */ + 276, /* (217) sortlist ::= item sortorder */ + 278, /* (218) item ::= ids cpxName */ + 279, /* (219) sortorder ::= ASC */ + 279, /* (220) sortorder ::= DESC */ + 279, /* (221) sortorder ::= */ + 261, /* (222) groupby_opt ::= */ + 261, /* (223) groupby_opt ::= GROUP BY grouplist */ + 280, /* (224) grouplist ::= grouplist COMMA item */ + 280, /* (225) grouplist ::= item */ + 262, /* (226) having_opt ::= */ + 262, /* (227) having_opt ::= HAVING expr */ + 265, /* (228) limit_opt ::= */ + 265, /* (229) limit_opt ::= LIMIT signed */ + 265, /* (230) limit_opt ::= LIMIT signed OFFSET signed */ + 265, /* (231) limit_opt ::= LIMIT signed COMMA signed */ + 264, /* (232) slimit_opt ::= */ + 264, /* (233) slimit_opt ::= SLIMIT signed */ + 264, /* (234) slimit_opt ::= SLIMIT signed SOFFSET 
signed */ + 264, /* (235) slimit_opt ::= SLIMIT signed COMMA signed */ + 254, /* (236) where_opt ::= */ + 254, /* (237) where_opt ::= WHERE expr */ + 269, /* (238) expr ::= LP expr RP */ + 269, /* (239) expr ::= ID */ + 269, /* (240) expr ::= ID DOT ID */ + 269, /* (241) expr ::= ID DOT STAR */ + 269, /* (242) expr ::= INTEGER */ + 269, /* (243) expr ::= MINUS INTEGER */ + 269, /* (244) expr ::= PLUS INTEGER */ + 269, /* (245) expr ::= FLOAT */ + 269, /* (246) expr ::= MINUS FLOAT */ + 269, /* (247) expr ::= PLUS FLOAT */ + 269, /* (248) expr ::= STRING */ + 269, /* (249) expr ::= NOW */ + 269, /* (250) expr ::= VARIABLE */ + 269, /* (251) expr ::= PLUS VARIABLE */ + 269, /* (252) expr ::= MINUS VARIABLE */ + 269, /* (253) expr ::= BOOL */ + 269, /* (254) expr ::= NULL */ + 269, /* (255) expr ::= ID LP exprlist RP */ + 269, /* (256) expr ::= ID LP STAR RP */ + 269, /* (257) expr ::= expr IS NULL */ + 269, /* (258) expr ::= expr IS NOT NULL */ + 269, /* (259) expr ::= expr LT expr */ + 269, /* (260) expr ::= expr GT expr */ + 269, /* (261) expr ::= expr LE expr */ + 269, /* (262) expr ::= expr GE expr */ + 269, /* (263) expr ::= expr NE expr */ + 269, /* (264) expr ::= expr EQ expr */ + 269, /* (265) expr ::= expr BETWEEN expr AND expr */ + 269, /* (266) expr ::= expr AND expr */ + 269, /* (267) expr ::= expr OR expr */ + 269, /* (268) expr ::= expr PLUS expr */ + 269, /* (269) expr ::= expr MINUS expr */ + 269, /* (270) expr ::= expr STAR expr */ + 269, /* (271) expr ::= expr SLASH expr */ + 269, /* (272) expr ::= expr REM expr */ + 269, /* (273) expr ::= expr LIKE expr */ + 269, /* (274) expr ::= expr MATCH expr */ + 269, /* (275) expr ::= expr NMATCH expr */ + 269, /* (276) expr ::= expr IN LP exprlist RP */ + 208, /* (277) exprlist ::= exprlist COMMA expritem */ + 208, /* (278) exprlist ::= expritem */ + 281, /* (279) expritem ::= expr */ + 281, /* (280) expritem ::= */ + 200, /* (281) cmd ::= RESET QUERY CACHE */ + 200, /* (282) cmd ::= SYNCDB ids REPLICA */ + 200, /* (283) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 200, /* (284) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 200, /* (285) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + 200, /* (286) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 200, /* (287) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 200, /* (288) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 200, /* (289) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 200, /* (290) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + 200, /* (291) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 200, /* (292) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 200, /* (293) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + 200, /* (294) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 200, /* (295) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 200, /* (296) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 200, /* (297) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + 200, /* (298) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + 200, /* (299) cmd ::= KILL CONNECTION INTEGER */ + 200, /* (300) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 200, /* (301) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -2211,280 +2208,279 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (26) cmd ::= SHOW dbPrefix STABLES */ -5, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ 
-3, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ - -4, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ - -5, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ - -5, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ - -4, /* (32) cmd ::= DROP DATABASE ifexists ids */ - -4, /* (33) cmd ::= DROP TOPIC ifexists ids */ - -3, /* (34) cmd ::= DROP FUNCTION ids */ - -3, /* (35) cmd ::= DROP DNODE ids */ - -3, /* (36) cmd ::= DROP USER ids */ - -3, /* (37) cmd ::= DROP ACCOUNT ids */ - -2, /* (38) cmd ::= USE ids */ - -3, /* (39) cmd ::= DESCRIBE ids cpxName */ - -3, /* (40) cmd ::= DESC ids cpxName */ - -5, /* (41) cmd ::= ALTER USER ids PASS ids */ - -5, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ - -4, /* (43) cmd ::= ALTER DNODE ids ids */ - -5, /* (44) cmd ::= ALTER DNODE ids ids ids */ - -3, /* (45) cmd ::= ALTER LOCAL ids */ - -4, /* (46) cmd ::= ALTER LOCAL ids ids */ - -4, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ - -4, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ - -4, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ - -6, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - -6, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ - -1, /* (52) ids ::= ID */ - -1, /* (53) ids ::= STRING */ - -2, /* (54) ifexists ::= IF EXISTS */ - 0, /* (55) ifexists ::= */ - -3, /* (56) ifnotexists ::= IF NOT EXISTS */ - 0, /* (57) ifnotexists ::= */ - -3, /* (58) cmd ::= CREATE DNODE ids */ - -6, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - -5, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - -5, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - -8, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - -9, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - -5, /* (64) cmd ::= CREATE USER ids PASS ids */ - 0, /* (65) bufsize ::= */ - -2, /* (66) bufsize ::= BUFSIZE INTEGER */ - 0, /* (67) pps ::= */ - -2, /* (68) pps ::= PPS INTEGER */ - 0, /* (69) tseries ::= */ - -2, /* (70) tseries ::= TSERIES INTEGER */ - 0, /* (71) dbs ::= */ - -2, /* (72) dbs ::= DBS INTEGER */ - 0, /* (73) streams ::= */ - -2, /* (74) streams ::= STREAMS INTEGER */ - 0, /* (75) storage ::= */ - -2, /* (76) storage ::= STORAGE INTEGER */ - 0, /* (77) qtime ::= */ - -2, /* (78) qtime ::= QTIME INTEGER */ - 0, /* (79) users ::= */ - -2, /* (80) users ::= USERS INTEGER */ - 0, /* (81) conns ::= */ - -2, /* (82) conns ::= CONNS INTEGER */ - 0, /* (83) state ::= */ - -2, /* (84) state ::= STATE ids */ - -9, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - -3, /* (86) intitemlist ::= intitemlist COMMA intitem */ - -1, /* (87) intitemlist ::= intitem */ - -1, /* (88) intitem ::= INTEGER */ - -2, /* (89) keep ::= KEEP intitemlist */ - -2, /* (90) cache ::= CACHE INTEGER */ - -2, /* (91) replica ::= REPLICA INTEGER */ - -2, /* (92) quorum ::= QUORUM INTEGER */ - -2, /* (93) days ::= DAYS INTEGER */ - -2, /* (94) minrows ::= MINROWS INTEGER */ - -2, /* (95) maxrows ::= MAXROWS INTEGER */ - -2, /* (96) blocks ::= BLOCKS INTEGER */ - -2, /* (97) ctime ::= CTIME INTEGER */ - -2, /* (98) wal ::= WAL INTEGER */ - -2, /* (99) fsync ::= FSYNC INTEGER */ - -2, /* (100) comp ::= COMP INTEGER */ - -2, /* (101) prec ::= PRECISION STRING */ - -2, /* (102) update ::= UPDATE INTEGER */ - -2, /* (103) cachelast ::= CACHELAST INTEGER */ - -2, /* (104) partitions ::= PARTITIONS INTEGER */ - 0, /* (105) db_optr ::= */ - -2, /* (106) db_optr ::= db_optr cache */ - -2, /* (107) db_optr ::= db_optr replica */ - -2, /* 
(108) db_optr ::= db_optr quorum */ - -2, /* (109) db_optr ::= db_optr days */ - -2, /* (110) db_optr ::= db_optr minrows */ - -2, /* (111) db_optr ::= db_optr maxrows */ - -2, /* (112) db_optr ::= db_optr blocks */ - -2, /* (113) db_optr ::= db_optr ctime */ - -2, /* (114) db_optr ::= db_optr wal */ - -2, /* (115) db_optr ::= db_optr fsync */ - -2, /* (116) db_optr ::= db_optr comp */ - -2, /* (117) db_optr ::= db_optr prec */ - -2, /* (118) db_optr ::= db_optr keep */ - -2, /* (119) db_optr ::= db_optr update */ - -2, /* (120) db_optr ::= db_optr cachelast */ - -1, /* (121) topic_optr ::= db_optr */ - -2, /* (122) topic_optr ::= topic_optr partitions */ - 0, /* (123) alter_db_optr ::= */ - -2, /* (124) alter_db_optr ::= alter_db_optr replica */ - -2, /* (125) alter_db_optr ::= alter_db_optr quorum */ - -2, /* (126) alter_db_optr ::= alter_db_optr keep */ - -2, /* (127) alter_db_optr ::= alter_db_optr blocks */ - -2, /* (128) alter_db_optr ::= alter_db_optr comp */ - -2, /* (129) alter_db_optr ::= alter_db_optr update */ - -2, /* (130) alter_db_optr ::= alter_db_optr cachelast */ - -1, /* (131) alter_topic_optr ::= alter_db_optr */ - -2, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ - -1, /* (133) typename ::= ids */ - -4, /* (134) typename ::= ids LP signed RP */ - -2, /* (135) typename ::= ids UNSIGNED */ - -1, /* (136) signed ::= INTEGER */ - -2, /* (137) signed ::= PLUS INTEGER */ - -2, /* (138) signed ::= MINUS INTEGER */ - -3, /* (139) cmd ::= CREATE TABLE create_table_args */ - -3, /* (140) cmd ::= CREATE TABLE create_stable_args */ - -3, /* (141) cmd ::= CREATE STABLE create_stable_args */ - -3, /* (142) cmd ::= CREATE TABLE create_table_list */ - -1, /* (143) create_table_list ::= create_from_stable */ - -2, /* (144) create_table_list ::= create_table_list create_from_stable */ - -6, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - -10, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - -10, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - -13, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - -3, /* (149) tagNamelist ::= tagNamelist COMMA ids */ - -1, /* (150) tagNamelist ::= ids */ - -5, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */ - -3, /* (152) columnlist ::= columnlist COMMA column */ - -1, /* (153) columnlist ::= column */ - -2, /* (154) column ::= ids typename */ - -3, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ - -1, /* (156) tagitemlist ::= tagitem */ - -1, /* (157) tagitem ::= INTEGER */ - -1, /* (158) tagitem ::= FLOAT */ - -1, /* (159) tagitem ::= STRING */ - -1, /* (160) tagitem ::= BOOL */ - -1, /* (161) tagitem ::= NULL */ - -1, /* (162) tagitem ::= NOW */ - -2, /* (163) tagitem ::= MINUS INTEGER */ - -2, /* (164) tagitem ::= MINUS FLOAT */ - -2, /* (165) tagitem ::= PLUS INTEGER */ - -2, /* (166) tagitem ::= PLUS FLOAT */ - -15, /* (167) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ - -3, /* (168) select ::= LP select RP */ - -1, /* (169) union ::= select */ - -4, /* (170) union ::= union UNION ALL select */ - -1, /* (171) cmd ::= union */ - -2, /* (172) select ::= SELECT selcollist */ - -2, /* (173) sclp ::= selcollist COMMA */ - 0, /* (174) sclp ::= */ - -4, /* (175) selcollist 
::= sclp distinct expr as */ - -2, /* (176) selcollist ::= sclp STAR */ - -2, /* (177) as ::= AS ids */ - -1, /* (178) as ::= ids */ - 0, /* (179) as ::= */ - -1, /* (180) distinct ::= DISTINCT */ - 0, /* (181) distinct ::= */ - -2, /* (182) from ::= FROM tablelist */ - -2, /* (183) from ::= FROM sub */ - -3, /* (184) sub ::= LP union RP */ - -4, /* (185) sub ::= LP union RP ids */ - -6, /* (186) sub ::= sub COMMA LP union RP ids */ - -2, /* (187) tablelist ::= ids cpxName */ - -3, /* (188) tablelist ::= ids cpxName ids */ - -4, /* (189) tablelist ::= tablelist COMMA ids cpxName */ - -5, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ - -1, /* (191) tmvar ::= VARIABLE */ - -1, /* (192) timestamp ::= INTEGER */ - -2, /* (193) timestamp ::= MINUS INTEGER */ - -2, /* (194) timestamp ::= PLUS INTEGER */ - -1, /* (195) timestamp ::= STRING */ - -1, /* (196) timestamp ::= NOW */ - -3, /* (197) timestamp ::= NOW PLUS VARIABLE */ - -3, /* (198) timestamp ::= NOW MINUS VARIABLE */ - 0, /* (199) range_option ::= */ - -6, /* (200) range_option ::= RANGE LP timestamp COMMA timestamp RP */ - -4, /* (201) interval_option ::= intervalKey LP tmvar RP */ - -6, /* (202) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ - 0, /* (203) interval_option ::= */ - -1, /* (204) intervalKey ::= INTERVAL */ - -1, /* (205) intervalKey ::= EVERY */ - 0, /* (206) session_option ::= */ - -7, /* (207) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 0, /* (208) windowstate_option ::= */ - -4, /* (209) windowstate_option ::= STATE_WINDOW LP ids RP */ - 0, /* (210) fill_opt ::= */ - -6, /* (211) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - -4, /* (212) fill_opt ::= FILL LP ID RP */ - -4, /* (213) sliding_opt ::= SLIDING LP tmvar RP */ - 0, /* (214) sliding_opt ::= */ - 0, /* (215) orderby_opt ::= */ - -3, /* (216) orderby_opt ::= ORDER BY sortlist */ - -4, /* (217) sortlist ::= sortlist COMMA item sortorder */ - -2, /* (218) sortlist ::= item sortorder */ - -2, /* (219) item ::= ids cpxName */ - -1, /* (220) sortorder ::= ASC */ - -1, /* (221) sortorder ::= DESC */ - 0, /* (222) sortorder ::= */ - 0, /* (223) groupby_opt ::= */ - -3, /* (224) groupby_opt ::= GROUP BY grouplist */ - -3, /* (225) grouplist ::= grouplist COMMA item */ - -1, /* (226) grouplist ::= item */ - 0, /* (227) having_opt ::= */ - -2, /* (228) having_opt ::= HAVING expr */ - 0, /* (229) limit_opt ::= */ - -2, /* (230) limit_opt ::= LIMIT signed */ - -4, /* (231) limit_opt ::= LIMIT signed OFFSET signed */ - -4, /* (232) limit_opt ::= LIMIT signed COMMA signed */ - 0, /* (233) slimit_opt ::= */ - -2, /* (234) slimit_opt ::= SLIMIT signed */ - -4, /* (235) slimit_opt ::= SLIMIT signed SOFFSET signed */ - -4, /* (236) slimit_opt ::= SLIMIT signed COMMA signed */ - 0, /* (237) where_opt ::= */ - -2, /* (238) where_opt ::= WHERE expr */ - -3, /* (239) expr ::= LP expr RP */ - -1, /* (240) expr ::= ID */ - -3, /* (241) expr ::= ID DOT ID */ - -3, /* (242) expr ::= ID DOT STAR */ - -1, /* (243) expr ::= INTEGER */ - -2, /* (244) expr ::= MINUS INTEGER */ - -2, /* (245) expr ::= PLUS INTEGER */ - -1, /* (246) expr ::= FLOAT */ - -2, /* (247) expr ::= MINUS FLOAT */ - -2, /* (248) expr ::= PLUS FLOAT */ - -1, /* (249) expr ::= STRING */ - -1, /* (250) expr ::= NOW */ - -1, /* (251) expr ::= VARIABLE */ - -2, /* (252) expr ::= PLUS VARIABLE */ - -2, /* (253) expr ::= MINUS VARIABLE */ - -1, /* (254) expr ::= BOOL */ - -1, /* (255) expr ::= NULL */ - -4, /* (256) expr ::= ID LP exprlist RP */ - -4, /* (257) expr ::= ID LP STAR 
RP */ - -3, /* (258) expr ::= expr IS NULL */ - -4, /* (259) expr ::= expr IS NOT NULL */ - -3, /* (260) expr ::= expr LT expr */ - -3, /* (261) expr ::= expr GT expr */ - -3, /* (262) expr ::= expr LE expr */ - -3, /* (263) expr ::= expr GE expr */ - -3, /* (264) expr ::= expr NE expr */ - -3, /* (265) expr ::= expr EQ expr */ - -5, /* (266) expr ::= expr BETWEEN expr AND expr */ - -3, /* (267) expr ::= expr AND expr */ - -3, /* (268) expr ::= expr OR expr */ - -3, /* (269) expr ::= expr PLUS expr */ - -3, /* (270) expr ::= expr MINUS expr */ - -3, /* (271) expr ::= expr STAR expr */ - -3, /* (272) expr ::= expr SLASH expr */ - -3, /* (273) expr ::= expr REM expr */ - -3, /* (274) expr ::= expr LIKE expr */ - -3, /* (275) expr ::= expr MATCH expr */ - -3, /* (276) expr ::= expr NMATCH expr */ - -5, /* (277) expr ::= expr IN LP exprlist RP */ - -3, /* (278) exprlist ::= exprlist COMMA expritem */ - -1, /* (279) exprlist ::= expritem */ - -1, /* (280) expritem ::= expr */ - 0, /* (281) expritem ::= */ - -3, /* (282) cmd ::= RESET QUERY CACHE */ - -3, /* (283) cmd ::= SYNCDB ids REPLICA */ - -7, /* (284) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (285) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - -7, /* (286) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (287) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - -7, /* (288) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - -8, /* (289) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (290) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (291) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - -7, /* (292) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (293) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -7, /* (294) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (295) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (296) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (297) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (298) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (299) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - -3, /* (300) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (301) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (302) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + -5, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ + -5, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ + -4, /* (31) cmd ::= DROP DATABASE ifexists ids */ + -4, /* (32) cmd ::= DROP TOPIC ifexists ids */ + -3, /* (33) cmd ::= DROP FUNCTION ids */ + -3, /* (34) cmd ::= DROP DNODE ids */ + -3, /* (35) cmd ::= DROP USER ids */ + -3, /* (36) cmd ::= DROP ACCOUNT ids */ + -2, /* (37) cmd ::= USE ids */ + -3, /* (38) cmd ::= DESCRIBE ids cpxName */ + -3, /* (39) cmd ::= DESC ids cpxName */ + -5, /* (40) cmd ::= ALTER USER ids PASS ids */ + -5, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */ + -4, /* (42) cmd ::= ALTER DNODE ids ids */ + -5, /* (43) cmd ::= ALTER DNODE ids ids ids */ + -3, /* (44) cmd ::= ALTER LOCAL ids */ + -4, /* (45) cmd ::= ALTER LOCAL ids ids */ + -4, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */ + -4, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */ + -4, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */ + -6, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + -6, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */ + -1, /* (51) ids ::= ID */ + -1, /* (52) ids ::= STRING */ + -2, /* (53) 
ifexists ::= IF EXISTS */ + 0, /* (54) ifexists ::= */ + -3, /* (55) ifnotexists ::= IF NOT EXISTS */ + 0, /* (56) ifnotexists ::= */ + -3, /* (57) cmd ::= CREATE DNODE ids */ + -6, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + -5, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + -5, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + -8, /* (61) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + -9, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + -5, /* (63) cmd ::= CREATE USER ids PASS ids */ + 0, /* (64) bufsize ::= */ + -2, /* (65) bufsize ::= BUFSIZE INTEGER */ + 0, /* (66) pps ::= */ + -2, /* (67) pps ::= PPS INTEGER */ + 0, /* (68) tseries ::= */ + -2, /* (69) tseries ::= TSERIES INTEGER */ + 0, /* (70) dbs ::= */ + -2, /* (71) dbs ::= DBS INTEGER */ + 0, /* (72) streams ::= */ + -2, /* (73) streams ::= STREAMS INTEGER */ + 0, /* (74) storage ::= */ + -2, /* (75) storage ::= STORAGE INTEGER */ + 0, /* (76) qtime ::= */ + -2, /* (77) qtime ::= QTIME INTEGER */ + 0, /* (78) users ::= */ + -2, /* (79) users ::= USERS INTEGER */ + 0, /* (80) conns ::= */ + -2, /* (81) conns ::= CONNS INTEGER */ + 0, /* (82) state ::= */ + -2, /* (83) state ::= STATE ids */ + -9, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + -3, /* (85) intitemlist ::= intitemlist COMMA intitem */ + -1, /* (86) intitemlist ::= intitem */ + -1, /* (87) intitem ::= INTEGER */ + -2, /* (88) keep ::= KEEP intitemlist */ + -2, /* (89) cache ::= CACHE INTEGER */ + -2, /* (90) replica ::= REPLICA INTEGER */ + -2, /* (91) quorum ::= QUORUM INTEGER */ + -2, /* (92) days ::= DAYS INTEGER */ + -2, /* (93) minrows ::= MINROWS INTEGER */ + -2, /* (94) maxrows ::= MAXROWS INTEGER */ + -2, /* (95) blocks ::= BLOCKS INTEGER */ + -2, /* (96) ctime ::= CTIME INTEGER */ + -2, /* (97) wal ::= WAL INTEGER */ + -2, /* (98) fsync ::= FSYNC INTEGER */ + -2, /* (99) comp ::= COMP INTEGER */ + -2, /* (100) prec ::= PRECISION STRING */ + -2, /* (101) update ::= UPDATE INTEGER */ + -2, /* (102) cachelast ::= CACHELAST INTEGER */ + -2, /* (103) partitions ::= PARTITIONS INTEGER */ + 0, /* (104) db_optr ::= */ + -2, /* (105) db_optr ::= db_optr cache */ + -2, /* (106) db_optr ::= db_optr replica */ + -2, /* (107) db_optr ::= db_optr quorum */ + -2, /* (108) db_optr ::= db_optr days */ + -2, /* (109) db_optr ::= db_optr minrows */ + -2, /* (110) db_optr ::= db_optr maxrows */ + -2, /* (111) db_optr ::= db_optr blocks */ + -2, /* (112) db_optr ::= db_optr ctime */ + -2, /* (113) db_optr ::= db_optr wal */ + -2, /* (114) db_optr ::= db_optr fsync */ + -2, /* (115) db_optr ::= db_optr comp */ + -2, /* (116) db_optr ::= db_optr prec */ + -2, /* (117) db_optr ::= db_optr keep */ + -2, /* (118) db_optr ::= db_optr update */ + -2, /* (119) db_optr ::= db_optr cachelast */ + -1, /* (120) topic_optr ::= db_optr */ + -2, /* (121) topic_optr ::= topic_optr partitions */ + 0, /* (122) alter_db_optr ::= */ + -2, /* (123) alter_db_optr ::= alter_db_optr replica */ + -2, /* (124) alter_db_optr ::= alter_db_optr quorum */ + -2, /* (125) alter_db_optr ::= alter_db_optr keep */ + -2, /* (126) alter_db_optr ::= alter_db_optr blocks */ + -2, /* (127) alter_db_optr ::= alter_db_optr comp */ + -2, /* (128) alter_db_optr ::= alter_db_optr update */ + -2, /* (129) alter_db_optr ::= alter_db_optr cachelast */ + -1, /* (130) alter_topic_optr ::= alter_db_optr */ + -2, /* (131) alter_topic_optr ::= alter_topic_optr partitions */ + -1, /* (132) typename 
::= ids */ + -4, /* (133) typename ::= ids LP signed RP */ + -2, /* (134) typename ::= ids UNSIGNED */ + -1, /* (135) signed ::= INTEGER */ + -2, /* (136) signed ::= PLUS INTEGER */ + -2, /* (137) signed ::= MINUS INTEGER */ + -3, /* (138) cmd ::= CREATE TABLE create_table_args */ + -3, /* (139) cmd ::= CREATE TABLE create_stable_args */ + -3, /* (140) cmd ::= CREATE STABLE create_stable_args */ + -3, /* (141) cmd ::= CREATE TABLE create_table_list */ + -1, /* (142) create_table_list ::= create_from_stable */ + -2, /* (143) create_table_list ::= create_table_list create_from_stable */ + -6, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + -10, /* (145) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + -10, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + -13, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + -3, /* (148) tagNamelist ::= tagNamelist COMMA ids */ + -1, /* (149) tagNamelist ::= ids */ + -5, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */ + -3, /* (151) columnlist ::= columnlist COMMA column */ + -1, /* (152) columnlist ::= column */ + -2, /* (153) column ::= ids typename */ + -3, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */ + -1, /* (155) tagitemlist ::= tagitem */ + -1, /* (156) tagitem ::= INTEGER */ + -1, /* (157) tagitem ::= FLOAT */ + -1, /* (158) tagitem ::= STRING */ + -1, /* (159) tagitem ::= BOOL */ + -1, /* (160) tagitem ::= NULL */ + -1, /* (161) tagitem ::= NOW */ + -2, /* (162) tagitem ::= MINUS INTEGER */ + -2, /* (163) tagitem ::= MINUS FLOAT */ + -2, /* (164) tagitem ::= PLUS INTEGER */ + -2, /* (165) tagitem ::= PLUS FLOAT */ + -15, /* (166) select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + -3, /* (167) select ::= LP select RP */ + -1, /* (168) union ::= select */ + -4, /* (169) union ::= union UNION ALL select */ + -1, /* (170) cmd ::= union */ + -2, /* (171) select ::= SELECT selcollist */ + -2, /* (172) sclp ::= selcollist COMMA */ + 0, /* (173) sclp ::= */ + -4, /* (174) selcollist ::= sclp distinct expr as */ + -2, /* (175) selcollist ::= sclp STAR */ + -2, /* (176) as ::= AS ids */ + -1, /* (177) as ::= ids */ + 0, /* (178) as ::= */ + -1, /* (179) distinct ::= DISTINCT */ + 0, /* (180) distinct ::= */ + -2, /* (181) from ::= FROM tablelist */ + -2, /* (182) from ::= FROM sub */ + -3, /* (183) sub ::= LP union RP */ + -4, /* (184) sub ::= LP union RP ids */ + -6, /* (185) sub ::= sub COMMA LP union RP ids */ + -2, /* (186) tablelist ::= ids cpxName */ + -3, /* (187) tablelist ::= ids cpxName ids */ + -4, /* (188) tablelist ::= tablelist COMMA ids cpxName */ + -5, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */ + -1, /* (190) tmvar ::= VARIABLE */ + -1, /* (191) timestamp ::= INTEGER */ + -2, /* (192) timestamp ::= MINUS INTEGER */ + -2, /* (193) timestamp ::= PLUS INTEGER */ + -1, /* (194) timestamp ::= STRING */ + -1, /* (195) timestamp ::= NOW */ + -3, /* (196) timestamp ::= NOW PLUS VARIABLE */ + -3, /* (197) timestamp ::= NOW MINUS VARIABLE */ + 0, /* (198) range_option ::= */ + -6, /* (199) range_option ::= RANGE LP timestamp COMMA timestamp RP */ + -4, /* (200) interval_option ::= intervalKey LP tmvar RP */ + -6, /* (201) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 0, 
/* (202) interval_option ::= */ + -1, /* (203) intervalKey ::= INTERVAL */ + -1, /* (204) intervalKey ::= EVERY */ + 0, /* (205) session_option ::= */ + -7, /* (206) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 0, /* (207) windowstate_option ::= */ + -4, /* (208) windowstate_option ::= STATE_WINDOW LP ids RP */ + 0, /* (209) fill_opt ::= */ + -6, /* (210) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + -4, /* (211) fill_opt ::= FILL LP ID RP */ + -4, /* (212) sliding_opt ::= SLIDING LP tmvar RP */ + 0, /* (213) sliding_opt ::= */ + 0, /* (214) orderby_opt ::= */ + -3, /* (215) orderby_opt ::= ORDER BY sortlist */ + -4, /* (216) sortlist ::= sortlist COMMA item sortorder */ + -2, /* (217) sortlist ::= item sortorder */ + -2, /* (218) item ::= ids cpxName */ + -1, /* (219) sortorder ::= ASC */ + -1, /* (220) sortorder ::= DESC */ + 0, /* (221) sortorder ::= */ + 0, /* (222) groupby_opt ::= */ + -3, /* (223) groupby_opt ::= GROUP BY grouplist */ + -3, /* (224) grouplist ::= grouplist COMMA item */ + -1, /* (225) grouplist ::= item */ + 0, /* (226) having_opt ::= */ + -2, /* (227) having_opt ::= HAVING expr */ + 0, /* (228) limit_opt ::= */ + -2, /* (229) limit_opt ::= LIMIT signed */ + -4, /* (230) limit_opt ::= LIMIT signed OFFSET signed */ + -4, /* (231) limit_opt ::= LIMIT signed COMMA signed */ + 0, /* (232) slimit_opt ::= */ + -2, /* (233) slimit_opt ::= SLIMIT signed */ + -4, /* (234) slimit_opt ::= SLIMIT signed SOFFSET signed */ + -4, /* (235) slimit_opt ::= SLIMIT signed COMMA signed */ + 0, /* (236) where_opt ::= */ + -2, /* (237) where_opt ::= WHERE expr */ + -3, /* (238) expr ::= LP expr RP */ + -1, /* (239) expr ::= ID */ + -3, /* (240) expr ::= ID DOT ID */ + -3, /* (241) expr ::= ID DOT STAR */ + -1, /* (242) expr ::= INTEGER */ + -2, /* (243) expr ::= MINUS INTEGER */ + -2, /* (244) expr ::= PLUS INTEGER */ + -1, /* (245) expr ::= FLOAT */ + -2, /* (246) expr ::= MINUS FLOAT */ + -2, /* (247) expr ::= PLUS FLOAT */ + -1, /* (248) expr ::= STRING */ + -1, /* (249) expr ::= NOW */ + -1, /* (250) expr ::= VARIABLE */ + -2, /* (251) expr ::= PLUS VARIABLE */ + -2, /* (252) expr ::= MINUS VARIABLE */ + -1, /* (253) expr ::= BOOL */ + -1, /* (254) expr ::= NULL */ + -4, /* (255) expr ::= ID LP exprlist RP */ + -4, /* (256) expr ::= ID LP STAR RP */ + -3, /* (257) expr ::= expr IS NULL */ + -4, /* (258) expr ::= expr IS NOT NULL */ + -3, /* (259) expr ::= expr LT expr */ + -3, /* (260) expr ::= expr GT expr */ + -3, /* (261) expr ::= expr LE expr */ + -3, /* (262) expr ::= expr GE expr */ + -3, /* (263) expr ::= expr NE expr */ + -3, /* (264) expr ::= expr EQ expr */ + -5, /* (265) expr ::= expr BETWEEN expr AND expr */ + -3, /* (266) expr ::= expr AND expr */ + -3, /* (267) expr ::= expr OR expr */ + -3, /* (268) expr ::= expr PLUS expr */ + -3, /* (269) expr ::= expr MINUS expr */ + -3, /* (270) expr ::= expr STAR expr */ + -3, /* (271) expr ::= expr SLASH expr */ + -3, /* (272) expr ::= expr REM expr */ + -3, /* (273) expr ::= expr LIKE expr */ + -3, /* (274) expr ::= expr MATCH expr */ + -3, /* (275) expr ::= expr NMATCH expr */ + -5, /* (276) expr ::= expr IN LP exprlist RP */ + -3, /* (277) exprlist ::= exprlist COMMA expritem */ + -1, /* (278) exprlist ::= expritem */ + -1, /* (279) expritem ::= expr */ + 0, /* (280) expritem ::= */ + -3, /* (281) cmd ::= RESET QUERY CACHE */ + -3, /* (282) cmd ::= SYNCDB ids REPLICA */ + -7, /* (283) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (284) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 
-7, /* (285) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (286) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (287) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (288) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (289) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (290) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + -7, /* (291) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (292) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (293) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (294) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (295) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (296) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (297) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (298) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + -3, /* (299) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (300) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (301) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2575,9 +2571,9 @@ static YYACTIONTYPE yy_reduce( /********** Begin reduce actions **********************************************/ YYMINORTYPE yylhsminor; case 0: /* program ::= cmd */ - case 139: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==139); - case 140: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==140); - case 141: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==141); + case 138: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==138); + case 139: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==139); + case 140: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==140); {} break; case 1: /* cmd ::= SHOW DATABASES */ @@ -2687,144 +2683,137 @@ static YYACTIONTYPE yy_reduce( setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } break; - case 29: /* cmd ::= SHOW dbPrefix VGROUPS ids */ -{ - SStrToken token; - tSetDbName(&token, &yymsp[-2].minor.yy0); - setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); -} - break; - case 30: /* cmd ::= DROP TABLE ifexists ids cpxName */ + case 29: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1); } break; - case 31: /* cmd ::= DROP STABLE ifexists ids cpxName */ + case 30: /* cmd ::= DROP STABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE); } break; - case 32: /* cmd ::= DROP DATABASE ifexists ids */ + case 31: /* cmd ::= DROP DATABASE ifexists ids */ { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); } break; - case 33: /* cmd ::= DROP TOPIC ifexists ids */ + case 32: /* cmd ::= DROP TOPIC ifexists ids */ { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); } break; - case 34: /* cmd ::= DROP FUNCTION ids */ + case 33: /* cmd ::= DROP FUNCTION ids */ { setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &yymsp[0].minor.yy0); } break; - case 35: /* cmd ::= DROP DNODE ids */ + case 34: /* cmd ::= DROP DNODE ids */ { 
setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } break; - case 36: /* cmd ::= DROP USER ids */ + case 35: /* cmd ::= DROP USER ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } break; - case 37: /* cmd ::= DROP ACCOUNT ids */ + case 36: /* cmd ::= DROP ACCOUNT ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } break; - case 38: /* cmd ::= USE ids */ + case 37: /* cmd ::= USE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} break; - case 39: /* cmd ::= DESCRIBE ids cpxName */ - case 40: /* cmd ::= DESC ids cpxName */ yytestcase(yyruleno==40); + case 38: /* cmd ::= DESCRIBE ids cpxName */ + case 39: /* cmd ::= DESC ids cpxName */ yytestcase(yyruleno==39); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 41: /* cmd ::= ALTER USER ids PASS ids */ + case 40: /* cmd ::= ALTER USER ids PASS ids */ { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } break; - case 42: /* cmd ::= ALTER USER ids PRIVILEGE ids */ + case 41: /* cmd ::= ALTER USER ids PRIVILEGE ids */ { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} break; - case 43: /* cmd ::= ALTER DNODE ids ids */ + case 42: /* cmd ::= ALTER DNODE ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 44: /* cmd ::= ALTER DNODE ids ids ids */ + case 43: /* cmd ::= ALTER DNODE ids ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 45: /* cmd ::= ALTER LOCAL ids */ + case 44: /* cmd ::= ALTER LOCAL ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } break; - case 46: /* cmd ::= ALTER LOCAL ids ids */ + case 45: /* cmd ::= ALTER LOCAL ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 47: /* cmd ::= ALTER DATABASE ids alter_db_optr */ - case 48: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==48); + case 46: /* cmd ::= ALTER DATABASE ids alter_db_optr */ + case 47: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==47); { SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy10, &t);} break; - case 49: /* cmd ::= ALTER ACCOUNT ids acct_optr */ + case 48: /* cmd ::= ALTER ACCOUNT ids acct_optr */ { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy427);} break; - case 50: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + case 49: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy427);} break; - case 51: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ + case 50: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ { setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy345);} break; - case 52: /* ids ::= ID */ - case 53: /* ids ::= STRING */ yytestcase(yyruleno==53); + case 51: /* ids ::= ID */ + case 52: /* ids ::= STRING */ yytestcase(yyruleno==52); {yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 54: /* ifexists ::= IF EXISTS */ + case 53: /* ifexists ::= IF EXISTS */ { yymsp[-1].minor.yy0.n = 1;} break; - case 55: /* ifexists ::= */ - case 
57: /* ifnotexists ::= */ yytestcase(yyruleno==57); - case 181: /* distinct ::= */ yytestcase(yyruleno==181); + case 54: /* ifexists ::= */ + case 56: /* ifnotexists ::= */ yytestcase(yyruleno==56); + case 180: /* distinct ::= */ yytestcase(yyruleno==180); { yymsp[1].minor.yy0.n = 0;} break; - case 56: /* ifnotexists ::= IF NOT EXISTS */ + case 55: /* ifnotexists ::= IF NOT EXISTS */ { yymsp[-2].minor.yy0.n = 1;} break; - case 58: /* cmd ::= CREATE DNODE ids */ + case 57: /* cmd ::= CREATE DNODE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; - case 59: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + case 58: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ { setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy427);} break; - case 60: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - case 61: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==61); + case 59: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + case 60: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==60); { setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy10, &yymsp[-2].minor.yy0);} break; - case 62: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + case 61: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy487, &yymsp[0].minor.yy0, 1);} break; - case 63: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + case 62: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy487, &yymsp[0].minor.yy0, 2);} break; - case 64: /* cmd ::= CREATE USER ids PASS ids */ + case 63: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; - case 65: /* bufsize ::= */ - case 67: /* pps ::= */ yytestcase(yyruleno==67); - case 69: /* tseries ::= */ yytestcase(yyruleno==69); - case 71: /* dbs ::= */ yytestcase(yyruleno==71); - case 73: /* streams ::= */ yytestcase(yyruleno==73); - case 75: /* storage ::= */ yytestcase(yyruleno==75); - case 77: /* qtime ::= */ yytestcase(yyruleno==77); - case 79: /* users ::= */ yytestcase(yyruleno==79); - case 81: /* conns ::= */ yytestcase(yyruleno==81); - case 83: /* state ::= */ yytestcase(yyruleno==83); + case 64: /* bufsize ::= */ + case 66: /* pps ::= */ yytestcase(yyruleno==66); + case 68: /* tseries ::= */ yytestcase(yyruleno==68); + case 70: /* dbs ::= */ yytestcase(yyruleno==70); + case 72: /* streams ::= */ yytestcase(yyruleno==72); + case 74: /* storage ::= */ yytestcase(yyruleno==74); + case 76: /* qtime ::= */ yytestcase(yyruleno==76); + case 78: /* users ::= */ yytestcase(yyruleno==78); + case 80: /* conns ::= */ yytestcase(yyruleno==80); + case 82: /* state ::= */ yytestcase(yyruleno==82); { yymsp[1].minor.yy0.n = 0; } break; - case 66: /* bufsize ::= BUFSIZE INTEGER */ - case 68: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==68); - case 70: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==70); - case 72: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==72); - case 74: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==74); - case 76: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==76); - case 78: /* qtime 
::= QTIME INTEGER */ yytestcase(yyruleno==78); - case 80: /* users ::= USERS INTEGER */ yytestcase(yyruleno==80); - case 82: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==82); - case 84: /* state ::= STATE ids */ yytestcase(yyruleno==84); + case 65: /* bufsize ::= BUFSIZE INTEGER */ + case 67: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==67); + case 69: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==69); + case 71: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==71); + case 73: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==73); + case 75: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==75); + case 77: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==77); + case 79: /* users ::= USERS INTEGER */ yytestcase(yyruleno==79); + case 81: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==81); + case 83: /* state ::= STATE ids */ yytestcase(yyruleno==83); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 85: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + case 84: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { yylhsminor.yy427.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; yylhsminor.yy427.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; @@ -2838,135 +2827,135 @@ static YYACTIONTYPE yy_reduce( } yymsp[-8].minor.yy427 = yylhsminor.yy427; break; - case 86: /* intitemlist ::= intitemlist COMMA intitem */ - case 155: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==155); + case 85: /* intitemlist ::= intitemlist COMMA intitem */ + case 154: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==154); { yylhsminor.yy345 = tVariantListAppend(yymsp[-2].minor.yy345, &yymsp[0].minor.yy2, -1); } yymsp[-2].minor.yy345 = yylhsminor.yy345; break; - case 87: /* intitemlist ::= intitem */ - case 156: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==156); + case 86: /* intitemlist ::= intitem */ + case 155: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==155); { yylhsminor.yy345 = tVariantListAppend(NULL, &yymsp[0].minor.yy2, -1); } yymsp[0].minor.yy345 = yylhsminor.yy345; break; - case 88: /* intitem ::= INTEGER */ - case 157: /* tagitem ::= INTEGER */ yytestcase(yyruleno==157); - case 158: /* tagitem ::= FLOAT */ yytestcase(yyruleno==158); - case 159: /* tagitem ::= STRING */ yytestcase(yyruleno==159); - case 160: /* tagitem ::= BOOL */ yytestcase(yyruleno==160); -{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0); } + case 87: /* intitem ::= INTEGER */ + case 156: /* tagitem ::= INTEGER */ yytestcase(yyruleno==156); + case 157: /* tagitem ::= FLOAT */ yytestcase(yyruleno==157); + case 158: /* tagitem ::= STRING */ yytestcase(yyruleno==158); + case 159: /* tagitem ::= BOOL */ yytestcase(yyruleno==159); +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0, true); } yymsp[0].minor.yy2 = yylhsminor.yy2; break; - case 89: /* keep ::= KEEP intitemlist */ + case 88: /* keep ::= KEEP intitemlist */ { yymsp[-1].minor.yy345 = yymsp[0].minor.yy345; } break; - case 90: /* cache ::= CACHE INTEGER */ - case 91: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==91); - case 92: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==92); - case 93: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==93); - case 94: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==94); - case 95: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==95); - case 96: /* blocks ::= 
BLOCKS INTEGER */ yytestcase(yyruleno==96); - case 97: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==97); - case 98: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==98); - case 99: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==99); - case 100: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==100); - case 101: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==101); - case 102: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==102); - case 103: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==103); - case 104: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==104); + case 89: /* cache ::= CACHE INTEGER */ + case 90: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==90); + case 91: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==91); + case 92: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==92); + case 93: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==93); + case 94: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==94); + case 95: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==95); + case 96: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==96); + case 97: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==97); + case 98: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==98); + case 99: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==99); + case 100: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==100); + case 101: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==101); + case 102: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==102); + case 103: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==103); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 105: /* db_optr ::= */ + case 104: /* db_optr ::= */ {setDefaultCreateDbOption(&yymsp[1].minor.yy10); yymsp[1].minor.yy10.dbType = TSDB_DB_TYPE_DEFAULT;} break; - case 106: /* db_optr ::= db_optr cache */ + case 105: /* db_optr ::= db_optr cache */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 107: /* db_optr ::= db_optr replica */ - case 124: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==124); + case 106: /* db_optr ::= db_optr replica */ + case 123: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==123); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 108: /* db_optr ::= db_optr quorum */ - case 125: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==125); + case 107: /* db_optr ::= db_optr quorum */ + case 124: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==124); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 109: /* db_optr ::= db_optr days */ + case 108: /* db_optr ::= db_optr days */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 110: /* db_optr ::= db_optr minrows */ + case 109: /* db_optr ::= db_optr minrows */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 111: /* db_optr ::= db_optr maxrows */ + case 110: /* db_optr ::= db_optr maxrows */ { yylhsminor.yy10 = 
yymsp[-1].minor.yy10; yylhsminor.yy10.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 112: /* db_optr ::= db_optr blocks */ - case 127: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==127); + case 111: /* db_optr ::= db_optr blocks */ + case 126: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==126); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 113: /* db_optr ::= db_optr ctime */ + case 112: /* db_optr ::= db_optr ctime */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 114: /* db_optr ::= db_optr wal */ + case 113: /* db_optr ::= db_optr wal */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 115: /* db_optr ::= db_optr fsync */ + case 114: /* db_optr ::= db_optr fsync */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 116: /* db_optr ::= db_optr comp */ - case 128: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==128); + case 115: /* db_optr ::= db_optr comp */ + case 127: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==127); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 117: /* db_optr ::= db_optr prec */ + case 116: /* db_optr ::= db_optr prec */ { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.precision = yymsp[0].minor.yy0; } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 118: /* db_optr ::= db_optr keep */ - case 126: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==126); + case 117: /* db_optr ::= db_optr keep */ + case 125: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==125); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.keep = yymsp[0].minor.yy345; } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 119: /* db_optr ::= db_optr update */ - case 129: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==129); + case 118: /* db_optr ::= db_optr update */ + case 128: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==128); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 120: /* db_optr ::= db_optr cachelast */ - case 130: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==130); + case 119: /* db_optr ::= db_optr cachelast */ + case 129: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==129); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 121: /* topic_optr ::= db_optr */ - case 131: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==131); + case 120: /* topic_optr ::= db_optr */ + case 130: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==130); { yylhsminor.yy10 = yymsp[0].minor.yy10; yylhsminor.yy10.dbType = TSDB_DB_TYPE_TOPIC; } yymsp[0].minor.yy10 = yylhsminor.yy10; break; - case 
122: /* topic_optr ::= topic_optr partitions */ - case 132: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==132); + case 121: /* topic_optr ::= topic_optr partitions */ + case 131: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==131); { yylhsminor.yy10 = yymsp[-1].minor.yy10; yylhsminor.yy10.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[-1].minor.yy10 = yylhsminor.yy10; break; - case 123: /* alter_db_optr ::= */ + case 122: /* alter_db_optr ::= */ { setDefaultCreateDbOption(&yymsp[1].minor.yy10); yymsp[1].minor.yy10.dbType = TSDB_DB_TYPE_DEFAULT;} break; - case 133: /* typename ::= ids */ + case 132: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; tSetColumnType (&yylhsminor.yy487, &yymsp[0].minor.yy0); } yymsp[0].minor.yy487 = yylhsminor.yy487; break; - case 134: /* typename ::= ids LP signed RP */ + case 133: /* typename ::= ids LP signed RP */ { if (yymsp[-1].minor.yy525 <= 0) { yymsp[-3].minor.yy0.type = 0; @@ -2978,7 +2967,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-3].minor.yy487 = yylhsminor.yy487; break; - case 135: /* typename ::= ids UNSIGNED */ + case 134: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); @@ -2986,20 +2975,20 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy487 = yylhsminor.yy487; break; - case 136: /* signed ::= INTEGER */ + case 135: /* signed ::= INTEGER */ { yylhsminor.yy525 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } yymsp[0].minor.yy525 = yylhsminor.yy525; break; - case 137: /* signed ::= PLUS INTEGER */ + case 136: /* signed ::= PLUS INTEGER */ { yymsp[-1].minor.yy525 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 138: /* signed ::= MINUS INTEGER */ + case 137: /* signed ::= MINUS INTEGER */ { yymsp[-1].minor.yy525 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 142: /* cmd ::= CREATE TABLE create_table_list */ + case 141: /* cmd ::= CREATE TABLE create_table_list */ { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy170;} break; - case 143: /* create_table_list ::= create_from_stable */ + case 142: /* create_table_list ::= create_from_stable */ { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); @@ -3010,14 +2999,14 @@ static YYACTIONTYPE yy_reduce( } yymsp[0].minor.yy170 = yylhsminor.yy170; break; - case 144: /* create_table_list ::= create_table_list create_from_stable */ + case 143: /* create_table_list ::= create_table_list create_from_stable */ { taosArrayPush(yymsp[-1].minor.yy170->childTableInfo, &yymsp[0].minor.yy72); yylhsminor.yy170 = yymsp[-1].minor.yy170; } yymsp[-1].minor.yy170 = yylhsminor.yy170; break; - case 145: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + case 144: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { yylhsminor.yy170 = tSetCreateTableInfo(yymsp[-1].minor.yy345, NULL, NULL, TSQL_CREATE_TABLE); setSqlInfo(pInfo, yylhsminor.yy170, NULL, TSDB_SQL_CREATE_TABLE); @@ -3027,7 +3016,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-5].minor.yy170 = yylhsminor.yy170; break; - case 146: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + case 145: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { yylhsminor.yy170 = tSetCreateTableInfo(yymsp[-5].minor.yy345, 
yymsp[-1].minor.yy345, NULL, TSQL_CREATE_STABLE); setSqlInfo(pInfo, yylhsminor.yy170, NULL, TSDB_SQL_CREATE_TABLE); @@ -3037,7 +3026,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-9].minor.yy170 = yylhsminor.yy170; break; - case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + case 146: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; @@ -3045,7 +3034,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-9].minor.yy72 = yylhsminor.yy72; break; - case 148: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; @@ -3053,15 +3042,15 @@ static YYACTIONTYPE yy_reduce( } yymsp[-12].minor.yy72 = yylhsminor.yy72; break; - case 149: /* tagNamelist ::= tagNamelist COMMA ids */ + case 148: /* tagNamelist ::= tagNamelist COMMA ids */ {taosArrayPush(yymsp[-2].minor.yy345, &yymsp[0].minor.yy0); yylhsminor.yy345 = yymsp[-2].minor.yy345; } yymsp[-2].minor.yy345 = yylhsminor.yy345; break; - case 150: /* tagNamelist ::= ids */ + case 149: /* tagNamelist ::= ids */ {yylhsminor.yy345 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy345, &yymsp[0].minor.yy0);} yymsp[0].minor.yy345 = yylhsminor.yy345; break; - case 151: /* create_table_args ::= ifnotexists ids cpxName AS select */ + case 150: /* create_table_args ::= ifnotexists ids cpxName AS select */ { yylhsminor.yy170 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy68, TSQL_CREATE_STREAM); setSqlInfo(pInfo, yylhsminor.yy170, NULL, TSDB_SQL_CREATE_TABLE); @@ -3071,638 +3060,638 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy170 = yylhsminor.yy170; break; - case 152: /* columnlist ::= columnlist COMMA column */ + case 151: /* columnlist ::= columnlist COMMA column */ {taosArrayPush(yymsp[-2].minor.yy345, &yymsp[0].minor.yy487); yylhsminor.yy345 = yymsp[-2].minor.yy345; } yymsp[-2].minor.yy345 = yylhsminor.yy345; break; - case 153: /* columnlist ::= column */ + case 152: /* columnlist ::= column */ {yylhsminor.yy345 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy345, &yymsp[0].minor.yy487);} yymsp[0].minor.yy345 = yylhsminor.yy345; break; - case 154: /* column ::= ids typename */ + case 153: /* column ::= ids typename */ { tSetColumnInfo(&yylhsminor.yy487, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy487); } yymsp[-1].minor.yy487 = yylhsminor.yy487; break; - case 161: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0); } + case 160: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0, true); } yymsp[0].minor.yy2 = yylhsminor.yy2; break; - case 162: /* tagitem ::= NOW */ -{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0);} + case 161: /* tagitem ::= NOW */ +{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy2, &yymsp[0].minor.yy0, true);} yymsp[0].minor.yy2 = yylhsminor.yy2; break; - case 163: /* tagitem ::= MINUS INTEGER */ - case 164: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==164); - case 165: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==165); - 
case 166: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==166); + case 162: /* tagitem ::= MINUS INTEGER */ + case 163: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==163); + case 164: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==164); + case 165: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==165); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy2, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy2, &yymsp[-1].minor.yy0, true); } yymsp[-1].minor.yy2 = yylhsminor.yy2; break; - case 167: /* select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + case 166: /* select ::= SELECT selcollist from where_opt range_option interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ { yylhsminor.yy68 = tSetQuerySqlNode(&yymsp[-14].minor.yy0, yymsp[-13].minor.yy345, yymsp[-12].minor.yy484, yymsp[-11].minor.yy418, yymsp[-4].minor.yy345, yymsp[-2].minor.yy345, &yymsp[-9].minor.yy280, &yymsp[-7].minor.yy295, &yymsp[-6].minor.yy432, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy345, &yymsp[0].minor.yy114, &yymsp[-1].minor.yy114, yymsp[-3].minor.yy418, &yymsp[-10].minor.yy144); } yymsp[-14].minor.yy68 = yylhsminor.yy68; break; - case 168: /* select ::= LP select RP */ + case 167: /* select ::= LP select RP */ {yymsp[-2].minor.yy68 = yymsp[-1].minor.yy68;} break; - case 169: /* union ::= select */ + case 168: /* union ::= select */ { yylhsminor.yy345 = setSubclause(NULL, yymsp[0].minor.yy68); } yymsp[0].minor.yy345 = yylhsminor.yy345; break; - case 170: /* union ::= union UNION ALL select */ + case 169: /* union ::= union UNION ALL select */ { yylhsminor.yy345 = appendSelectClause(yymsp[-3].minor.yy345, yymsp[0].minor.yy68); } yymsp[-3].minor.yy345 = yylhsminor.yy345; break; - case 171: /* cmd ::= union */ + case 170: /* cmd ::= union */ { setSqlInfo(pInfo, yymsp[0].minor.yy345, NULL, TSDB_SQL_SELECT); } break; - case 172: /* select ::= SELECT selcollist */ + case 171: /* select ::= SELECT selcollist */ { yylhsminor.yy68 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy345, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } yymsp[-1].minor.yy68 = yylhsminor.yy68; break; - case 173: /* sclp ::= selcollist COMMA */ + case 172: /* sclp ::= selcollist COMMA */ {yylhsminor.yy345 = yymsp[-1].minor.yy345;} yymsp[-1].minor.yy345 = yylhsminor.yy345; break; - case 174: /* sclp ::= */ - case 215: /* orderby_opt ::= */ yytestcase(yyruleno==215); + case 173: /* sclp ::= */ + case 214: /* orderby_opt ::= */ yytestcase(yyruleno==214); {yymsp[1].minor.yy345 = 0;} break; - case 175: /* selcollist ::= sclp distinct expr as */ + case 174: /* selcollist ::= sclp distinct expr as */ { yylhsminor.yy345 = tSqlExprListAppend(yymsp[-3].minor.yy345, yymsp[-1].minor.yy418, yymsp[-2].minor.yy0.n? 
&yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } yymsp[-3].minor.yy345 = yylhsminor.yy345; break; - case 176: /* selcollist ::= sclp STAR */ + case 175: /* selcollist ::= sclp STAR */ { tSqlExpr *pNode = tSqlExprCreateIdValue(pInfo, NULL, TK_ALL); yylhsminor.yy345 = tSqlExprListAppend(yymsp[-1].minor.yy345, pNode, 0, 0); } yymsp[-1].minor.yy345 = yylhsminor.yy345; break; - case 177: /* as ::= AS ids */ + case 176: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 178: /* as ::= ids */ + case 177: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 179: /* as ::= */ + case 178: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 180: /* distinct ::= DISTINCT */ + case 179: /* distinct ::= DISTINCT */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 182: /* from ::= FROM tablelist */ - case 183: /* from ::= FROM sub */ yytestcase(yyruleno==183); + case 181: /* from ::= FROM tablelist */ + case 182: /* from ::= FROM sub */ yytestcase(yyruleno==182); {yymsp[-1].minor.yy484 = yymsp[0].minor.yy484;} break; - case 184: /* sub ::= LP union RP */ + case 183: /* sub ::= LP union RP */ {yymsp[-2].minor.yy484 = addSubqueryElem(NULL, yymsp[-1].minor.yy345, NULL);} break; - case 185: /* sub ::= LP union RP ids */ + case 184: /* sub ::= LP union RP ids */ {yymsp[-3].minor.yy484 = addSubqueryElem(NULL, yymsp[-2].minor.yy345, &yymsp[0].minor.yy0);} break; - case 186: /* sub ::= sub COMMA LP union RP ids */ + case 185: /* sub ::= sub COMMA LP union RP ids */ {yylhsminor.yy484 = addSubqueryElem(yymsp[-5].minor.yy484, yymsp[-2].minor.yy345, &yymsp[0].minor.yy0);} yymsp[-5].minor.yy484 = yylhsminor.yy484; break; - case 187: /* tablelist ::= ids cpxName */ + case 186: /* tablelist ::= ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy484 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } yymsp[-1].minor.yy484 = yylhsminor.yy484; break; - case 188: /* tablelist ::= ids cpxName ids */ + case 187: /* tablelist ::= ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy484 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy484 = yylhsminor.yy484; break; - case 189: /* tablelist ::= tablelist COMMA ids cpxName */ + case 188: /* tablelist ::= tablelist COMMA ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy484 = setTableNameList(yymsp[-3].minor.yy484, &yymsp[-1].minor.yy0, NULL); } yymsp[-3].minor.yy484 = yylhsminor.yy484; break; - case 190: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 189: /* tablelist ::= tablelist COMMA ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy484 = setTableNameList(yymsp[-4].minor.yy484, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } yymsp[-4].minor.yy484 = yylhsminor.yy484; break; - case 191: /* tmvar ::= VARIABLE */ + case 190: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 192: /* timestamp ::= INTEGER */ + case 191: /* timestamp ::= INTEGER */ { yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_INTEGER);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 193: /* timestamp ::= MINUS INTEGER */ - case 194: /* timestamp ::= PLUS INTEGER */ yytestcase(yyruleno==194); + case 192: /* timestamp ::= MINUS INTEGER */ + case 193: /* timestamp ::= PLUS INTEGER */ 
yytestcase(yyruleno==193); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[-1].minor.yy0, TK_INTEGER);} yymsp[-1].minor.yy418 = yylhsminor.yy418; break; - case 195: /* timestamp ::= STRING */ + case 194: /* timestamp ::= STRING */ { yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_STRING);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 196: /* timestamp ::= NOW */ + case 195: /* timestamp ::= NOW */ { yylhsminor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_NOW); } yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 197: /* timestamp ::= NOW PLUS VARIABLE */ + case 196: /* timestamp ::= NOW PLUS VARIABLE */ {yymsp[-2].minor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_PLUS); } break; - case 198: /* timestamp ::= NOW MINUS VARIABLE */ + case 197: /* timestamp ::= NOW MINUS VARIABLE */ {yymsp[-2].minor.yy418 = tSqlExprCreateTimestamp(&yymsp[0].minor.yy0, TK_MINUS); } break; - case 199: /* range_option ::= */ + case 198: /* range_option ::= */ {yymsp[1].minor.yy144.start = 0; yymsp[1].minor.yy144.end = 0;} break; - case 200: /* range_option ::= RANGE LP timestamp COMMA timestamp RP */ + case 199: /* range_option ::= RANGE LP timestamp COMMA timestamp RP */ {yymsp[-5].minor.yy144.start = yymsp[-3].minor.yy418; yymsp[-5].minor.yy144.end = yymsp[-1].minor.yy418;} break; - case 201: /* interval_option ::= intervalKey LP tmvar RP */ + case 200: /* interval_option ::= intervalKey LP tmvar RP */ {yylhsminor.yy280.interval = yymsp[-1].minor.yy0; yylhsminor.yy280.offset.n = 0; yylhsminor.yy280.token = yymsp[-3].minor.yy40;} yymsp[-3].minor.yy280 = yylhsminor.yy280; break; - case 202: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + case 201: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ {yylhsminor.yy280.interval = yymsp[-3].minor.yy0; yylhsminor.yy280.offset = yymsp[-1].minor.yy0; yylhsminor.yy280.token = yymsp[-5].minor.yy40;} yymsp[-5].minor.yy280 = yylhsminor.yy280; break; - case 203: /* interval_option ::= */ + case 202: /* interval_option ::= */ {memset(&yymsp[1].minor.yy280, 0, sizeof(yymsp[1].minor.yy280));} break; - case 204: /* intervalKey ::= INTERVAL */ + case 203: /* intervalKey ::= INTERVAL */ {yymsp[0].minor.yy40 = TK_INTERVAL;} break; - case 205: /* intervalKey ::= EVERY */ + case 204: /* intervalKey ::= EVERY */ {yymsp[0].minor.yy40 = TK_EVERY; } break; - case 206: /* session_option ::= */ + case 205: /* session_option ::= */ {yymsp[1].minor.yy295.col.n = 0; yymsp[1].minor.yy295.gap.n = 0;} break; - case 207: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + case 206: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; yymsp[-6].minor.yy295.col = yymsp[-4].minor.yy0; yymsp[-6].minor.yy295.gap = yymsp[-1].minor.yy0; } break; - case 208: /* windowstate_option ::= */ + case 207: /* windowstate_option ::= */ { yymsp[1].minor.yy432.col.n = 0; yymsp[1].minor.yy432.col.z = NULL;} break; - case 209: /* windowstate_option ::= STATE_WINDOW LP ids RP */ + case 208: /* windowstate_option ::= STATE_WINDOW LP ids RP */ { yymsp[-3].minor.yy432.col = yymsp[-1].minor.yy0; } break; - case 210: /* fill_opt ::= */ + case 209: /* fill_opt ::= */ { yymsp[1].minor.yy345 = 0; } break; - case 211: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 210: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); 
- tVariantCreate(&A, &yymsp[-3].minor.yy0); + tVariantCreate(&A, &yymsp[-3].minor.yy0, true); tVariantListInsert(yymsp[-1].minor.yy345, &A, -1, 0); yymsp[-5].minor.yy345 = yymsp[-1].minor.yy345; } break; - case 212: /* fill_opt ::= FILL LP ID RP */ + case 211: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy345 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy345 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); } break; - case 213: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 212: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 214: /* sliding_opt ::= */ + case 213: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 216: /* orderby_opt ::= ORDER BY sortlist */ + case 215: /* orderby_opt ::= ORDER BY sortlist */ {yymsp[-2].minor.yy345 = yymsp[0].minor.yy345;} break; - case 217: /* sortlist ::= sortlist COMMA item sortorder */ + case 216: /* sortlist ::= sortlist COMMA item sortorder */ { yylhsminor.yy345 = tVariantListAppend(yymsp[-3].minor.yy345, &yymsp[-1].minor.yy2, yymsp[0].minor.yy281); } yymsp[-3].minor.yy345 = yylhsminor.yy345; break; - case 218: /* sortlist ::= item sortorder */ + case 217: /* sortlist ::= item sortorder */ { yylhsminor.yy345 = tVariantListAppend(NULL, &yymsp[-1].minor.yy2, yymsp[0].minor.yy281); } yymsp[-1].minor.yy345 = yylhsminor.yy345; break; - case 219: /* item ::= ids cpxName */ + case 218: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy2, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy2, &yymsp[-1].minor.yy0, true); } yymsp[-1].minor.yy2 = yylhsminor.yy2; break; - case 220: /* sortorder ::= ASC */ + case 219: /* sortorder ::= ASC */ { yymsp[0].minor.yy281 = TSDB_ORDER_ASC; } break; - case 221: /* sortorder ::= DESC */ + case 220: /* sortorder ::= DESC */ { yymsp[0].minor.yy281 = TSDB_ORDER_DESC;} break; - case 222: /* sortorder ::= */ + case 221: /* sortorder ::= */ { yymsp[1].minor.yy281 = TSDB_ORDER_ASC; } break; - case 223: /* groupby_opt ::= */ + case 222: /* groupby_opt ::= */ { yymsp[1].minor.yy345 = 0;} break; - case 224: /* groupby_opt ::= GROUP BY grouplist */ + case 223: /* groupby_opt ::= GROUP BY grouplist */ { yymsp[-2].minor.yy345 = yymsp[0].minor.yy345;} break; - case 225: /* grouplist ::= grouplist COMMA item */ + case 224: /* grouplist ::= grouplist COMMA item */ { yylhsminor.yy345 = tVariantListAppend(yymsp[-2].minor.yy345, &yymsp[0].minor.yy2, -1); } yymsp[-2].minor.yy345 = yylhsminor.yy345; break; - case 226: /* grouplist ::= item */ + case 225: /* grouplist ::= item */ { yylhsminor.yy345 = tVariantListAppend(NULL, &yymsp[0].minor.yy2, -1); } yymsp[0].minor.yy345 = yylhsminor.yy345; break; - case 227: /* having_opt ::= */ - case 237: /* where_opt ::= */ yytestcase(yyruleno==237); - case 281: /* expritem ::= */ yytestcase(yyruleno==281); + case 226: /* having_opt ::= */ + case 236: /* where_opt ::= */ yytestcase(yyruleno==236); + case 280: /* expritem ::= */ yytestcase(yyruleno==280); {yymsp[1].minor.yy418 = 0;} break; - case 228: /* having_opt ::= HAVING expr */ - case 238: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==238); + case 227: /* having_opt ::= HAVING expr */ + case 237: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==237); {yymsp[-1].minor.yy418 = yymsp[0].minor.yy418;} break; - case 229: /* 
limit_opt ::= */ - case 233: /* slimit_opt ::= */ yytestcase(yyruleno==233); + case 228: /* limit_opt ::= */ + case 232: /* slimit_opt ::= */ yytestcase(yyruleno==232); {yymsp[1].minor.yy114.limit = -1; yymsp[1].minor.yy114.offset = 0;} break; - case 230: /* limit_opt ::= LIMIT signed */ - case 234: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==234); + case 229: /* limit_opt ::= LIMIT signed */ + case 233: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==233); {yymsp[-1].minor.yy114.limit = yymsp[0].minor.yy525; yymsp[-1].minor.yy114.offset = 0;} break; - case 231: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 230: /* limit_opt ::= LIMIT signed OFFSET signed */ { yymsp[-3].minor.yy114.limit = yymsp[-2].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[0].minor.yy525;} break; - case 232: /* limit_opt ::= LIMIT signed COMMA signed */ + case 231: /* limit_opt ::= LIMIT signed COMMA signed */ { yymsp[-3].minor.yy114.limit = yymsp[0].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[-2].minor.yy525;} break; - case 235: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ + case 234: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ {yymsp[-3].minor.yy114.limit = yymsp[-2].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[0].minor.yy525;} break; - case 236: /* slimit_opt ::= SLIMIT signed COMMA signed */ + case 235: /* slimit_opt ::= SLIMIT signed COMMA signed */ {yymsp[-3].minor.yy114.limit = yymsp[0].minor.yy525; yymsp[-3].minor.yy114.offset = yymsp[-2].minor.yy525;} break; - case 239: /* expr ::= LP expr RP */ + case 238: /* expr ::= LP expr RP */ {yylhsminor.yy418 = yymsp[-1].minor.yy418; yylhsminor.yy418->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy418->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 240: /* expr ::= ID */ + case 239: /* expr ::= ID */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_ID);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 241: /* expr ::= ID DOT ID */ + case 240: /* expr ::= ID DOT ID */ { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ID);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 242: /* expr ::= ID DOT STAR */ + case 241: /* expr ::= ID DOT STAR */ { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ALL);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 243: /* expr ::= INTEGER */ + case 242: /* expr ::= INTEGER */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_INTEGER);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 244: /* expr ::= MINUS INTEGER */ - case 245: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==245); + case 243: /* expr ::= MINUS INTEGER */ + case 244: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==244); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_INTEGER);} yymsp[-1].minor.yy418 = yylhsminor.yy418; break; - case 246: /* expr ::= FLOAT */ + case 245: /* expr ::= FLOAT */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_FLOAT);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 247: /* expr ::= MINUS FLOAT */ - case 248: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==248); + case 246: /* expr ::= MINUS FLOAT */ + case 247: /* expr ::= PLUS FLOAT */ 
yytestcase(yyruleno==247); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_FLOAT);} yymsp[-1].minor.yy418 = yylhsminor.yy418; break; - case 249: /* expr ::= STRING */ + case 248: /* expr ::= STRING */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 250: /* expr ::= NOW */ + case 249: /* expr ::= NOW */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NOW); } yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 251: /* expr ::= VARIABLE */ + case 250: /* expr ::= VARIABLE */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_VARIABLE);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 252: /* expr ::= PLUS VARIABLE */ - case 253: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==253); + case 251: /* expr ::= PLUS VARIABLE */ + case 252: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==252); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_VARIABLE);} yymsp[-1].minor.yy418 = yylhsminor.yy418; break; - case 254: /* expr ::= BOOL */ + case 253: /* expr ::= BOOL */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_BOOL);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 255: /* expr ::= NULL */ + case 254: /* expr ::= NULL */ { yylhsminor.yy418 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NULL);} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 256: /* expr ::= ID LP exprlist RP */ + case 255: /* expr ::= ID LP exprlist RP */ { tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy418 = tSqlExprCreateFunction(yymsp[-1].minor.yy345, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy418 = yylhsminor.yy418; break; - case 257: /* expr ::= ID LP STAR RP */ + case 256: /* expr ::= ID LP STAR RP */ { tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy418 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } yymsp[-3].minor.yy418 = yylhsminor.yy418; break; - case 258: /* expr ::= expr IS NULL */ + case 257: /* expr ::= expr IS NULL */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, NULL, TK_ISNULL);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 259: /* expr ::= expr IS NOT NULL */ + case 258: /* expr ::= expr IS NOT NULL */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-3].minor.yy418, NULL, TK_NOTNULL);} yymsp[-3].minor.yy418 = yylhsminor.yy418; break; - case 260: /* expr ::= expr LT expr */ + case 259: /* expr ::= expr LT expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_LT);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 261: /* expr ::= expr GT expr */ + case 260: /* expr ::= expr GT expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_GT);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 262: /* expr ::= expr LE expr */ + case 261: /* expr ::= expr LE expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_LE);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 263: /* expr ::= expr GE expr */ + case 262: /* expr ::= expr GE expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_GE);} 
yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 264: /* expr ::= expr NE expr */ + case 263: /* expr ::= expr NE expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_NE);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 265: /* expr ::= expr EQ expr */ + case 264: /* expr ::= expr EQ expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_EQ);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 266: /* expr ::= expr BETWEEN expr AND expr */ + case 265: /* expr ::= expr BETWEEN expr AND expr */ { tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy418); yylhsminor.yy418 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy418, yymsp[-2].minor.yy418, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy418, TK_LE), TK_AND);} yymsp[-4].minor.yy418 = yylhsminor.yy418; break; - case 267: /* expr ::= expr AND expr */ + case 266: /* expr ::= expr AND expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_AND);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 268: /* expr ::= expr OR expr */ + case 267: /* expr ::= expr OR expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_OR); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 269: /* expr ::= expr PLUS expr */ + case 268: /* expr ::= expr PLUS expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_PLUS); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 270: /* expr ::= expr MINUS expr */ + case 269: /* expr ::= expr MINUS expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_MINUS); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 271: /* expr ::= expr STAR expr */ + case 270: /* expr ::= expr STAR expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_STAR); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 272: /* expr ::= expr SLASH expr */ + case 271: /* expr ::= expr SLASH expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_DIVIDE);} yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 273: /* expr ::= expr REM expr */ + case 272: /* expr ::= expr REM expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_REM); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 274: /* expr ::= expr LIKE expr */ + case 273: /* expr ::= expr LIKE expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_LIKE); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 275: /* expr ::= expr MATCH expr */ + case 274: /* expr ::= expr MATCH expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_MATCH); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 276: /* expr ::= expr NMATCH expr */ + case 275: /* expr ::= expr NMATCH expr */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-2].minor.yy418, yymsp[0].minor.yy418, TK_NMATCH); } yymsp[-2].minor.yy418 = yylhsminor.yy418; break; - case 277: /* expr ::= expr IN LP exprlist RP */ + case 276: /* expr ::= expr IN LP exprlist RP */ {yylhsminor.yy418 = tSqlExprCreate(yymsp[-4].minor.yy418, (tSqlExpr*)yymsp[-1].minor.yy345, TK_IN); } yymsp[-4].minor.yy418 = yylhsminor.yy418; break; - case 278: /* exprlist ::= exprlist COMMA expritem */ + case 277: /* exprlist ::= exprlist COMMA expritem */ {yylhsminor.yy345 = 
tSqlExprListAppend(yymsp[-2].minor.yy345,yymsp[0].minor.yy418,0, 0);} yymsp[-2].minor.yy345 = yylhsminor.yy345; break; - case 279: /* exprlist ::= expritem */ + case 278: /* exprlist ::= expritem */ {yylhsminor.yy345 = tSqlExprListAppend(0,yymsp[0].minor.yy418,0, 0);} yymsp[0].minor.yy345 = yylhsminor.yy345; break; - case 280: /* expritem ::= expr */ + case 279: /* expritem ::= expr */ {yylhsminor.yy418 = yymsp[0].minor.yy418;} yymsp[0].minor.yy418 = yylhsminor.yy418; break; - case 282: /* cmd ::= RESET QUERY CACHE */ + case 281: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 283: /* cmd ::= SYNCDB ids REPLICA */ + case 282: /* cmd ::= SYNCDB ids REPLICA */ { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} break; - case 284: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 283: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 285: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 284: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, false); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 286: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + case 285: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 287: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 286: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 288: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 287: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 289: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 288: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; toTSDBType(yymsp[-1].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); toTSDBType(yymsp[0].minor.yy0.type); - A = tVariantListAppendToken(A, 
&yymsp[0].minor.yy0, -1); + A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 290: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 289: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, true); A = tVariantListAppend(A, &yymsp[0].minor.yy2, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 291: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + case 290: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 292: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 291: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 293: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 292: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 294: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + case 293: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 295: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 294: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 296: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 295: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, 
NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 297: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 296: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; toTSDBType(yymsp[-1].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1, true); toTSDBType(yymsp[0].minor.yy0.type); - A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1); + A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1, true); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 298: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + case 297: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); - SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1, true); A = tVariantListAppend(A, &yymsp[0].minor.yy2, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 299: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + case 298: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy345, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 300: /* cmd ::= KILL CONNECTION INTEGER */ + case 299: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 301: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 300: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 302: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 301: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/query/tests/cSortTest.cpp b/src/query/tests/cSortTest.cpp index aa5aa89afc211678cfc521dccd46fbdb533fbff1..ae3d4490ce8218031f8712f23b070dad637425e7 100644 --- a/src/query/tests/cSortTest.cpp +++ b/src/query/tests/cSortTest.cpp @@ -97,7 +97,7 @@ TEST(testCase, colunmnwise_sort_test) { } TEST(testCase, columnsort_test) { - SSchema field[1] = { + SSchema1 field[1] = { {TSDB_DATA_TYPE_INT, "k", sizeof(int32_t)}, }; diff --git a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp index 1b6951201af5908378fb253b38cea01de1210d57..9985f7e6446913458c798ef8f27961e319bfc3d5 100644 --- a/src/query/tests/percentileTest.cpp +++ b/src/query/tests/percentileTest.cpp @@ -192,7 +192,7 @@ void largeDataTest() { void qsortTest() { printf("running : %s\n", __FUNCTION__); - SSchema field[1] = { + SSchema1 field[1] = { {TSDB_DATA_TYPE_INT, "k", sizeof(int32_t)}, }; diff --git 
a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index dfa4b74b7a5720398f9fc748078a0be6d870dda7..5e1c2bc69e6aad67cae979713ea9e3caa3b73584 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -660,9 +660,9 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea int numColumns; int32_t blockIdx; SDataStatis* pBlockStatis = NULL; - SMemRow row = NULL; + // SMemRow row = NULL; // restore last column data with last schema - + int err = 0; numColumns = schemaNCols(pSchema); @@ -676,15 +676,15 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea } } - row = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); - if (row == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - err = -1; - goto out; - } + // row = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); + // if (row == NULL) { + // terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + // err = -1; + // goto out; + // } - memRowSetType(row, SMEM_ROW_DATA); - tdInitDataRow(memRowDataBody(row), pSchema); + // memRowSetType(row, SMEM_ROW_DATA); + // tdInitDataRow(memRowDataBody(row), pSchema); // first load block index info if (tsdbLoadBlockInfo(pReadh, NULL, NULL) < 0) { @@ -743,10 +743,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) { SDataCol *pDataCol = pReadh->pDCols[0]->cols + i; const void* pColData = tdGetColDataOfRow(pDataCol, rowId); - tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset); - //SDataCol *pDataCol = readh.pDCols[0]->cols + j; - void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); - if (isNull(value, pCol->type)) { + // tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset); + // SDataCol *pDataCol = readh.pDCols[0]->cols + j; + // void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + + // + // pCol->offset); + if (isNull(pColData, pCol->type)) { continue; } @@ -761,13 +763,14 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea pLastCol->pData = malloc(bytes); pLastCol->bytes = bytes; pLastCol->colId = pCol->colId; - memcpy(pLastCol->pData, value, bytes); + memcpy(pLastCol->pData, pColData, bytes); // save row ts(in column 0) pDataCol = pReadh->pDCols[0]->cols + 0; - pCol = schemaColAt(pSchema, 0); - tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); - pLastCol->ts = memRowKey(row); + // pCol = schemaColAt(pSchema, 0); + // tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); + // pLastCol->ts = memRowKey(row); + pLastCol->ts = tdGetKey(*(TKEY *)(tdGetColDataOfRow(pDataCol, rowId))); pTable->restoreColumnNum += 1; @@ -779,7 +782,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea } out: - taosTZfree(row); + // taosTZfree(row); tfree(pBlockStatis); if (err == 0 && numColumns <= pTable->restoreColumnNum) { diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 28993f08c447c2a84a4493e40d74e924ad656c74..6bcffd85f411085f6fc973151fca5d97cf4bd87c 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -57,7 +57,7 @@ int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pR STsdbRepo * pRepo = repo; SSubmitMsgIter msgIter = {0}; SSubmitBlk * pBlock = NULL; - int32_t affectedrows = 0; + int32_t affectedrows = 0, 
numOfRows = 0; if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) { if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) { @@ -73,9 +73,13 @@ int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pR if (tsdbInsertDataToTable(pRepo, pBlock, &affectedrows) < 0) { return -1; } + numOfRows += pBlock->numOfRows; } - if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows); + if (pRsp != NULL) { + pRsp->affectedRows = htonl(affectedrows); + pRsp->numOfRows = htonl(numOfRows); + } if (tsdbCheckCommit(pRepo) < 0) return -1; return 0; diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 69b0d8d7bb9ad5ab37321a5460c3f083e3a71dba..2a49862bac14633f77db921b7d2e17b160019425 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -334,8 +334,9 @@ bool taosReadConfigOption(const char *option, char *value, char *value2, char *v if (taosReadDirectoryConfig(cfg, value)) { taosReadDataDirCfg(value, value2, value3); ret = true; + } else { + ret = false; } - ret = false; break; default: uError("config option:%s, input value:%s, can't be recognized", option, value); @@ -382,6 +383,12 @@ void taosReadGlobalLogCfg() { #elif (_TD_PRO_ == true) printf("configDir:%s not there, use default value: /etc/ProDB", configDir); strcpy(configDir, "/etc/ProDB"); + #elif (_TD_KH_ == true) + printf("configDir:%s not there, use default value: /etc/kinghistorian", configDir); + strcpy(configDir, "/etc/kinghistorian"); + #elif (_TD_JH_ == true) + printf("configDir:%s not there, use default value: /etc/jh_taos", configDir); + strcpy(configDir, "/etc/jh_taos"); #else printf("configDir:%s not there, use default value: /etc/taos", configDir); strcpy(configDir, "/etc/taos"); diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index a2eea5aa7d99a43f2cf7f0552e843ce9a52034c0..232d10a7d07594c9c62cd13767c320da27af2a73 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -87,6 +87,10 @@ char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq"; #elif (_TD_PRO_ == true) char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB"; +#elif (_TD_KH_ == true) +char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/kinghistorian"; +#elif (_TD_JH_ == true) +char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/jh_taos"; #else char tsLogDir[PATH_MAX] = "/var/log/taos"; #endif diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 35c2ab72dfdf55f66b1095c757fc4f90656c842b..40148fcc6d34196bed1997cb2499a4202a460fe2 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -27,6 +27,10 @@ #define MAX_QUEUED_MSG_NUM 100000 #define MAX_QUEUED_MSG_SIZE 1024*1024*1024 //1GB +static int64_t tsSubmitReqSucNum = 0; +static int64_t tsSubmitRowNum = 0; +static int64_t tsSubmitRowSucNum = 0; + extern void * tsDnodeTmr; static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *); static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); @@ -163,7 +167,16 @@ static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pR pRsp = pRet->rsp; } - if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) code = terrno; + if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) { + code = terrno; + } else { + if (pRsp != NULL) atomic_fetch_add_64(&tsSubmitReqSucNum, 1); + } + + if (pRsp) { + atomic_fetch_add_64(&tsSubmitRowNum, ntohl(pRsp->numOfRows)); + atomic_fetch_add_64(&tsSubmitRowSucNum, ntohl(pRsp->affectedRows)); + } return code; } @@ -425,3 +438,12 @@ void vnodeWaitWriteCompleted(SVnodeObj 
*pVnode) { if (extraSleep) taosMsleep(900); } + +SVnodeStatisInfo vnodeGetStatisInfo() { + SVnodeStatisInfo info = {0}; + info.submitReqSucNum = atomic_exchange_64(&tsSubmitReqSucNum, 0); + info.submitRowNum = atomic_exchange_64(&tsSubmitRowNum, 0); + info.submitRowSucNum = atomic_exchange_64(&tsSubmitRowSucNum, 0); + + return info; +} diff --git a/tests/examples/lua/OpenResty/rest/test.lua b/tests/examples/lua/OpenResty/rest/test.lua index 48aeef3fb4dd8c9a0dc18e8039b4b8c781760666..2dc0cf10f22b90c8bcb925700b1d7ebd00ff153a 100644 --- a/tests/examples/lua/OpenResty/rest/test.lua +++ b/tests/examples/lua/OpenResty/rest/test.lua @@ -63,6 +63,7 @@ else end +--[[ local flag = false function query_callback(res) if res.code ~=0 then @@ -80,9 +81,10 @@ end driver.query_a(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 3, 'robotspace'),('2019-09-01 00:00:00.006', 4, 'Hilink'),('2019-09-01 00:00:00.007', 6, 'Harmony')", query_callback) while not flag do --- ngx.say("i am here once...") + ngx.say("i am here once...") ngx.sleep(0.001) -- time unit is second end +--]] ngx.say("pool water_mark:"..pool:get_water_mark()) diff --git a/tests/examples/lua/README.md b/tests/examples/lua/README.md index 32d6a4cace9bd0bf66238ff32af1d3ecf0285046..bdc88edbd7b5d6798a8df6530ea82d24eb22915b 100644 --- a/tests/examples/lua/README.md +++ b/tests/examples/lua/README.md @@ -1,7 +1,10 @@ # TDengine driver connector for Lua -It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua5.3 . - +It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua 5.3. +When TDengine is built with its embedded Lua module enabled, the built-in module conflicts with an external Lua installation. The commands below therefore require a TDengine build with the embedded Lua module disabled. +To disable the built-in Lua: +mkdir debug && cd debug +cmake .. -DBUILD_LUA=false && cmake --build .
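A minimal end-to-end sketch of the build workflow described in the note above, for readers starting from a fresh source checkout. Only the `debug` build directory and the `-DBUILD_LUA=false` option come from the note itself; the clone step and repository path are illustrative assumptions rather than part of this change.

```
# Build TDengine with the embedded Lua module disabled so it does not
# conflict with the external Lua 5.3 interpreter used by this connector.
git clone https://github.com/taosdata/TDengine.git   # assumption: building from source
cd TDengine
mkdir debug && cd debug
cmake .. -DBUILD_LUA=false   # turn off the built-in Lua module
cmake --build .
```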
## Lua Dependencies - Lua: ``` diff --git a/tests/examples/lua/lua51/lua_connector51.c b/tests/examples/lua/lua51/lua_connector51.c index fe2152945dc1915dca5de31458a8cbb2f007f4f2..b6e0b6d1de200b09750ffba6845ae9bf0606f4d8 100644 --- a/tests/examples/lua/lua51/lua_connector51.c +++ b/tests/examples/lua/lua51/lua_connector51.c @@ -102,7 +102,7 @@ static int l_query(lua_State *L){ printf("failed, reason:%s\n", taos_errstr(result)); lua_pushinteger(L, -1); lua_setfield(L, table_index, "code"); - lua_pushstring(L, taos_errstr(taos)); + lua_pushstring(L, taos_errstr(result)); lua_setfield(L, table_index, "error"); return 1; diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index 8c2ea3e9e83237fc8ed9ebce687f5131352e4d14..06568f35d656d5d9af1ae2e88eeaeba92f0ede91 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -102,7 +102,7 @@ static int l_query(lua_State *L){ printf("failed, reason:%s\n", taos_errstr(result)); lua_pushinteger(L, -1); lua_setfield(L, table_index, "code"); - lua_pushstring(L, taos_errstr(taos)); + lua_pushstring(L, taos_errstr(result)); lua_setfield(L, table_index, "error"); return 1; diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index bf91fcf0b92616ff3640d072ad2fefa760d23041..86bd39fa065d99ff66eeb2580ec80f07aa7601e6 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -171,7 +171,7 @@ python3 test.py -f tools/taosdemoTestLimitOffset.py python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py -python3 test.py -f tools/taosdemoTestQuery.py +# python3 test.py -f tools/taosdemoTestQuery.py # restful test for python # python3 test.py -f restful/restful_bind_db1.py @@ -218,9 +218,9 @@ python3 ./test.py -f perfbenchmark/bug3433.py python3 ./test.py -f perfbenchmark/taosdemoInsert.py #taosdemo -#python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py -# python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py -# python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertAllType.py #query python3 test.py -f query/distinctOneColTb.py diff --git a/tests/pytest/functions/function_count_last_stab.py b/tests/pytest/functions/function_count_last_stab.py index 1d777c6bd314941036f542c7d0e9063e590fa7dd..cd0a9b17c2fd8c98544dca09f6a7008929225ece 100644 --- a/tests/pytest/functions/function_count_last_stab.py +++ b/tests/pytest/functions/function_count_last_stab.py @@ -42,22 +42,22 @@ class TDTestCase: % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) tdSql.query("select count(*),last(*) from stest group by col1") - tdSql.checkRows(10) - tdSql.checkData(0, 0, 1) - tdSql.checkData(1, 2, 2) - tdSql.checkData(1, 3, 1) + tdSql.checkRows(11) + tdSql.checkData(1, 0, 1) + tdSql.checkData(2, 2, 2) + tdSql.checkData(2, 3, 1) tdSql.query("select count(*),last(*) from stest group by col2") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.checkData(0, 2, 10) - tdSql.checkData(0, 3, 1) + tdSql.checkRows(2) + tdSql.checkData(1, 0, 10) + tdSql.checkData(1, 2, 10) + tdSql.checkData(1, 3, 1) tdSql.query("select count(*),last(ts,stest.*) from stest group by col1") - tdSql.checkRows(10) - tdSql.checkData(0, 0, 1) - 
tdSql.checkData(0, 2, "2018-09-17 09:00:00") - tdSql.checkData(1, 4, 1) + tdSql.checkRows(11) + tdSql.checkData(1, 0, 1) + tdSql.checkData(1, 2, "2018-09-17 09:00:00") + tdSql.checkData(2, 4, 1) diff --git a/tests/pytest/query/select_last_crash.py b/tests/pytest/query/select_last_crash.py index 9b580a24acd145e2e90ae6ee513e946d72820f2c..8bfd71ef2788fdcf1fc0f2876b52934633638068 100644 --- a/tests/pytest/query/select_last_crash.py +++ b/tests/pytest/query/select_last_crash.py @@ -16,6 +16,8 @@ import taos from util.log import * from util.cases import * from util.sql import * +from util.dnodes import * +import random class TDTestCase: @@ -41,6 +43,32 @@ class TDTestCase: tdSql.query("select last(*) from st") tdSql.checkRows(1) + + # TS-717 + tdLog.info("case for TS-717") + cachelast_values = [0, 1, 2, 3] + + for value in cachelast_values: + tdLog.info("case for cachelast value: %d" % value) + tdSql.execute("drop database if exists db") + tdLog.sleep(1) + tdSql.execute("create database db cachelast %d" % value) + tdSql.execute("use db") + tdSql.execute("create table stb(ts timestamp, c1 int, c2 binary(20), c3 binary(5)) tags(t1 int)") + + sql = "insert into t1 using stb tags(1) (ts, c1, c2) values" + for i in range(self.rowNum): + sql += "(%d, %d, 'test')" % (self.ts + i, random.randint(1,100)) + tdSql.execute(sql) + + tdSql.query("select * from stb") + tdSql.checkRows(self.rowNum) + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select * from stb") + tdSql.checkRows(self.rowNum) def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json index 0068a9c30463ff39d49cbd14d15b5d84747d0a59..d73719ebe41c5f25fc2cd585bc9974d9e83a946e 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -35,7 +35,7 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 11, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 1, @@ -61,7 +61,7 @@ { "name": "stb1", "child_table_exists":"no", - "childtable_count": 1000, + "childtable_count": 10, "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 10, diff --git a/tests/pytest/tools/taosdemoAllTest/insert-default.json b/tests/pytest/tools/taosdemoAllTest/insert-default.json new file mode 100644 index 0000000000000000000000000000000000000000..423f94819f6302a50835a74e7eb5bb06fbf58c94 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-default.json @@ -0,0 +1,16 @@ +{ + "filetype": "insert", + "databases": [{ + "dbinfo": { + "name": "db" + }, + "super_tables": [{ + "name": "stb0", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "insert_rows": 123, + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/queryQps.json b/tests/pytest/tools/taosdemoAllTest/queryQps.json index 7ebad5e2b2f5af687656c8eed041579d7de1e2c2..c2ff21ea911f09bd459648a57a7ce4609f8bfb58 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryQps.json +++ b/tests/pytest/tools/taosdemoAllTest/queryQps.json @@ -17,7 +17,7 @@ "result": "./query_res0.txt" }, { - "sql": "select last_row(*) from stb00_99 ", + "sql": "select last_row(*) from stb00_9 ", "result": 
"./query_res1.txt" }] diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-large-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..741a9f814b8a61c692343621c3dcc1117544fbed --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-sml.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "sml", + "insert_rate": 0, + "insert_rows": 2592000, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-large-taosc.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-taosc.json new file mode 100644 index 0000000000000000000000000000000000000000..bb21003e9340b91496b8f96014aa7b318bb44895 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-large-taosc.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 2592000, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, 
{"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml-random.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml-random.json new file mode 100644 index 0000000000000000000000000000000000000000..615baad853987220ea2c76663327a2a783b4cdb4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml-random.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "rand", + "insert_mode": "sml", + "insert_rate": 0, + "insert_rows": 259200, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..be7c182d0ebdfd377bff4a020c63a03333160b39 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-sml.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + 
"auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "sml", + "insert_rate": 0, + "insert_rows": 259200, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/1174-small-taosc.json b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-taosc.json new file mode 100644 index 0000000000000000000000000000000000000000..f74ac693a90f48ce8cf0fceca61723861631d37a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/1174-small-taosc.json @@ -0,0 +1,46 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.103", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 1, + "result_file": "1174.out", + "confirm_parameter_prompt": "no", + "num_of_records_per_req": 51, + "databases": [ + { + "dbinfo": { + "name": "gdse", + "drop": "yes", + "keep": 36500 + }, + "super_tables": [{ + "name": "model_1174", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "model_1174_", + "auto_create_table": "no", + "batch_create_tbl_num": 0, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rate": 0, + "insert_rows": 259200, + "interlace_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-05-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "tools/taosdemoAllTest/1174.csv", + "tags_file": "tools/taosdemoAllTest/1174-tag.csv", + "columns": [{"type": "FLOAT", "count": 109}, {"type": "INT", "count": 4}, {"type": "FLOAT", "count": 8}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 5}, {"type": "INT", "count": 47}, {"type": "BOOL", "count": 103}, {"type": "INT", "count": 2}, {"type": "TIMESTAMP", "count": 3}, {"type": "BOOL", "count": 28}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 6}, {"type": "INT", "count": 1}, {"type": "FLOAT", "count": 7}, {"type": "BOOL", "count": 7}, {"type": "FLOAT", "count": 2}, {"type": "INT", "count": 3}, {"type": "FLOAT", "count": 3}, {"type": "INT", "count": 3}, {"type": "BOOL", "count": 1}], + "tags": [{"type": "INT", "count": 1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..5cd06c02759ddcba93eaa8ef4ef848a9b645cbda --- /dev/null +++ 
b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json @@ -0,0 +1,142 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows": 100, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + 
"interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..0885e01782b41079ccbfb7a30a8b4d3628ba9c20 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 150, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 15, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..5be20c28bba11ff40296d062f93ab4fda57a1f88 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-allDataType-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + 
"port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "TIMESTAMP"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 16, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 16, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..6f24801cb04f9f515e33898fb587b95029def325 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-disorder-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, 
+ "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 100, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 100, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..92e6ec0df7a70329312676298c3b5ffccc2a8767 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-N00-sml.json @@ -0,0 +1,181 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "NN123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "NNN_", + 
"batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "NNY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "NY123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "NYN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "NYY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + } + ] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..c09493ec7b892baba37a7be4addb0ce526752f07 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-drop-exist-auto-Y00-sml.json @@ -0,0 +1,181 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + 
"confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "YN123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "YNN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "YNY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "YY123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "YYN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "yes", + 
"childtable_count": 20, + "childtable_prefix": "YYY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + } + ] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..e04f2ff5e7cb24cb5384b7451712b3fe83bf18c3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-interlace-row-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 150, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 151, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..4a4227adb8fdcd0cb025a10c5b6f417c921acd96 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-interval-speed-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 100, + "interlace_rows": 0, + "num_of_records_per_req": 2000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + 
"child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 35, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval": 200, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..1d29842e02c654987c50e6e73d4aec5eed48aa83 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-newdb-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + 
"child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json new file mode 100644 index 
0000000000000000000000000000000000000000..886503a950ca18b752bfa264218bb8564ce44ae0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-newtable-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + 
"data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 30, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..ca99d135c5f466c911f3063b88fbb3e58c4e4ed4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbno", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json new file mode 100644 
index 0000000000000000000000000000000000000000..d0109b50cf449b0e7e1b258ae29723a560b1d2f6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-offset-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"yes", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + 
"data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset":7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..f8f3a8ee5cea1834c31ebb275a10977cd960f829 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-renewdb-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + 
"data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..780fd60bb7e98f18a5c33798b6bb35a77e1d85db --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sample-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", 
+ "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbtest123", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "./tools/taosdemoAllTest/tags.csv", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json new file mode 100644 index 0000000000000000000000000000000000000000..2de298efa6553ec0c6de095ee0515a73e777445f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-json-alltype.json @@ -0,0 +1,270 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 2, + 
"childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT", "count":1}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 4, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "BIGINT", "count":1}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "SMALLINT", "count":1}] + }, + { + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 40, + "childtable_prefix": "stb05_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "FLOAT", "count":1}] + }, + { + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 15, + "childtable_prefix": "stb06_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + 
"insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "DOUBLE", "count":1}] + }, + { + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb07_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ {"type": "BOOL"}], + "tags": [{"type": "BOOL", "count":1}] + }, + { + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb08_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "NCHAR","len": 16, "count":1}], + "tags": [{"type": "NCHAR", "count":1}] + }, + { + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb09_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "BINARY", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json new file mode 100644 index 0000000000000000000000000000000000000000..ff825440e5cbfd8aa5d8d6e74538c5802af8af38 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-sml-telnet-alltype.json @@ -0,0 +1,374 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + 
"name": "stb0", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "UINT"}], + "tags": [{"type": "UINT", "count":1}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 4, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "BIGINT", "count":1}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":30, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "SMALLINT", "count":1}] + }, + { + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 15, + "childtable_prefix": "stb05_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + 
"data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":20, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "FLOAT", "count":1}] + }, + { + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb06_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":10, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "DOUBLE", "count":1}] + }, + { + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 30, + "childtable_prefix": "stb07_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":5, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ {"type": "BOOL"}], + "tags": [{"type": "BOOL", "count":1}] + }, + { + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb08_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":30, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "NCHAR","len": 16, "count":1}], + "tags": [{"type": "NCHAR", "count":1}] + }, + { + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb09_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "BINARY", "count":1}] + }, + { + "name": "stb10", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb10_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + 
"childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "UBIGINT"}], + "tags": [{"type": "UBIGINT", "count":1}] + }, + { + "name": "stb11", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb11_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "UTINYINT"}], + "tags": [{"type": "UTINYINT", "count":1}] + }, + { + "name": "stb12", + "child_table_exists":"no", + "childtable_count": 3, + "childtable_prefix": "stb12_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ {"type": "USMALLINT"}], + "tags": [{"type": "USMALLINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..1d496b6b46bf3df3c4312bacafbfb77125491058 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insert-timestep-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": 
"TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..83689d6c40e3844707cc367431f37f4f8ec144d5 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10240000000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16374, "count":1}], + "tags": [{"type": "TINYINT", "count":12}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": 
"stb3", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16375, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 100, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6},{"type": "TINYINT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..28f566833fc8958d364ee867c7628d573b4bf8ee --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTab0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 0, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + 
"insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..8f27feba6be7e3018461b0070420cc759cf8fc72 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": -1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 
1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..2e4063cf272ba18732f0e456362cb1103ba6d5c4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..c6fe0300f535a2b9d798b09853f0ad333e3bbcfd --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10000, + "num_of_records_per_req": 10000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 
0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..92e88141ca09971d0d202ee488471c14e07d4cd3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..18f1a39e0afcdae3d52e4bc4a4a97e15dbcfda37 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + 
"name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json new file mode 100644 index 0000000000000000000000000000000000000000..01ec546012ad04f94cfb6224048fffd89d5cbbc8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json" , + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet" , + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json new file mode 100644 index 
0000000000000000000000000000000000000000..d950a260f6ed3ad4a9ed53bc859304a71e5a680a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..0deed5ba5420a1dd9a1efddbb6e1e7a757dc10d0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 0, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + 
"multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..9d1d1ee71898d5e80a7310822da00de6c4636746 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": -1, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": 
"csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..f732d2e0c5575740dc3d1eeade05e09de8860faf --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE", "count":4096}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..24f468d719546733b900ecbd283f2904e96d222f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000000, + "max_sql_len": 1024000000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 10000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 
1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json b/tests/pytest/tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json new file mode 100644 index 0000000000000000000000000000000000000000..07e625dad3a28929a63475aa18310ff6d5b24cc6 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json @@ -0,0 +1,65 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "databases": [ + { + "dbinfo": { + "name": "blf", + "drop": "yes" + }, + "super_tables": [ + { + "name": "p_0_topics", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "p_0_topics_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "sml", + "insert_rows": 525600, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 60000, + "start_timestamp": "2019-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + { + "type": "INT", + "count": 1 + }, + { + "type": "FLOAT", + "count": 1 + }, + { + "type": "BINARY", + "len": 12, + "count": 1 + } + ], + "tags": [ + { + "type": "BINARY", + "len": 12, + "count": 10 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index 0e68e2e88078d3239ceba8d88200e7ea5b1cffe4..1ab67e721774200f238b81c2fc6bd88564a0b3f6 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -48,21 +48,25 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert*_res.txt*") + os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + # insert: create one or mutiple tables per sql and insert multiple rows per sql os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 11) tdSql.query("select count (tbname) from stb1") - tdSql.checkData(0, 0, 1000) + tdSql.checkData(0, 0, 10) tdSql.query("select count(*) from stb00_0") tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 1100) tdSql.query("select count(*) from stb01_1") tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 200000) + tdSql.checkData(0, 0, 2000) # restful connector insert data os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % 
binPath) @@ -80,7 +84,17 @@ class TDTestCase: tdSql.query("select count(*) from stb1") tdSql.checkData(0, 0, 200) - + # default values json files + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insert-default.json -y " % binPath) + tdSql.query("show databases;") + for i in range(tdSql.queryRows): + if tdSql.queryResult[i][0] == 'db': + tdSql.checkData(i, 2, 100) + tdSql.checkData(i, 4, 1) + tdSql.checkData(i, 6, 10) + tdSql.checkData(i, 16, 'ms') + # insert: create mutiple tables per sql and insert one rows per sql . os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) tdSql.execute("use db") @@ -339,9 +353,10 @@ class TDTestCase: tdSql.query('show tables like \'YYY%\'') #child_table_exists = yes, auto_create_table varies = yes tdSql.checkRows(20) - testcaseFilename = os.path.split(__file__)[-1] - os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + # rm useless files + os.system("rm -rf ./insert*_res.txt*") + + diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py new file mode 100644 index 0000000000000000000000000000000000000000..af5006bb2333bbc9d8fba4a93ebcab3f5bb13257 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonSml.py @@ -0,0 +1,264 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # line_protocol——telnet and json + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-1s1tnt1r-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from stb01_1") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 
4000) + + + # insert: create multiple tables per sql and insert one row per sql. + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-1s1tntmr-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 15) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 150) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 1500) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 3000) + + # insert: use the parameter "insert_interval" to control the speed of inserts. + # but we need more accurate methods to control the speed, such as getting the speed value, checking the count and so on. + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-interval-speed-sml.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select tbname from stb0") + tdSql.checkRows(100 ) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 2000) + tdSql.query("show stables") + tdSql.checkData(1, 4, 20) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 35) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 700) + + # spends 2min30s for 3 testcases. + # insert: drop and child_table_exists combination test + # insert: sml can't support the parameters "childtable_offset" and "childtable_limit" \ drop=no or child_table_exists = yes + + # os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-nodbnodrop-sml.json -y" % binPath) + # tdSql.error("show dbno.stables") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-newdb-sml.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 5) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 6) + tdSql.query("select count (tbname) from stb2") + tdSql.checkData(0, 0, 7) + tdSql.query("select count (tbname) from stb3") + tdSql.checkData(0, 0, 8) + tdSql.query("select count (tbname) from stb4") + tdSql.checkData(0, 0, 8) + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-renewdb-sml.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 50) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 120) + tdSql.query("select count(*) from stb2") + tdSql.checkData(0, 0, 140) + tdSql.query("select count(*) from stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from stb4") + tdSql.checkData(0, 0, 160) + + + # insert: if a parameter in the json file is illegal, an error is expected. 
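+ # Each case below drops the database, points taosdemo at a JSON config with a deliberately
+ # out-of-range value (column/tag counts, interlace_rows, num_of_records_per_req, childtable_count,
+ # binary lengths), and then uses tdSql.error() or tdSql.query() to verify how taosdemo handled it.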
+ tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertColumnsAndTagNumLarge4096-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertSigcolumnsNum4096-sml.json -y " % binPath) + tdSql.error("select * from db.stb0") + # tdSql.execute("drop database if exists db") + # os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json -y " % binPath) + # tdSql.query("select count(*) from db.stb0") + # tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(0) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("show stables like 'stb0%' ") + tdSql.checkData(0, 2, 11) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertTagsNumLarge128-sml.json -y " % binPath) + tdSql.error("use db1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertBinaryLenLarge16374AllcolLar49151-sml.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(1) + tdSql.query("select count(*) from db.stb1") + tdSql.checkRows(1) + tdSql.error("select * from db.stb4") + tdSql.error("select * from db.stb2") + tdSql.query("select count(*) from db.stb3") + tdSql.checkRows(1) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertNumOfrecordPerReq0-sml.json -y " % binPath) + tdSql.error("select count(*) from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertNumOfrecordPerReqless0-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertChildTab0-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertChildTabLess0-sml.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists blf") + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertTimestepMulRowsLargeint16-sml.json -y " % binPath) + tdSql.execute("use blf") + tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") + tdSql.query("select first(ts) from blf.p_0_topics_2") + tdSql.checkData(0, 0, "2019-10-01 00:00:00") + tdSql.query("select last(ts) from blf.p_0_topics_6 ") + tdSql.checkData(0, 0, "2020-09-29 23:59:00") + # it will be commented in ci because it spend too much time to insert data, but when you can excute it when you want to test this case. 
+ # os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 5000000) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 5000000) + # os.system("%staosdemo -f tools/taosdemoAllTest/sml/insertMaxNumPerReq-sml-telnet.json -y " % binPath) + # tdSql.execute("use db") + # tdSql.query("select count(*) from stb0") + # tdSql.checkData(0, 0, 5000000) + # tdSql.query("select count(*) from stb1") + # tdSql.checkData(0, 0, 5000000) + + + # insert: timestamp and step + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-timestep-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select last(ts) from db.stb00_0") + tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 200) + tdSql.query("select last(ts) from db.stb01_0") + tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 400) + + # # insert: disorder_ratio + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-disorder-sml.json 2>&1 -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10) + + # insert: sample json + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-sample-sml.json -y " % binPath) + tdSql.execute("use dbtest123") + tdSql.query("select c2 from stb0") + tdSql.checkData(0, 0, 2147483647) + tdSql.query("select * from stb1 where t1=-127") + tdSql.checkRows(20) + tdSql.query("select * from stb1 where t2=127") + tdSql.checkRows(10) + tdSql.query("select * from stb1 where t2=126") + tdSql.checkRows(10) + + # insert: test interlace parament + os.system("%staosdemo -f tools/taosdemoAllTest/sml/insert-interlace-row-sml.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count (*) from stb0") + tdSql.checkData(0, 0, 15000) + + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py index 3a3152ecde3c4eca09d8b8583cf90bbfdc0cc31d..4025e023809d773d750314c8fa635be664b2ff76 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py @@ -52,8 +52,8 @@ class TDTestCase: self.expectResult = expectResult with open("%s" % filename, 'r+') as f1: for line in f1.readlines(): - queryResult = line.strip().split()[0] - self.assertCheck(filename, queryResult, expectResult) + queryResultTaosc = line.strip().split()[0] + self.assertCheck(filename, queryResultTaosc, expectResult) # 
获取restful接口查询的结果文件中的关键内容,目前的关键内容找到第一个key就跳出循,所以就只有一个数据。后续再修改多个结果文件。 def getfileDataRestful(self, filename): @@ -65,9 +65,12 @@ class TDTestCase: pattern = re.compile("{.*}") contents = pattern.search(contents).group() contentsDict = ast.literal_eval(contents) # 字符串转换为字典 - queryResult = contentsDict['data'][0][0] + queryResultRest = contentsDict['data'][0][0] break - return queryResult + else : + queryResultRest = "" + return queryResultRest + # 获取taosc接口查询次数 def queryTimesTaosc(self, filename): @@ -185,9 +188,10 @@ class TDTestCase: os.system( "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) - os.system( + exceptcode = os.system( "%staosdemo -f tools/taosdemoAllTest/queryQps.json" % binPath) + assert exceptcode == 0 # use illegal or out of range parameters query json file os.system( @@ -228,7 +232,7 @@ class TDTestCase: os.system("rm -rf tools/taosdemoAllTest/*.py.sql") os.system("rm -rf ./querySystemInfo*") os.system("rm -rf ./query_res*") -# os.system("rm -rf ./all_query*") + os.system("rm -rf ./all_query*") os.system("rm -rf ./test_query_res0.txt") def stop(self): diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index a2b3978ad7fd6c6f2b4adf9f62b9edb4fed2a9e5..a5d8216de1d537843bef42fbf3752a45e899b5c3 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -38,6 +38,7 @@ run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim run general/compute/block_dist.sim +run general/compute/table_group.sim run general/db/alter_option.sim run general/db/alter_tables_d2.sim run general/db/alter_tables_v1.sim diff --git a/tests/script/general/compute/table_group.sim b/tests/script/general/compute/table_group.sim new file mode 100644 index 0000000000000000000000000000000000000000..cbce7963c3ede6688a2a6d1d0934fa6dfbc7f25b --- /dev/null +++ b/tests/script/general/compute/table_group.sim @@ -0,0 +1,2485 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c minRows -v 10 +system sh/exec.sh -n dnode1 -s start +sleep 200 +sql connect + +$db = m_db_tbg_limit +$tb = ct +$mt = st + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt ( ts timestamp, f1 int, f2 int ) tags(t int) + +$tb1 = ct1 +$tb2 = ct2 +$tb3 = ct3 +$tbnum = 3 +$rownum = 10 + +$x = 0 +while $x < $rownum + $i = 1 + while $i <= $tbnum + $tbi = $tb . $i + $inc = $x * 60 + $inc1 = $inc + $i + $ms = 1601481600000 + $inc1 + $v1 = $x * 10 + $v = $v1 + $i + sql insert into $tbi using $mt tags( $i ) values ( $ms , $v , $v ) + $i = $i + 1 + endw + $x = $x + 1 +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +sleep 200 +sql connect +sql use $db + +$x = 0 +while $x < $rownum + $i = 1 + while $i <= $tbnum + $tbi = $tb . 
$i + $inc = $x * 60 + $inc1 = $inc + $i + $ms = 1601481700000 + $inc1 + $v1 = $x * 10 + $temp = $rownum * 10 + $v1 = $v1 + $temp + $v = $v1 + $i + sql insert into $tbi using $mt tags( $i ) values ( $ms , $v , $v ) + $i = $i + 1 + endw + $x = $x + 1 +endw + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 500 +system sh/exec.sh -n dnode1 -s start +sql connect +sql use $db + +print execute sql select csum(f1) from st group by tbname + +sql select csum(f1) from st group by tbname + +if $rows != 60 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data21 != @33@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data31 != @64@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data41 != @105@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data51 != @156@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data61 != @217@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data71 != @288@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data81 != @369@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data91 != @460@ then + return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select diff(f1) from st group by tbname + +sql select diff(f1) from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @10@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @10@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @10@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then 
+ return -1 +endi +if $data91 != @10@ then + return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0) from st group by tbname + +sql select derivative(f1, 1s, 0) from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @166.666666667@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @166.666666667@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @166.666666667@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @0.100542932@ then + return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select mavg(f1,2) from st group by tbname + +sql select mavg(f1,2) from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @26.000000000@ then + return -1 +endi +if $data22 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @36.000000000@ then + return -1 +endi +if $data32 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @46.000000000@ then + return -1 +endi +if $data42 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @56.000000000@ then + return -1 +endi +if $data52 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @66.000000000@ then + return -1 +endi +if $data62 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @76.000000000@ then + return -1 +endi +if $data72 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @86.000000000@ then + return -1 +endi +if $data82 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @96.000000000@ then 
+ return -1 +endi +if $data92 != @ct1@ then + return -1 +endi +print execute sql select csum(f1),t from st group by tbname + +sql select csum(f1),t from st group by tbname + +if $rows != 60 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data21 != @33@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data31 != @64@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data41 != @105@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data51 != @156@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data61 != @217@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data71 != @288@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data81 != @369@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data91 != @460@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select diff(f1),t from st group by tbname + +sql select diff(f1),t from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @10@ then + return -1 +endi +if $data62 != @1@ then 
+ return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @10@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @10@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @10@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0),t from st group by tbname + +sql select derivative(f1, 1s, 0),t from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 != @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @166.666666667@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @166.666666667@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @166.666666667@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @0.100542932@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select mavg(f1,2),t from st group by tbname + +sql select mavg(f1,2),t from st group by tbname + +if $rows != 57 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.181@ then + return -1 +endi +if $data21 != @26.000000000@ then + return -1 +endi +if $data22 != @1@ then + return -1 +endi +if $data23 
!= @ct1@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.241@ then + return -1 +endi +if $data31 != @36.000000000@ then + return -1 +endi +if $data32 != @1@ then + return -1 +endi +if $data33 != @ct1@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.301@ then + return -1 +endi +if $data41 != @46.000000000@ then + return -1 +endi +if $data42 != @1@ then + return -1 +endi +if $data43 != @ct1@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.361@ then + return -1 +endi +if $data51 != @56.000000000@ then + return -1 +endi +if $data52 != @1@ then + return -1 +endi +if $data53 != @ct1@ then + return -1 +endi +if $data60 != @20-10-01 00:00:00.421@ then + return -1 +endi +if $data61 != @66.000000000@ then + return -1 +endi +if $data62 != @1@ then + return -1 +endi +if $data63 != @ct1@ then + return -1 +endi +if $data70 != @20-10-01 00:00:00.481@ then + return -1 +endi +if $data71 != @76.000000000@ then + return -1 +endi +if $data72 != @1@ then + return -1 +endi +if $data73 != @ct1@ then + return -1 +endi +if $data80 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data81 != @86.000000000@ then + return -1 +endi +if $data82 != @1@ then + return -1 +endi +if $data83 != @ct1@ then + return -1 +endi +if $data90 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data91 != @96.000000000@ then + return -1 +endi +if $data92 != @1@ then + return -1 +endi +if $data93 != @ct1@ then + return -1 +endi +print execute sql select csum(f1) from st group by tbname limit 2 + +sql select csum(f1) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.002@ then + return -1 +endi +if $data21 != @2@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data31 != @14@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.003@ then + return -1 +endi +if $data41 != @3@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data51 != @16@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select diff(f1) from st group by tbname limit 2 + +sql select diff(f1) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print 
execute sql select derivative(f1, 1s, 0) from st group by tbname limit 2 + +sql select derivative(f1, 1s, 0) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2) from st group by tbname limit 2 + +sql select mavg(f1,2) from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @7.000000000@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @17.000000000@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @8.000000000@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @18.000000000@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select csum(f1),t from st group by tbname limit 2 + +sql select csum(f1),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.001@ then + return -1 +endi +if $data01 != @1@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data11 != @12@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.002@ then + return -1 +endi +if $data21 != @2@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data31 != @14@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.003@ then + return -1 +endi +if $data41 != @3@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data51 != @16@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + 
return -1 +endi +print execute sql select diff(f1),t from st group by tbname limit 2 + +sql select diff(f1),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 + +sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @166.666666667@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @166.666666667@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @166.666666667@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2),t from st group by tbname limit 2 + +sql select mavg(f1,2),t from st group by tbname limit 2 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.061@ then + return -1 +endi +if $data01 != @6.000000000@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:00:00.121@ then + return -1 +endi +if $data11 != @16.000000000@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.062@ then + return -1 +endi +if $data21 != @7.000000000@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:00:00.122@ then + 
return -1 +endi +if $data31 != @17.000000000@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.063@ then + return -1 +endi +if $data41 != @8.000000000@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:00:00.123@ then + return -1 +endi +if $data51 != @18.000000000@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select csum(f1) from st group by tbname limit 2 offset 9 + +sql select csum(f1) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data01 != @460@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data11 != @561@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.542@ then + return -1 +endi +if $data21 != @470@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data31 != @572@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.543@ then + return -1 +endi +if $data41 != @480@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data51 != @583@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select diff(f1) from st group by tbname limit 2 offset 9 + +sql select diff(f1) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @10@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @10@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @10@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select derivative(f1, 1s, 0) from st group by tbname limit 2 offset 9 + +sql select derivative(f1, 1s, 0) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @0.100542932@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @166.666666667@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @0.100542932@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @166.666666667@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi 
+if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @0.100542932@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @166.666666667@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select mavg(f1,2) from st group by tbname limit 2 offset 9 + +sql select mavg(f1,2) from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @96.000000000@ then + return -1 +endi +if $data02 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @106.000000000@ then + return -1 +endi +if $data12 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @97.000000000@ then + return -1 +endi +if $data22 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.062@ then + return -1 +endi +if $data31 != @107.000000000@ then + return -1 +endi +if $data32 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data41 != @98.000000000@ then + return -1 +endi +if $data42 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.063@ then + return -1 +endi +if $data51 != @108.000000000@ then + return -1 +endi +if $data52 != @ct3@ then + return -1 +endi +print execute sql select csum(f1),t from st group by tbname limit 2 offset 9 + +sql select csum(f1),t from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:00:00.541@ then + return -1 +endi +if $data01 != @460@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data11 != @561@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:00:00.542@ then + return -1 +endi +if $data21 != @470@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi +if $data30 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data31 != @572@ then + return -1 +endi +if $data32 != @2@ then + return -1 +endi +if $data33 != @ct2@ then + return -1 +endi +if $data40 != @20-10-01 00:00:00.543@ then + return -1 +endi +if $data41 != @480@ then + return -1 +endi +if $data42 != @3@ then + return -1 +endi +if $data43 != @ct3@ then + return -1 +endi +if $data50 != @20-10-01 00:01:40.003@ then + return -1 +endi +if $data51 != @583@ then + return -1 +endi +if $data52 != @3@ then + return -1 +endi +if $data53 != @ct3@ then + return -1 +endi +print execute sql select diff(f1),t from st group by tbname limit 2 offset 9 + +sql select diff(f1),t from st group by tbname limit 2 offset 9 + +if $rows != 6 then + return -1 +endi +if $data00 != @20-10-01 00:01:40.001@ then + return -1 +endi +if $data01 != @10@ then + return -1 +endi +if $data02 != @1@ then + return -1 +endi +if $data03 != @ct1@ then + return -1 +endi +if $data10 != @20-10-01 00:01:40.061@ then + return -1 +endi +if $data11 != @10@ then + return -1 +endi +if $data12 != @1@ then + return -1 +endi +if $data13 != @ct1@ then + return -1 +endi +if $data20 != @20-10-01 00:01:40.002@ then + return -1 +endi +if $data21 != @10@ then + return -1 +endi +if $data22 != @2@ then + return -1 +endi +if $data23 != @ct2@ then + return -1 +endi 
+if $data30 != @20-10-01 00:01:40.062@ then
+ return -1
+endi
+if $data31 != @10@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data41 != @10@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.063@ then
+ return -1
+endi
+if $data51 != @10@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+print execute sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 offset 9
+
+sql select derivative(f1, 1s, 0),t from st group by tbname limit 2 offset 9
+
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @20-10-01 00:01:40.001@ then
+ return -1
+endi
+if $data01 != @0.100542932@ then
+ return -1
+endi
+if $data02 != @1@ then
+ return -1
+endi
+if $data03 != @ct1@ then
+ return -1
+endi
+if $data10 != @20-10-01 00:01:40.061@ then
+ return -1
+endi
+if $data11 != @166.666666667@ then
+ return -1
+endi
+if $data12 != @1@ then
+ return -1
+endi
+if $data13 != @ct1@ then
+ return -1
+endi
+if $data20 != @20-10-01 00:01:40.002@ then
+ return -1
+endi
+if $data21 != @0.100542932@ then
+ return -1
+endi
+if $data22 != @2@ then
+ return -1
+endi
+if $data23 != @ct2@ then
+ return -1
+endi
+if $data30 != @20-10-01 00:01:40.062@ then
+ return -1
+endi
+if $data31 != @166.666666667@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data41 != @0.100542932@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.063@ then
+ return -1
+endi
+if $data51 != @166.666666667@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+print execute sql select mavg(f1,2),t from st group by tbname limit 2 offset 9
+
+sql select mavg(f1,2),t from st group by tbname limit 2 offset 9
+
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @20-10-01 00:01:40.001@ then
+ return -1
+endi
+if $data01 != @96.000000000@ then
+ return -1
+endi
+if $data02 != @1@ then
+ return -1
+endi
+if $data03 != @ct1@ then
+ return -1
+endi
+if $data10 != @20-10-01 00:01:40.061@ then
+ return -1
+endi
+if $data11 != @106.000000000@ then
+ return -1
+endi
+if $data12 != @1@ then
+ return -1
+endi
+if $data13 != @ct1@ then
+ return -1
+endi
+if $data20 != @20-10-01 00:01:40.002@ then
+ return -1
+endi
+if $data21 != @97.000000000@ then
+ return -1
+endi
+if $data22 != @2@ then
+ return -1
+endi
+if $data23 != @ct2@ then
+ return -1
+endi
+if $data30 != @20-10-01 00:01:40.062@ then
+ return -1
+endi
+if $data31 != @107.000000000@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data41 != @98.000000000@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.063@ then
+ return -1
+endi
+if $data51 != @108.000000000@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+print execute sql select csum(f1),t,tbname from st group by tbname limit 2 offset 9
+
+sql select csum(f1),t,tbname from st group by tbname limit 2 offset 9
+
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @20-10-01 00:00:00.541@ then
+ return -1
+endi
+if $data01 != @460@ then
+ return -1
+endi
+if $data02 != @1@ then
+ return -1
+endi
+if $data03 != @ct1@ then
+ return -1
+endi
+if $data04 != @ct1@ then
+ return -1
+endi
+if $data10 != @20-10-01 00:01:40.001@ then
+ return -1
+endi
+if $data11 != @561@ then
+ return -1
+endi
+if $data12 != @1@ then
+ return -1
+endi
+if $data13 != @ct1@ then
+ return -1
+endi
+if $data14 != @ct1@ then
+ return -1
+endi
+if $data20 != @20-10-01 00:00:00.542@ then
+ return -1
+endi
+if $data21 != @470@ then
+ return -1
+endi
+if $data22 != @2@ then
+ return -1
+endi
+if $data23 != @ct2@ then
+ return -1
+endi
+if $data24 != @ct2@ then
+ return -1
+endi
+if $data30 != @20-10-01 00:01:40.002@ then
+ return -1
+endi
+if $data31 != @572@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data34 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:00:00.543@ then
+ return -1
+endi
+if $data41 != @480@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data44 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data51 != @583@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+if $data54 != @ct3@ then
+ return -1
+endi
+print execute sql select diff(f1),t,tbname from st group by tbname limit 2 offset 9
+
+sql select diff(f1),t,tbname from st group by tbname limit 2 offset 9
+
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @20-10-01 00:01:40.001@ then
+ return -1
+endi
+if $data01 != @10@ then
+ return -1
+endi
+if $data02 != @1@ then
+ return -1
+endi
+if $data03 != @ct1@ then
+ return -1
+endi
+if $data04 != @ct1@ then
+ return -1
+endi
+if $data10 != @20-10-01 00:01:40.061@ then
+ return -1
+endi
+if $data11 != @10@ then
+ return -1
+endi
+if $data12 != @1@ then
+ return -1
+endi
+if $data13 != @ct1@ then
+ return -1
+endi
+if $data14 != @ct1@ then
+ return -1
+endi
+if $data20 != @20-10-01 00:01:40.002@ then
+ return -1
+endi
+if $data21 != @10@ then
+ return -1
+endi
+if $data22 != @2@ then
+ return -1
+endi
+if $data23 != @ct2@ then
+ return -1
+endi
+if $data24 != @ct2@ then
+ return -1
+endi
+if $data30 != @20-10-01 00:01:40.062@ then
+ return -1
+endi
+if $data31 != @10@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data34 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data41 != @10@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data44 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.063@ then
+ return -1
+endi
+if $data51 != @10@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+if $data54 != @ct3@ then
+ return -1
+endi
+print execute sql select derivative(f1, 1s, 0),t,tbname from st group by tbname limit 2 offset 9
+
+sql select derivative(f1, 1s, 0),t,tbname from st group by tbname limit 2 offset 9
+
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @20-10-01 00:01:40.001@ then
+ return -1
+endi
+if $data01 != @0.100542932@ then
+ return -1
+endi
+if $data02 != @1@ then
+ return -1
+endi
+if $data03 != @ct1@ then
+ return -1
+endi
+if $data04 != @ct1@ then
+ return -1
+endi
+if $data10 != @20-10-01 00:01:40.061@ then
+ return -1
+endi
+if $data11 != @166.666666667@ then
+ return -1
+endi
+if $data12 != @1@ then
+ return -1
+endi
+if $data13 != @ct1@ then
+ return -1
+endi
+if $data14 != @ct1@ then
+ return -1
+endi
+if $data20 != @20-10-01 00:01:40.002@ then
+ return -1
+endi
+if $data21 != @0.100542932@ then
+ return -1
+endi
+if $data22 != @2@ then
+ return -1
+endi
+if $data23 != @ct2@ then
+ return -1
+endi
+if $data24 != @ct2@ then
+ return -1
+endi
+if $data30 != @20-10-01 00:01:40.062@ then
+ return -1
+endi
+if $data31 != @166.666666667@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data34 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data41 != @0.100542932@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data44 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.063@ then
+ return -1
+endi
+if $data51 != @166.666666667@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+if $data54 != @ct3@ then
+ return -1
+endi
+print execute sql select mavg(f1,2),t,tbname from st group by tbname limit 2 offset 9
+
+sql select mavg(f1,2),t,tbname from st group by tbname limit 2 offset 9
+
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @20-10-01 00:01:40.001@ then
+ return -1
+endi
+if $data01 != @96.000000000@ then
+ return -1
+endi
+if $data02 != @1@ then
+ return -1
+endi
+if $data03 != @ct1@ then
+ return -1
+endi
+if $data04 != @ct1@ then
+ return -1
+endi
+if $data10 != @20-10-01 00:01:40.061@ then
+ return -1
+endi
+if $data11 != @106.000000000@ then
+ return -1
+endi
+if $data12 != @1@ then
+ return -1
+endi
+if $data13 != @ct1@ then
+ return -1
+endi
+if $data14 != @ct1@ then
+ return -1
+endi
+if $data20 != @20-10-01 00:01:40.002@ then
+ return -1
+endi
+if $data21 != @97.000000000@ then
+ return -1
+endi
+if $data22 != @2@ then
+ return -1
+endi
+if $data23 != @ct2@ then
+ return -1
+endi
+if $data24 != @ct2@ then
+ return -1
+endi
+if $data30 != @20-10-01 00:01:40.062@ then
+ return -1
+endi
+if $data31 != @107.000000000@ then
+ return -1
+endi
+if $data32 != @2@ then
+ return -1
+endi
+if $data33 != @ct2@ then
+ return -1
+endi
+if $data34 != @ct2@ then
+ return -1
+endi
+if $data40 != @20-10-01 00:01:40.003@ then
+ return -1
+endi
+if $data41 != @98.000000000@ then
+ return -1
+endi
+if $data42 != @3@ then
+ return -1
+endi
+if $data43 != @ct3@ then
+ return -1
+endi
+if $data44 != @ct3@ then
+ return -1
+endi
+if $data50 != @20-10-01 00:01:40.063@ then
+ return -1
+endi
+if $data51 != @108.000000000@ then
+ return -1
+endi
+if $data52 != @3@ then
+ return -1
+endi
+if $data53 != @ct3@ then
+ return -1
+endi
+if $data54 != @ct3@ then
+ return -1
+endi
+print ================================ clear
+#sql drop database $db
+#sql show databases
+#if $rows != 0 then
+# return -1
+#endi
+
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
diff --git a/tests/script/general/compute/testSuite.sim b/tests/script/general/compute/testSuite.sim
index 25c93ed29339c326628b885c34ed8766299460aa..ceb6525c7f67ab6c06f80d527f90c00299fbbd6e 100644
--- a/tests/script/general/compute/testSuite.sim
+++ b/tests/script/general/compute/testSuite.sim
@@ -20,3 +20,4 @@ run general/compute/stddev.sim
 run general/compute/sum.sim
 run general/compute/top.sim
 run general/compute/block_dist.sim
+run general/compute/table_group.sim
diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim
index 502de9583e9727d2dbee4a5601f974d6a46173ba..9b0dc8e964cf39909b803fe5ea20a7bdff8ceb59 100644
--- a/tests/script/general/parser/col_arithmetic_query.sim
+++ b/tests/script/general/parser/col_arithmetic_query.sim
@@ -561,19 +561,19 @@ endi
 sql_error select first(c6) - last(c6) *12 / count(*) from $stb group by c3;
 sql select first(c6) - last(c6) *12 / count(*) from $stb group by c5;
-if $rows != 10 then
+if $rows != 11 then
  return -1
 endi
-if $data00 != 0.000000000 then
+if $data00 != -0.002160000 then
  return -1
 endi
-if $data10 != 0.997600000 then
+if $data10 != 0.000000000 then
  return -1
 endi
-if $data90 != 8.978400000 then
+if $data90 != 7.980800000 then
  return -1
 endi
diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim
index 22f18179a418b5779993c91a17d62848674e1774..25cc8a4ee8774fc7b43234d54d06abdc56af61f8 100644
--- a/tests/script/general/parser/nestquery.sim
+++ b/tests/script/general/parser/nestquery.sim
@@ -181,7 +181,7 @@ endi
 sql_error select stddev(c1) from (select c1 from nest_tb0);
 sql_error select percentile(c1, 20) from (select * from nest_tb0);
-sql_error select interp(c1) from (select * from nest_tb0);
+#sql_error select interp(c1) from (select * from nest_tb0);
 sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0);
 sql_error select twa(c1) from (select c1 from nest_tb0);
 sql_error select irate(c1) from (select c1 from nest_tb0);
@@ -925,5 +925,18 @@ if $data00 != 24 then
  return -1
 endi
+sql select sum(a)/sum(b) from meters where ts >= '2021-09-30 15:00:00.000' and ts <= '2021-09-30 15:00:05.000' interval(1s) fill(null) group by area order by area;
+if $rows != 12 then
+ return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data02 != 0 then
+ return -1
+endi
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 2696b93acedd8c116dfd0922798012c27c8e8692..9a8f602901507bc4fc31d3902461394446a3067b 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -414,6 +414,7 @@ cd ../../../debug; make
 ./test.sh -f general/parser/between_and.sim
 ./test.sh -f general/parser/last_cache.sim
 ./test.sh -f unique/big/balance.sim
+./test.sh -f general/parser/nestquery.sim
 ./test.sh -f general/parser/udf.sim
 ./test.sh -f general/parser/udf_dll.sim
diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim
index a705601d96f64f2690c3b933228b02b49d59af68..91a88451924e6856a693208d30733c1f610dd74a 100644
--- a/tests/script/regressionSuite.sim
+++ b/tests/script/regressionSuite.sim
@@ -38,6 +38,7 @@ run general/compute/stddev.sim
 run general/compute/sum.sim
 run general/compute/top.sim
 run general/compute/block_dist.sim
+run general/compute/table_group.sim
 run general/db/alter_option.sim
 run general/db/alter_tables_d2.sim
 run general/db/alter_tables_v1.sim
diff --git a/tests/script/unique/cluster/cache.sim b/tests/script/unique/cluster/cache.sim
index 740eddfb0d36767631c08a60806ab2e38e6f364a..1b3771353f8ca411db1fc8ea62335c5ecc16bf45 100644
--- a/tests/script/unique/cluster/cache.sim
+++ b/tests/script/unique/cluster/cache.sim
@@ -41,7 +41,7 @@ sql create dnode $hostname2
 sleep 10000
 sql show log.tables;
-if $rows > 6 then
+if $rows > 20 then
  return -1
 endi
@@ -50,7 +50,7 @@ print ===>rows $rows
 print $data00 $data01 $data02
 print $data10 $data11 $data12
 print $data20 $data21 $data22
-if $rows < 10 then
+if $rows < 9 then
  return -1
 endi
diff --git a/tests/script/unique/dnode/monitor.sim b/tests/script/unique/dnode/monitor.sim
index 0b41a4137ca74046b24e84fb6202279f45eaa578..b1be43ecf58355216266accd308e5e7d41088892 100644
--- a/tests/script/unique/dnode/monitor.sim
+++ b/tests/script/unique/dnode/monitor.sim
@@ -42,7 +42,7 @@ print dnode2 openVnodes $data2_2
 if $data2_1 != 0 then
  return -1
 endi
-if $data2_2 != 1 then
+if $data2_2 != 2 then
  return -1
 endi
@@ -56,7 +56,25 @@ print $data30
 print $data40
 print $data50
-if $rows > 6 then
+print *num of tables $rows
+
+if $rows > 17 then
+ return -1
+endi
+
+sql show log.stables
+
+print $data00
+print $data10
+print $data20
+print $data30
+print $data40
+print $data50
+print $data60
+
+print *num of stables $rows
+
+if $rows > 7 then
  return -1
 endi
diff --git a/tests/script/unique/dnode/monitor_bug.sim b/tests/script/unique/dnode/monitor_bug.sim
index 60c6524d9ce70c549cbea2964768888bf0d72fcb..acde8cc3c76297249c71aa0ad307502ce0283391 100644
--- a/tests/script/unique/dnode/monitor_bug.sim
+++ b/tests/script/unique/dnode/monitor_bug.sim
@@ -19,7 +19,7 @@ sleep 3000
 sql show dnodes
 print dnode1 openVnodes $data2_1
-if $data2_1 > 2 then
+if $data2_1 > 4 then
  return -1
 endi
@@ -28,20 +28,20 @@ sql create dnode $hostname2
 system sh/exec.sh -n dnode2 -s start
 $x = 0
-show2:
+show2:
 $x = $x + 1
 sleep 2000
 if $x == 10 then
  return -1
 endi
-
+
 sql show dnodes
 print dnode1 openVnodes $data2_1
 print dnode2 openVnodes $data2_2
 if $data2_1 != 0 then
  goto show2
 endi
-if $data2_2 > 2 then
+if $data2_2 > 4 then
  goto show2
 endi
@@ -55,7 +55,7 @@ print $data30
 print $data40
 print $data50
-if $rows > 5 then
+if $rows > 14 then
  return -1
 endi
@@ -74,4 +74,4 @@ if $rows2 <= $rows1 then
 endi
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode2 -s stop -x SIGINT