diff --git a/.gitmodules b/.gitmodules
index 7edcdff5d3dd805ec6b222915688940c7bd7dcb9..5bb7cff2cd9fe465fac3ab932732069127a6a5b7 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -16,9 +16,9 @@
[submodule "deps/TSZ"]
path = deps/TSZ
url = https://github.com/taosdata/TSZ.git
-[submodule "deps/avro"]
- path = deps/avro
- url = https://github.com/apache/avro
+[submodule "src/kit/taos-tools"]
+ path = src/kit/taos-tools
+ url = https://github.com/taosdata/taos-tools
[submodule "src/plugins/taosadapter"]
path = src/plugins/taosadapter
url = https://github.com/taosdata/taosadapter
diff --git a/README.md b/README.md
index edca04afd486687ea8653e955ae50da457f77ab9..44b34a4e78c28817ca8fde5305c4ca664064cd34 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ At the moment, TDengine only supports building and running on Linux systems. You
To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in the project directory.
-## Install tools
+## Install build dependencies
### Ubuntu 16.04 and above & Debian:
```bash
@@ -58,6 +58,12 @@ To install Apache Maven:
sudo apt-get install -y maven
```
+#### Install build dependencies for taos-tools
+To build [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
+```bash
+sudo apt install libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
+```
+
### Centos 7:
```bash
sudo yum install epel-release
@@ -91,6 +97,12 @@ To install Apache Maven:
sudo dnf install -y maven
```
+#### Install build dependencies for taos-tools
+To build [taos-tools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed.
+```bash
+sudo yum install xz-devel snappy-devel jansson-devel pkgconfig libatomic
+```
+
### Setup golang environment
TDengine includes few components developed by Go language. Please refer to golang.org official documentation for golang environment setup.
@@ -108,7 +120,7 @@ git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
-The connectors for go & grafana have been moved to separated repositories,
+The connectors for go & grafana and some tools have been moved to separate repositories,
so you should run this command in the TDengine directory to install them:
```bash
git submodule update --init --recursive
@@ -234,7 +246,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
[Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
sudo apt-get update
-apt-get policy tdengine
+apt-cache policy tdengine
sudo apt-get install tdengine
```
diff --git a/cmake/define.inc b/cmake/define.inc
index 21b517e197fbb8ee568d3017cd5e78a8750644e2..7e6293c9a9abdc82313b9d3982692b5d506f2a06 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -53,6 +53,14 @@ IF (TD_PRO)
ADD_DEFINITIONS(-D_TD_PRO_)
ENDIF ()
+IF (TD_KH)
+ ADD_DEFINITIONS(-D_TD_KH_)
+ENDIF ()
+
+IF (TD_JH)
+ ADD_DEFINITIONS(-D_TD_JH_)
+ENDIF ()
+
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
@@ -152,6 +160,32 @@ IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF ()
+IF ("${BUILD_TOOLS}" STREQUAL "")
+ IF (TD_LINUX)
+ IF (TD_ARM_32)
+ SET(BUILD_TOOLS "false")
+ ELSEIF (TD_ARM_64)
+ SET(BUILD_TOOLS "false")
+ ELSE ()
+ SET(BUILD_TOOLS "true")
+ ENDIF ()
+ ELSEIF (TD_DARWIN)
+ SET(BUILD_TOOLS "false")
+ ELSE ()
+ SET(BUILD_TOOLS "false")
+ ENDIF ()
+ENDIF ()
+
+IF ("${BUILD_TOOLS}" MATCHES "false")
+ MESSAGE("${Yellow} Will _not_ build taos_tools! ${ColourReset}")
+ SET(TD_TAOS_TOOLS FALSE)
+ELSE ()
+ MESSAGE("")
+ MESSAGE("${Green} Will build taos_tools! ${ColourReset}")
+ MESSAGE("")
+ SET(TD_TAOS_TOOLS TRUE)
+ENDIF ()
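+# Note: BUILD_TOOLS can also be set explicitly on the cmake command line
+# (e.g. cmake .. -DBUILD_TOOLS=true), in which case the platform defaults above are skipped.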
+
IF (${BUILD_LUA} MATCHES "false")
SET(TD_BUILD_LUA FALSE)
ENDIF ()
@@ -159,18 +193,10 @@ ENDIF ()
IF (TD_BUILD_LUA)
MESSAGE("Enable lua")
ADD_DEFINITIONS(-DLUA_EMBEDDED)
+ SET(LINK_LUA "lua")
ELSE ()
MESSAGE("Disable lua")
-ENDIF ()
-
-IF ("${AVRO_SUPPORT}" MATCHES "true")
- SET(TD_AVRO_SUPPORT TRUE)
-ELSEIF ("${AVRO_SUPPORT}" MATCHES "false")
- SET(TD_AVRO_SUPPORT FALSE)
-ENDIF ()
-
-IF (TD_AVRO_SUPPORT)
- ADD_DEFINITIONS(-DAVRO_SUPPORT)
+ SET(LINK_LUA "")
ENDIF ()
IF (TD_LINUX)
diff --git a/cmake/input.inc b/cmake/input.inc
index 4273f576b4bfb292e946fa8086527a48389b9908..83de94c1e20dde13cc1812d978e91ba04bfc5c7e 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -52,6 +52,12 @@ ELSEIF (${DBNAME} MATCHES "tq")
ELSEIF (${DBNAME} MATCHES "pro")
SET(TD_PRO TRUE)
MESSAGE(STATUS "pro is true")
+ELSEIF (${DBNAME} MATCHES "kh")
+ SET(TD_KH TRUE)
+ MESSAGE(STATUS "kh is true")
+ELSEIF (${DBNAME} MATCHES "jh")
+ SET(TD_JH TRUE)
+ MESSAGE(STATUS "jh is true")
ENDIF ()
IF (${DLLTYPE} MATCHES "go")
@@ -92,9 +98,9 @@ ENDIF ()
SET(TD_BUILD_HTTP FALSE)
-SET(TD_BUILD_LUA TRUE)
+SET(TD_TAOS_TOOLS TRUE)
-SET(TD_AVRO_SUPPORT FALSE)
+SET(TD_BUILD_LUA TRUE)
SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
diff --git a/cmake/install.inc b/cmake/install.inc
index c90aa3f9511e416106309e603853028e7096f082..e78bba8d8d293f8c9c76e00f22b74efedf9591b3 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -5,8 +5,14 @@ IF (TD_LINUX)
ELSEIF (TD_WINDOWS)
IF (TD_POWER)
SET(CMAKE_INSTALL_PREFIX C:/PowerDB)
+ ELSEIF (TD_TQ)
+ SET(CMAKE_INSTALL_PREFIX C:/TQueue)
ELSEIF (TD_PRO)
SET(CMAKE_INSTALL_PREFIX C:/ProDB)
+ ELSEIF (TD_KH)
+ SET(CMAKE_INSTALL_PREFIX C:/KingHistorian)
+ ELSEIF (TD_JH)
+ SET(CMAKE_INSTALL_PREFIX C:/jh_iot)
ELSE ()
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
ENDIF ()
@@ -25,8 +31,14 @@ ELSEIF (TD_WINDOWS)
IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
+ ELSEIF (TD_TQ)
+ INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/tq.exe DESTINATION .)
ELSEIF (TD_PRO)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/prodbc.exe DESTINATION .)
+ ELSEIF (TD_KH)
+ INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/khclient.exe DESTINATION .)
+ ELSEIF (TD_JH)
+ INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .)
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index 45eaf6495d0f20c512d175c880af9bc1ed8f0ba6..a8b4fd288ea83676c98fa9db5acc464b42f51992 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -28,30 +28,6 @@ IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
-IF (TD_AVRO_SUPPORT)
- MESSAGE("")
- MESSAGE("${Green} ENABLE avro format support ${ColourReset}")
- MESSAGE("")
- include(ExternalProject)
- ExternalProject_Add(
- apache-avro
- PREFIX "avro"
- SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c
- BUILD_IN_SOURCE 1
- PATCH_COMMAND
- COMMAND git clean -f -d
- COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
- COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
- COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
- COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
- CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build
- )
-ELSE ()
- MESSAGE("")
- MESSAGE("${Yellow} NO avro format support ${ColourReset}")
- MESSAGE("")
-ENDIF ()
-
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("")
MESSAGE("${Green} ENABLE jemalloc ${ColourReset}")
diff --git a/deps/avro b/deps/avro
deleted file mode 160000
index a1fce29d9675b4dd95dfee9db32cc505d0b2227c..0000000000000000000000000000000000000000
--- a/deps/avro
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index a4fba357bbc47bd84ea7c2a6931a64bf274e5d9b..bfd870bf6412bf19898f9f5d569e6536bc156b1a 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -110,7 +110,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [用户管理](/administrator#user):添加、删除TDengine用户,修改用户密码
* [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入
* [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出
-* [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等
+* [系统连接、任务查询管理](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等
+* [系统监控](/administrator#monitoring):使用监控数据库与 TDinsight 对 TDengine 集群进行监控等
* [性能优化](/administrator#optimize):对长期运行的系统进行维护优化,保障性能表现
* [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录
* [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表
diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
index 4e43eac069465a092ab3f48b4069d5630da5fba9..5838d63665f11e5b77c5c83d181424de811f0926 100644
--- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
@@ -439,7 +439,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
附录 - 完整 taosdemo 参数介绍
--
-taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用json格式的配置文件。
+taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。
一、命令行参数
-f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。
@@ -505,10 +505,10 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是
--help: 打印命令参数列表。
-二、json格式的配置文件中所有参数说明
+二、JSON 格式的配置文件中所有参数说明
taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。
-1、插入功能测试的json配置文件
+1、插入功能测试的 JSON 配置文件
{
"filetype": "insert",
@@ -695,7 +695,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
"count":该类型的连续列个数,可选项,缺省是1。
}]
-2、查询功能测试的json配置文件
+2、查询功能测试的 JSON 配置文件
{
"filetype": "query",
@@ -784,7 +784,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。
-3、订阅功能测试的json配置文件
+3、订阅功能测试的 JSON 配置文件
{
"filetype":"subscribe",
diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md
index a82aecd97c832f9b7f276ec27832097e46845dfc..4a77e09ecafb6b5e444dc8d54383ce96bd751ba7 100644
--- a/documentation20/cn/05.insert/docs.md
+++ b/documentation20/cn/05.insert/docs.md
@@ -34,7 +34,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
**无模式写入行协议**
-
TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 Json 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。
+
TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 JSON 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。
对于InfluxDB、OpenTSDB的标准写入协议请参考各自的文档。下面首先以 InfluxDB 的行协议为基础,介绍 TDengine 扩展的协议内容,允许用户采用更加精细的方式控制(超级表)模式。
@@ -99,8 +99,8 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
| **序号** | **值** | **说明** |
| ---- | ------------------- | ------------ |
| 1 | SML_LINE_PROTOCOL | InfluxDB行协议(Line Protocol) |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB文本行协议 |
-| 3 | SML_JSON_PROTOCOL | Json协议格式 |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB 文本行协议 |
+| 3 | SML_JSON_PROTOCOL | JSON 协议格式 |
在 SML_LINE_PROTOCOL 解析模式下,需要用户指定输入的时间戳的时间分辨率。可用的时间分辨率如下表所示:
@@ -145,7 +145,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
如果是无模式写入过程中的数据本身错误,应用会得到 TSDB_CODE_TSC_LINE_SYNTAX_ERROR 错误信息,该错误信息表明错误发生在写入文本中。其他的错误码与原系统一致,可以通过 taos_errstr 获取具体的错误原因。
**后续升级计划**
-
当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 Taos Adapter 采用 REST 的方式直接写入无模式数据。
+
当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 taosAdapter 采用 REST 的方式直接写入无模式数据。
## Prometheus 直接写入
@@ -241,10 +241,10 @@ use prometheus;
select * from apiserver_request_latencies_bucket;
```
-## Telegraf 直接写入(通过 taosadapter)
+## Telegraf 直接写入(通过 taosAdapter)
安装 Telegraf 请参考[官方文档](https://portal.influxdata.com/downloads/)。
-TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。
+TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。
配置方法,在 /etc/telegraf/telegraf.conf 增加如下文字,其中 database name 请填写希望在 TDengine 保存 Telegraf 数据的数据库名,TDengine server/cluster host、username和 password 填写 TDengine 实际值:
```
@@ -264,14 +264,14 @@ sudo systemctl start telegraf
```
即可在 TDengine 中查询 metrics 数据库中 Telegraf 写入的数据。
-taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
+taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
-## collectd 直接写入(通过 taosadapter)
+## collectd 直接写入(通过 taosAdapter)
安装 collectd,请参考[官方文档](https://collectd.org/download.shtml)。
-TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。
+TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。
-在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值:
+在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值:
```
LoadPlugin network
@@ -282,15 +282,15 @@ LoadPlugin network
```
sudo systemctl start collectd
```
-taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
+taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
-## StatsD 直接写入(通过 taosadapter)
+## StatsD 直接写入(通过 taosAdapter)
安装 StatsD
请参考[官方文档](https://github.com/statsd/statsd)。
-TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。
+TDengine 新版本(2.3.0.0+)包含一个 taosAdapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。
-在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值:
+在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值:
```
backends 部分添加 "./backends/repeater"
repeater 部分添加 { host:'<TDengine server/cluster host>', port: <port for StatsD> }
@@ -305,12 +305,12 @@ port: 8125
}
```
-taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
+taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
## 使用 Bailongma 2.0 接入 Telegraf 数据写入
-*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosadapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。
+*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosAdapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index a1689151aabd82b93821a11cb6de107090db0fae..a788e9fa372207bc9085511fe0b16c925800627d 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -1252,7 +1252,7 @@ node nodejsChecker.js host=localhost
### Node.js连接器的使用
-以下是Node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node)。
+以下是Node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](https://github.com/taosdata/TDengine/tree/develop/src/connector/nodejs)。
#### 建立连接
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 6efb2ca3fe85049ad001e670354a652bfb426bb1..c2b3b5a4b6cc01c7b7f738367e05d1ea07ad6ee8 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -114,7 +114,7 @@ taosd -C
下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是可以工作的,一般无需设置。**注意:配置文件参数修改后,需要重启*taosd*服务,或客户端应用才能生效。**
-| **#** | **配置参数名称** | **内部** | **S\|C** | **单位** | **含义** | **取值范围** | **缺省值** | **备注** |
+| **#** | **配置参数名称** | **内部** | **SC** | **单位** | **含义** | **取值范围** | **缺省值** | **补充说明** |
| ----- | ----------------------- | -------- | -------- | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
| 1 | firstEP | | **SC** | | taosd启动时,主动连接的集群中首个dnode的end point | | localhost:6030 | |
| 2 | secondEP | YES | **SC** | | taosd启动时,如果firstEp连接不上,尝试连接集群中第二个dnode的end point | | 无 | |
@@ -553,11 +553,55 @@ KILL STREAM ;
强制关闭流式计算,其中的中stream-id是SHOW STREAMS中显示的connection-id:stream-no字串,如103:2,拷贝粘贴即可。
-## 系统监控
+## 系统监控
TDengine启动后,会自动创建一个监测数据库log,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在log库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。
-这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项monitor将其关闭或打开。
+这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。
+
+### TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案
+
+从 2.3.3.0 开始,监控数据库将提供更多的监控项,您可以从 [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) 了解如何使用 TDinsight 方案对 TDengine 进行监控。
+
+我们提供了一个自动化脚本 `TDinsight.sh` 对TDinsight进行部署。
+
+下载 `TDinsight.sh`:
+
+```bash
+wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh
+chmod +x TDinsight.sh
+```
+
+准备:
+
+1. TDengine Server 信息:
+ * TDengine RESTful 服务:对本地而言,可以是 http://localhost:6041 ,使用参数 `-a`。
+ * TDengine 用户名和密码,使用 `-u` `-p` 参数设置。
+
+2. Grafana 告警通知
+ * 使用已经存在的Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
+
+ ```bash
+ sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
+ ```
+
+ * 使用 TDengine 数据源插件内置的阿里云短信告警通知,使用 `-s` 启用之,并设置如下参数:
+ 1. 阿里云短信服务Key ID,参数 `-I`
+ 2. 阿里云短信服务Key Secret,参数 `-K`
+ 3. 阿里云短信服务签名,参数 `-S`
+ 4. 短信通知模板号,参数 `-C`
+ 5. 短信通知模板输入参数,JSON格式,参数 `-T`,如 `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`
+ 6. 逗号分隔的通知手机列表,参数 `-B`
+
+ ```bash
+ sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
+ -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \
+ -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
+ ```
+
+运行脚本并重启 Grafana 服务,然后在 Grafana 中打开 TDinsight 面板(默认地址为 http://localhost:3000)。
+
+更多使用场景和限制请参考[TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) 文档。
## 性能优化
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 18c39d665483997f1680f0253baddd8ceabcf1d9..6244df6192e9a8bf3047d40d1b52f4b647250375 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -1603,3 +1603,18 @@ TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进
IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
+## 表(列)名合法性说明
+TDengine 中的表(列)名命名规则如下:
+只能由字母、数字、下划线构成,数字不能在首位,长度不能超过192字节,不区分大小写。
+
+转义后表(列)名规则:
+为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。可以让表名与关键词不冲突,同时不受限于上述表名合法性约束检查。
+转义后的表(列)名同样受到长度限制,且长度计算时不计算转义符。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
+
+例如:
+\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。
+
+需要注意的是转义字符中的内容必须是可打印字符。
+
+支持版本
+支持转义符的功能从 2.3.0.1 版本开始。
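+
+下面给出一个使用转义符的简单示例(仅为示意,其中的表名、列名均为假设):
+
+```mysql
+-- select 是保留关键字,使用转义符后可以作为表名
+CREATE TABLE `select` (ts TIMESTAMP, `Value` INT);
+-- 转义后的 `Value` 保留大小写,与 `value` 是不同的列名
+INSERT INTO `select` VALUES (NOW, 1);
+```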
diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md
index eb5f20e708bb4bb592a1ab2d535fcf261457b989..9132e8dca63c47e4b22ad87ef9fd4d4a1997077a 100644
--- a/documentation20/cn/13.faq/docs.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -185,23 +185,23 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 |
| TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 |
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
-| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 |
-| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 |
+| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
+| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
## 20. go 语言编写组件编译失败怎样解决?
-新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosadapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。
-使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosadapter 仓库代码后再编译。
+新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosAdapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。
+使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
-目前编译方式默认自动编译 taosadapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
+目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
```sh
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
-如果希望继续使用之前的内置 httpd,可以关闭 taosadapter 编译,使用
+如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用
`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。
diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md
index 5860e70ceafafadc21c5772c96515e0925897e3a..0073cf78340a1100ec97cb70685410ced0cf5d4e 100644
--- a/documentation20/cn/14.devops/02.collectd/docs.md
+++ b/documentation20/cn/14.devops/02.collectd/docs.md
@@ -40,7 +40,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
```
### 配置 collectd
-在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值:
+在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值:
```
LoadPlugin network
@@ -51,7 +51,7 @@ sudo systemctl start collectd
```
### 配置 StatsD
-在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值:
+在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosAdapter 配置的实际值:
```
backends 部分添加 "./backends/repeater"
repeater 部分添加 { host:'<TDengine server/cluster host>', port: <port for StatsD> }
diff --git a/documentation20/cn/14.devops/03.immigrate/docs.md b/documentation20/cn/14.devops/03.immigrate/docs.md
index b2a68e1b15acef2d574600e59d1b18d890938ac6..980dc0c0f40632de40b54aec4de719eea4d8bc59 100644
--- a/documentation20/cn/14.devops/03.immigrate/docs.md
+++ b/documentation20/cn/14.devops/03.immigrate/docs.md
@@ -8,7 +8,7 @@
- 数据写入和查询的性能远超 OpenTSDB;
- 针对时序数据的高效压缩机制,压缩后在磁盘上的存储空间不到 1/5;
-- 安装部署非常简单,单一安装包完成安装部署,除了 taosadapter 需要依赖 Go 运行环境外,不依赖其他的第三方软件,整个安装部署过程秒级搞定;
+- 安装部署非常简单,单一安装包完成安装部署,除了 taosAdapter 需要依赖 Go 运行环境外,不依赖其他的第三方软件,整个安装部署过程秒级搞定;
- 提供的内建函数覆盖 OpenTSDB 支持的全部查询函数,还支持更多的时序数据查询函数、标量函数及聚合函数,支持多种时间窗口聚合、连接查询、表达式运算、多种分组聚合、用户定义排序、以及用户定义函数等高级查询功能。采用类 SQL 的语法规则,更加简单易学,基本上没有学习成本。
- 支持多达 128 个标签,标签总长度可达到 16 KB;
- 除 HTTP 之外,还提供 Java、Python、C、Rust、Go 等多种语言的接口,支持 JDBC 等多种企业级标准连接器协议。
@@ -40,9 +40,9 @@
- **调整数据收集器配置**
-在 TDengine 2.3 版本中,后台服务 taosd 启动后一个 HTTP 的服务 taosadapter 也会自动启用*。*利用 taosadapter 能够兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/Json 写入协议,可以将 collectd 和 StatsD 收集的数据直接推送到TDengine。
+在 TDengine 2.3 版本中,后台服务 taosd 启动后,一个 HTTP 服务 taosAdapter 也会自动启用。利用 taosAdapter 能够兼容 InfluxDB 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议,可以将 collectd 和 StatsD 收集的数据直接推送到 TDengine。
-如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosadapter 部署的节点 IP 地址和端口。假设 taosadapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下:
+如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下:
```html
LoadPlugin write_tsdb
@@ -57,7 +57,7 @@ LoadPlugin write_tsdb
```
-即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosadapter, taosadapter 将调用 API 将数据写入到 taosd 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。
+即可让 collectd 使用推送到 OpenTSDB 的插件方式将数据推送到 taosAdapter,taosAdapter 将调用 API 将数据写入到 taosd 中,从而完成数据的写入工作。如果你使用的是 StatsD,请相应地调整配置文件信息。
- **调整看板(Dashborad)系统**
@@ -106,7 +106,7 @@ sudo systemctl start grafana-server
TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。
-截止到 2.3.0.x 版本,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、Json 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。
+截止到 2.3.0.x 版本,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。
此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项:
@@ -353,7 +353,7 @@ Select sum(val) from table_name
完整示例:
```json
-//OpenTSDB查询Json
+//OpenTSDB查询JSON
query = {
“start”:1510560000,
“end”: 1515000009,
diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md
index 0cb6612700a9211ee30fc51fed8a3b3fa77f3342..2042a9c963664e65f960f8b7109511d63dd398f6 100644
--- a/documentation20/en/00.index/docs.md
+++ b/documentation20/en/00.index/docs.md
@@ -106,7 +106,8 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [User Management](/administrator#user): add/delete TDengine users, modify user password
- [Import Data](/administrator#import): import data into TDengine from either script or CSV file
- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
-- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
+- [System Connection and Task Query Management](/administrator#status): show the system connections, queries, streaming calculations and other tasks
+- [System Monitor](/administrator#monitoring): monitor the TDengine cluster with the log database and TDinsight.
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
- [Parameter Limitss and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index 806bebd77738bd4251607237e3f88c589baa4741..4f471f024ef0294badbfe7d02b97daae670c1cfa 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -1022,7 +1022,7 @@ Steps:
### How to use Node.js
-The following are some basic uses of node.js connector. Please refer to [TDengine Node.js connector](http://docs.taosdata.com/node) for details.
+The following are some basic uses of the Node.js connector. Please refer to [TDengine Node.js connector](https://github.com/taosdata/TDengine/tree/develop/src/connector/nodejs) for details.
### Create connection
diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md
index 3b045848ffe434891203422f637afb81a29f3575..11dd3e482d5e68bb642a94c533f23d390edf61f3 100644
--- a/documentation20/en/11.administrator/docs.md
+++ b/documentation20/en/11.administrator/docs.md
@@ -400,12 +400,57 @@ KILL STREAM ;
Force to turn off the stream computing, in which stream-id is the connection-id: stream-no string displayed in SHOW STREAMS, such as 103: 2, copy and paste it.
-## System Monitoring
+## System Monitoring
After TDengine is started, it will automatically create a monitoring database log and write the server's CPU, memory, hard disk space, bandwidth, number of requests, disk read-write speed, slow query and other information into the database regularly. TDengine also records important system operations (such as logging in, creating, deleting databases, etc.) logs and various error alarm information and stores them in the log database. The system administrator can view the database directly from CLI or view the monitoring information through GUI on WEB.
The collection of these monitoring metrics is turned on by default, but you can modify option monitor in the configuration file to turn it off or on.
+### TDinsight - Monitor TDengine with the monitoring database + Grafana
+
+Starting from v2.3.3.0, TDengine's log database provides more metrics for resource and status monitoring. Here we introduce TDinsight, a zero-dependency monitoring solution based on Grafana. You can find its documentation on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md).
+
+We provide an automation shell script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.sh) as a shortcut to help set up TDinsight on the Grafana server.
+
+First, download `TDinsight.sh` from GitHub:
+
+```bash
+wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh
+chmod +x TDinsight.sh
+```
+
+Some CLI options are needed to use the script:
+
+1. TDengine server information:
+
+ - TDengine RESTful endpoint, like `http://localhost:6041`, will be used with option `-a`.
+ - TDengine user `-u` (`root` by default), and password with `-p` (`taosdata` by default).
+
+2. Grafana alerting notifications. There are two ways to set this up:
+ 1. To use an existing Grafana notification channel, pass its `uid` with option `-E`. The `uid` can be retrieved with `curl -u admin:admin localhost:3000/api/alert-notifications | jq '.[]| .uid + "," + .name' -r`, then use it like this:
+
+ ```bash
+ sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
+ ```
+
+ 2. Use the TDengine data source plugin's built-in [Aliyun SMS](https://www.aliyun.com/product/sms) alerting support with the `-s` flag, and provide these options:
+ 1. Access key ID with option `-I`
+ 2. Access key secret with option `-K`
+ 3. SMS sign name with option `-S`
+ 4. Message template code with option `-C`
+ 5. Message template params in JSON format with option `-T`, e.g. `{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`.
+ 6. Comma-separated list of phone numbers with option `-B`
+
+ ```bash
+ sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
+ -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \
+ -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
+ ```
+
+Run the script as described above and restart the grafana-server service, then open the TDinsight dashboard in Grafana (by default at http://localhost:3000).
+
+Refer to the [TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) README for more scenarios and limitations of the script, as well as descriptions of all the TDinsight metrics.
+
## File Directory Structure
After installing TDengine, the following directories or files are generated in the operating system by default:
diff --git a/importSampleData/README.md b/importSampleData/README.md
index 56c5be0da422aadc5e05fe000ab83c312d29b6c8..c945cf52cb82723681f37efc42d3325f89011d39 100644
--- a/importSampleData/README.md
+++ b/importSampleData/README.md
@@ -1,36 +1,28 @@
-## 样例数据导入
+# 样例数据导入
该工具可以根据用户提供的 `json` 或 `csv` 格式样例数据文件快速导入 `TDengine`,目前仅能在 Linux 上运行。
为了体验写入和查询性能,可以对样例数据进行横向、纵向扩展。横向扩展是指将一个表(监测点)的数据克隆到多张表,纵向扩展是指将样例数据中的一段时间范围内的数据在时间轴上复制。该工具还支持历史数据导入至当前时间后持续导入,这样可以测试插入和查询并行进行的场景,以模拟真实环境。
-## 下载安装
+## 编译安装
-### 下载可执行文件
+由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2]。执行以下命令即可编译成可执行文件 `bin/taosimport`。
-由于该工具使用 go 语言开发,为了方便使用,项目中已经提供了编译好的可执行文件 `bin/taosimport`。通过 `git clone https://github.com/taosdata/TDengine.git` 命令或者直接下载 `ZIP` 文件解压进入样例导入程序目录 `cd importSampleData`,执行 `bin/taosimport`。
-
-### go 源码编译
-
-由于该工具使用 go 语言开发,编译之前需要先安装 go,具体请参考 [Getting Started][2],而且需要安装 TDengine 的 Go Connector, 具体请参考[TDengine 连接器文档][3]。安装完成之后,执行以下命令即可编译成可执行文件 `bin/taosimport`。
```shell
-go get https://github.com/taosdata/TDengine/importSampleData
-cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData
+go mod tidy
go build -o bin/taosimport app/main.go
```
-> 注:由于目前 TDengine 的 go connector 只支持 linux 环境,所以该工具暂时只能在 linux 系统中运行。
-> 如果 go get 失败可以下载之后复制 `github.com/taosdata/TDengine/importSampleData` 文件夹到 $GOPATH 的 src 目录下再执行 `go build -o bin/taosimport app/main.go`。
-
## 使用
### 快速体验
执行命令 `bin/taosimport` 会根据默认配置执行以下操作:
+
1. 创建数据库
- 自动创建名称为 `test_yyyyMMdd` 的数据库。
-
+ 自动创建名称为 `test_yyyyMMdd` 的数据库,`yyyyMMdd` 是当前日期,如`20211111`。
+
2. 创建超级表
根据配置文件 `config/cfg.toml` 中指定的 `sensor_info` 场景信息创建相应的超级表。
@@ -48,21 +40,25 @@ go build -o bin/taosimport app/main.go
taos> use test_yyyyMMdd;
taos> select count(*) from s_sensor_info;
```
+
* 查询各个分组的记录数
```shell
taos> select count(*) from s_sensor_info group by devgroup;
```
+
* 按 1h 间隔查询各聚合指标
```shell
taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h);
```
+
* 查询指定位置最新上传指标
```shell
taos> select last(*) from s_sensor_info where location = 'beijing';
```
+
> 更多查询及函数使用请参考 [数据查询][4]
### 详细使用说明
@@ -70,23 +66,23 @@ go build -o bin/taosimport app/main.go
执行命令 `bin/taosimport -h` 可以查看详细参数使用说明:
* -cfg string
-
+
导入配置文件路径,包含样例数据文件相关描述及对应 TDengine 配置信息。默认使用 `config/cfg.toml`。
-
+
* -cases string
需要导入的场景名称,该名称可从 -cfg 指定的配置文件中 `[usecase]` 查看,可同时导入多个场景,中间使用逗号分隔,如:`sensor_info,camera_detection`,默认为 `sensor_info`。
-
+
* -hnum int
需要将样例数据进行横向扩展的倍数,假设原有样例数据包含 1 张子表 `t_0` 数据,指定 hnum 为 2 时会根据原有表名创建 `t_0、t_1` 两张子表。默认为 100。
-
+
* -vnum int
需要将样例数据进行纵向扩展的次数,如果设置为 0 代表将历史数据导入至当前时间后持续按照指定间隔导入。默认为 1000,表示将样例数据在时间轴上纵向复制1000 次。
* -delay int
-
+
当 vnum 设置为 0 时持续导入的时间间隔,默认为所有场景中最小记录间隔时间的一半,单位 ms。
* -tick int
@@ -102,25 +98,25 @@ go build -o bin/taosimport app/main.go
当 save 为 1 时保存统计信息的表名, 默认 statistic。
* -auto int
-
+
是否自动生成样例数据中的主键时间戳,1 是,0 否, 默认 0。
-
+
* -start string
导入的记录开始时间,格式为 `"yyyy-MM-dd HH:mm:ss.SSS"`,不设置会使用样例数据中最小时间,设置后会忽略样例数据中的主键时间,会按照指定的 start 进行导入。如果 auto 为 1,则必须设置 start,默认为空。
-
+
* -interval int
导入的记录时间间隔,该设置只会在指定 `auto=1` 之后生效,否则会根据样例数据自动计算间隔时间。单位为毫秒,默认 1000。
* -thread int
-
+
执行导入数据的线程数目,默认为 10。
* -batch int
-
+
执行导入数据时的批量大小,默认为 100。批量是指一次写操作时,包含多少条记录。
-
+
* -host string
导入的 TDengine 服务器 IP,默认为 127.0.0.1。
@@ -138,7 +134,7 @@ go build -o bin/taosimport app/main.go
导入的 TDengine 用户密码,默认为 taosdata。
* -dropdb int
-
+
导入数据之前是否删除数据库,1 是,0 否, 默认 0。
* -db string
@@ -160,7 +156,7 @@ go build -o bin/taosimport app/main.go
执行上述命令后会将 sensor_info 场景的数据横向扩展2倍从指定时间 `2019-12-12 00:00:00.000` 开始且记录间隔时间为 5000 毫秒开始导入,导入至当前时间后会自动持续导入。
### config/cfg.toml 配置文件说明
-
+
``` toml
# 传感器场景
[sensor_info] # 场景名称
@@ -237,8 +233,6 @@ devid,location,color,devgroup,ts,temperature,humidity
0, beijing, white, 0, 1575129601000, 22, 14.377142
```
-
-
[1]: https://github.com/taosdata/TDengine
[2]: https://golang.org/doc/install
[3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector
diff --git a/importSampleData/go.mod b/importSampleData/go.mod
index fa1d978e597b3eb5b9f35e45f599d5a0f97ff267..d2e58d302b3c917922206cbfc3a7d5afef8266c9 100644
--- a/importSampleData/go.mod
+++ b/importSampleData/go.mod
@@ -3,6 +3,6 @@ module github.com/taosdata/TDengine/importSampleData
go 1.13
require (
- github.com/pelletier/go-toml v1.9.0 // indirect
- github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect
+ github.com/pelletier/go-toml v1.9.0
+ github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28
)
diff --git a/packaging/check_package.sh b/packaging/check_package.sh
index 0870e8c8eccc1a745ae5b081e2726ed8d809cf2b..e625f90912825f30ba279ecf6dbe59ff7dade97f 100755
--- a/packaging/check_package.sh
+++ b/packaging/check_package.sh
@@ -142,11 +142,11 @@ function check_main_path() {
function check_bin_path() {
# check install bin dir and all sub dir
- bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
+ bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "remove.sh" "tarbitrator" "set_core.sh")
for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
- lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
+ lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "rmtaos" "tarbitrator" "set_core")
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
@@ -170,7 +170,7 @@ function check_lib_path() {
function check_header_path() {
# check all header
- header_dir=("taos.h" "taoserror.h")
+ header_dir=("taos.h" "taosdef.h" "taoserror.h")
for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 235834a747e82886eef6c4540877307aa4dd3996..b6979cf26435e4c4b0d19f5c93bb92cda988b3bf 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -27,7 +27,6 @@ else
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index f28d98ba9a6fae4390bfa301760aff9583ba4e40..de365ae127bfb0509d8a3d67bc37e576bd61dc0f 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -59,7 +59,6 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
-cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
@@ -69,6 +68,7 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
+cp ${compile_dir}/../src/inc/taosdef.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
@@ -78,16 +78,6 @@ cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/c
install_user_local_path="/usr/local"
-if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
- mkdir -p ${pkg_dir}${install_user_local_path}/lib
- cp ${compile_dir}/build/lib/libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/
- ln -sf libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/libavro.so.23
- ln -sf libavro.so.23 ${pkg_dir}${install_user_local_path}/lib/libavro.so
-fi
-if [ -f ${compile_dir}/build/lib/libavro.a ]; then
- cp ${compile_dir}/build/lib/libavro.a ${pkg_dir}${install_user_local_path}/lib/
-fi
-
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
@@ -128,13 +118,7 @@ chmod 755 ${pkg_dir}/DEBIAN/*
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
-if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
- sed -i.bak "s/#Depends: no/Depends: libjansson4, libsnappy1v5/g" ${pkg_dir}/DEBIAN/control
-fi
-
#get taos version, then set deb name
-
-
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
@@ -153,13 +137,11 @@ else
exit 1
fi
-
-
# make deb package
dpkg -b ${pkg_dir} $debname
echo "make deb package success!"
cp ${pkg_dir}/*.deb ${output_dir}
-# clean tmep dir
+# clean temp dir
rm -rf ${pkg_dir}
diff --git a/packaging/deb/taosd b/packaging/deb/taosd
index 5002607da20b621ca69a8a2a25e713879d0308af..fe356ca6565c916086273e5669918b04065964cd 100644
--- a/packaging/deb/taosd
+++ b/packaging/deb/taosd
@@ -7,19 +7,19 @@
# chkconfig: 2345 99 01
#
### BEGIN INIT INFO
-# Provides: TDEngine
+# Provides: TDengine
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
-# Short-Description: Starts TDEngine taosd
-# Description: Starts TDEngine taosd, a time-series database engine
+# Short-Description: Starts TDengine taosd
+# Description: Starts TDengine taosd, a time-series database engine
### END INIT INFO
set -e
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
-NAME="TDEngine"
+NAME="TDengine"
USER="root"
GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
@@ -40,7 +40,7 @@ MAX_OPEN_FILES=65535
case "$1" in
start)
- log_action_begin_msg "Starting TDEngine..."
+ log_action_begin_msg "Starting TDengine..."
$DAEMON_HTTPD &
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
@@ -57,7 +57,7 @@ case "$1" in
;;
stop)
- log_action_begin_msg "Stopping TDEngine..."
+ log_action_begin_msg "Stopping TDengine..."
pkill -9 $DAEMON_HTTPD_NAME
set +e
if [ -f "$PID_FILE" ]; then
@@ -66,12 +66,12 @@ case "$1" in
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
elif [ $? -eq 3 ]; then
PID="`cat $PID_FILE`"
- log_failure_msg "Failed to stop TDEngine (pid $PID)"
+ log_failure_msg "Failed to stop TDengine (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
- log_action_cont_msg "TDEngine was not running"
+ log_action_cont_msg "TDengine was not running"
fi
log_action_end_msg 0
set -e
diff --git a/packaging/release.sh b/packaging/release.sh
index c82d5704ac5c4d89837f5afe4b1f6e27419279cc..8049e974b807363c856f63eebe026c74c6972c0a 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -11,7 +11,7 @@ set -e
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
-# -d [taos | power | tq ]
+# -d [taos | power | tq | pro | kh | jh]
# -n [2.0.0.3]
# -m [2.0.0.0]
@@ -22,7 +22,7 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
-dbName=taos # [taos | power | tq | pro]
+dbName=taos # [taos | power | tq | pro | kh | jh]
allocator=glibc # [glibc | jemalloc]
verNumber=""
verNumberComp="1.0.0.0"
@@ -78,7 +78,7 @@ do
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
- echo " -d [taos | power | tq | pro] "
+ echo " -d [taos | power | tq | pro | kh | jh] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
@@ -192,22 +192,292 @@ else
allocator_macro=""
fi
+# for powerdb
+if [[ "$dbName" == "power" ]]; then
+ # cmake/install.inc
+ sed -i "s/C:\/TDengine/C:\/PowerDB/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taos\.exe/power\.exe/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taosdemo\.exe/powerdemo\.exe/g" ${top_dir}/cmake/install.inc
+ # src/kit/shell/inc/shell.h
+ sed -i "s/taos_history/power_history/g" ${top_dir}/src/kit/shell/inc/shell.h
+ # src/inc/taosdef.h
+ sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/inc/taosdef.h
+ # src/util/src/tconfig.c
+ sed -i "s/taos config/power config/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/util/src/tconfig.c
+ # src/kit/taosdemo/taosdemo.c
+ sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c
+ # src/util/src/tlog.c
+ sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/util/src/tlog.c
+ # src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/Power/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ sed -i "s/taosdlog/powerdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ # src/client/src/tscSystem.c
+ sed -i "s/taoslog/powerlog/g" ${top_dir}/src/client/src/tscSystem.c
+ # src/util/src/tnote.c
+ sed -i "s/taosinfo/powerinfo/g" ${top_dir}/src/util/src/tnote.c
+ # src/dnode/CMakeLists.txt
+ sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt
+ # src/kit/taosdump/taosdump.c
+ sed -i "s/TDengine/Power/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/Default is taosdata/Default is power/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/taos\/taos\.cfg/power\/power\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ # src/os/src/linux/linuxEnv.c
+ sed -i "s/etc\/taos/etc\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/lib\/taos/lib\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/log\/taos/log\/power/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ # src/os/src/windows/wEnv.c
+ sed -i "s/TDengine/PowerDB/g" ${top_dir}/src/os/src/windows/wEnv.c
+ # src/kit/shell/src/shellEngine.c
+ sed -i "s/TDengine shell/PowerDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/2020 by TAOS Data, Inc/2020 by PowerDB, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\"taos> \"/\"power> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+fi
+
+# for tq
+if [[ "$dbName" == "tq" ]]; then
+ # cmake/install.inc
+ sed -i "s/C:\/TDengine/C:\/TQueue/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taos\.exe/tq\.exe/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taosdemo\.exe/tqdemo\.exe/g" ${top_dir}/cmake/install.inc
+ # src/kit/shell/inc/shell.h
+ sed -i "s/taos_history/tq_history/g" ${top_dir}/src/kit/shell/inc/shell.h
+ # src/inc/taosdef.h
+ sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/inc/taosdef.h
+ # src/util/src/tconfig.c
+ sed -i "s/taos config/tq config/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/util/src/tconfig.c
+ # src/kit/taosdemo/taosdemo.c
+ sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c
+ # src/util/src/tlog.c
+ sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/util/src/tlog.c
+ # src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/TQueue/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ sed -i "s/taosdlog/tqdlog/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ # src/client/src/tscSystem.c
+ sed -i "s/taoslog/tqlog/g" ${top_dir}/src/client/src/tscSystem.c
+ # src/util/src/tnote.c
+ sed -i "s/taosinfo/tqinfo/g" ${top_dir}/src/util/src/tnote.c
+ # src/dnode/CMakeLists.txt
+ sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt
+ # src/kit/taosdump/taosdump.c
+ sed -i "s/TDengine/TQueue/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/Default is taosdata/Default is tqueue/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/taos\/taos\.cfg/tq\/tq\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ # src/os/src/linux/linuxEnv.c
+ sed -i "s/etc\/taos/etc\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/lib\/taos/lib\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/log\/taos/log\/tq/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ # src/os/src/windows/wEnv.c
+ sed -i "s/TDengine/TQ/g" ${top_dir}/src/os/src/windows/wEnv.c
+ # src/kit/shell/src/shellEngine.c
+ sed -i "s/TDengine shell/TQ shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/2020 by TAOS Data, Inc/2020 by TQ, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\"taos> \"/\"tq> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+fi
+
+# for prodb
if [[ "$dbName" == "pro" ]]; then
- sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c
- sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c
+ # cmake/install.inc
+ sed -i "s/C:\/TDengine/C:\/ProDB/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taos\.exe/prodbc\.exe/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taosdemo\.exe/prodemo\.exe/g" ${top_dir}/cmake/install.inc
+ # src/kit/shell/inc/shell.h
+ sed -i "s/taos_history/prodb_history/g" ${top_dir}/src/kit/shell/inc/shell.h
+ # src/inc/taosdef.h
+ sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/inc/taosdef.h
+ # src/util/src/tconfig.c
+ sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/util/src/tconfig.c
+ # src/kit/taosdemo/taosdemo.c
+ sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c
+ sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${top_dir}/src/kit/taosdemo/taosdemo.c
+ # src/util/src/tlog.c
+ sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/util/src/tlog.c
+ # src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ sed -i "s/taosdlog/prodlog/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ # src/client/src/tscSystem.c
+ sed -i "s/taoslog/prolog/g" ${top_dir}/src/client/src/tscSystem.c
+ # src/util/src/tnote.c
+ sed -i "s/taosinfo/proinfo/g" ${top_dir}/src/util/src/tnote.c
+ # src/dnode/CMakeLists.txt
+ sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt
+ # src/kit/taosdump/taosdump.c
+ sed -i "s/Default is taosdata/Default is prodb/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/taos\/taos\.cfg/ProDB\/prodb\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/TDengine/ProDB/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ # src/os/src/linux/linuxEnv.c
+ sed -i "s/etc\/taos/etc\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/lib\/taos/lib\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/log\/taos/log\/ProDB/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ # src/os/src/windows/wEnv.c
+ sed -i "s/TDengine/ProDB/g" ${top_dir}/src/os/src/windows/wEnv.c
+ # src/kit/shell/src/shellEngine.c
+ sed -i "s/TDengine shell/ProDB shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/2020 by TAOS Data, Inc/2020 by Hanatech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\"taos> \"/\"ProDB> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/prompt_size = 6/prompt_size = 7/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+fi
+
+# for KingHistorian
+if [[ "$dbName" == "kh" ]]; then
+ # cmake/install.inc
+ sed -i "s/C:\/TDengine/C:\/KingHistorian/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taos\.exe/khclient\.exe/g" ${top_dir}/cmake/install.inc
+ sed -i "s/taosdemo\.exe/khdemo\.exe/g" ${top_dir}/cmake/install.inc
+ # src/kit/shell/inc/shell.h
+ sed -i "s/taos_history/kh_history/g" ${top_dir}/src/kit/shell/inc/shell.h
+ # src/inc/taosdef.h
+ sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/inc/taosdef.h
+ # src/util/src/tconfig.c
+ sed -i "s/taos config/kh config/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/util/src/tconfig.c
+ sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/util/src/tconfig.c
+ # src/kit/taosdemo/taosdemo.c
+ sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdemo/taosdemo.c
+ sed -i "s/support@taosdata.com/support@wellintech.com/g" ${top_dir}/src/kit/taosdemo/taosdemo.c
+ # src/util/src/tlog.c
+ sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/util/src/tlog.c
+ # src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ sed -i "s/taosdlog/khserverlog/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ # src/client/src/tscSystem.c
+ sed -i "s/taoslog/khclientlog/g" ${top_dir}/src/client/src/tscSystem.c
+ # src/util/src/tnote.c
+ sed -i "s/taosinfo/khinfo/g" ${top_dir}/src/util/src/tnote.c
+ # src/dnode/CMakeLists.txt
+ sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/src/dnode/CMakeLists.txt
+ # src/kit/taosdump/taosdump.c
+ sed -i "s/Default is taosdata/Default is khroot/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ sed -i "s/taos\/taos\.cfg/kinghistorian\/kinghistorian\.cfg/g" ${top_dir}/src/kit/taosdump/taosdump.c
+ # src/os/src/linux/linuxEnv.c
+ sed -i "s/etc\/taos/etc\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/lib\/taos/lib\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ sed -i "s/log\/taos/log\/kinghistorian/g" ${top_dir}/src/os/src/linux/linuxEnv.c
+ # src/os/src/windows/wEnv.c
+ sed -i "s/TDengine/KingHistorian/g" ${top_dir}/src/os/src/windows/wEnv.c
+ # src/kit/shell/src/shellEngine.c
+ sed -i "s/TDengine shell/KingHistorian shell/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/2020 by TAOS Data, Inc/2021 by Wellintech, Inc/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\"taos> \"/\"kh> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/\" -> \"/\" -> \"/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+ sed -i "s/prompt_size = 6/prompt_size = 4/g" ${top_dir}/src/kit/shell/src/shellEngine.c
+fi
+
+# for jinheng
+if [[ "$dbName" == "jh" ]]; then
+ # Following files to change:
+ # * src/client/src/tscSystem.c
+ # * src/inc/taosdef.h
+ # * src/kit/shell/CMakeLists.txt
+ # * src/kit/shell/inc/shell.h
+ # * src/kit/shell/src/shellEngine.c
+ # * src/kit/shell/src/shellWindows.c
+ # * src/kit/taosdemo/taosdemo.c
+ # * src/kit/taosdump/taosdump.c
+ # * src/os/src/linux/linuxEnv.c
+ # * src/os/src/windows/wEnv.c
+ # * src/util/src/tconfig.c
+ # * src/util/src/tlog.c
+
+ # src/dnode/src/dnodeSystem.c
+ sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeSystem.c
+ # src/dnode/src/dnodeMain.c
+ sed -i "s/TDengine/jh_iot/g" ${top_dir}/src/dnode/src/dnodeMain.c
+ # TODO: src/dnode/CMakeLists.txt
fi
echo "build ${pagMode} package ..."
if [[ "$pagMode" == "lite" ]]; then
BUILD_HTTP=true
+ BUILD_TOOLS=false
+else
+ BUILD_HTTP=false
+ BUILD_TOOLS=true
fi
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
- cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} ${allocator_macro}
+ # community-version compile
+ cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
else
- cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} ${allocator_macro}
+ # enterprise-version compile
+ if [[ "$dbName" == "power" ]]; then
+ # enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/\"taosdata\"/\"powerdb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/TDengine/PowerDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ # enterprise/src/plugins/admin/src/httpAdminHandle.c
+ sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c
+ # enterprise/src/plugins/grant/src/grantMain.c
+ sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c
+ # enterprise/src/plugins/module/src/moduleMain.c
+ sed -i "s/taos\.cfg/power\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c
+ fi
+ if [[ "$dbName" == "tq" ]]; then
+ # enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/\"taosdata\"/\"tqueue\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/TDengine/TQueue/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ # enterprise/src/plugins/admin/src/httpAdminHandle.c
+ sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c
+ # enterprise/src/plugins/grant/src/grantMain.c
+ sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c
+ # enterprise/src/plugins/module/src/moduleMain.c
+ sed -i "s/taos\.cfg/tq\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c
+ fi
+ if [[ "$dbName" == "pro" ]]; then
+ # enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/\"taosdata\"/\"prodb\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/TDengine/ProDB/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ # enterprise/src/plugins/admin/src/httpAdminHandle.c
+ sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c
+ # enterprise/src/plugins/grant/src/grantMain.c
+ sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c
+ # enterprise/src/plugins/module/src/moduleMain.c
+ sed -i "s/taos\.cfg/prodb\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c
+ fi
+ if [[ "$dbName" == "kh" ]]; then
+ # enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/\"taosdata\"/\"khroot\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/TDengine/KingHistorian/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ # enterprise/src/plugins/admin/src/httpAdminHandle.c
+ sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c
+ # enterprise/src/plugins/grant/src/grantMain.c
+ sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c
+ # enterprise/src/plugins/module/src/moduleMain.c
+ sed -i "s/taos\.cfg/kinghistorian\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c
+ fi
+ if [[ "$dbName" == "jh" ]]; then
+ # enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/\"taosdata\"/\"jhdata\"/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ sed -i "s/TDengine/jh_iot/g" ${top_dir}/../enterprise/src/kit/perfMonitor/perfMonitor.c
+ # enterprise/src/plugins/admin/src/httpAdminHandle.c
+ #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/admin/src/httpAdminHandle.c
+ # enterprise/src/plugins/grant/src/grantMain.c
+ #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/grant/src/grantMain.c
+ # enterprise/src/plugins/module/src/moduleMain.c
+ #sed -i "s/taos\.cfg/taos\.cfg/g" ${top_dir}/../enterprise/src/plugins/module/src/moduleMain.c
+ fi
+
+ cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
@@ -216,9 +486,9 @@ fi
if [[ "$allocator" == "jemalloc" ]]; then
# jemalloc need compile first, so disable parallel build
- make V=1 && ${csudo} make install
+ make -j 8 && ${csudo} make install
else
- make -j8 && ${csudo} make install
+ make -j 8 && ${csudo} make install
fi
cd ${curr_dir}
@@ -237,10 +507,18 @@ if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
+
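+  # build a separate taos-tools deb package when the submodule checkout provides its packaging scripts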
+ if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
+ cd ${top_dir}/src/kit/taos-tools/packaging/deb
+    # derive the taos-tools version from the latest git tag; fall back to 0.1.0 if no tag is found
+    taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g')
+    [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
+
+ ${csudo} ./make-taos-tools-deb.sh ${top_dir} \
+ ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
+ fi
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi
-
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
@@ -252,6 +530,15 @@ if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
+
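+  # build a separate taos-tools rpm package when the submodule checkout provides its packaging scripts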
+ if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then
+ cd ${top_dir}/src/kit/taos-tools/packaging/rpm
+    # derive the taos-tools version from the latest git tag ('-' is not allowed in rpm versions); fall back to 0.1.0 if no tag is found
+    taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|sed -e 's/-/_/g')
+    [ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
+
+ ${csudo} ./make-taos-tools-rpm.sh ${top_dir} \
+ ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
+ fi
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
@@ -272,12 +559,21 @@ if [ "$osType" != "Darwin" ]; then
${csudo} ./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
${csudo} ./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo} ./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ elif [[ "$dbName" == "kh" ]]; then
+ ${csudo} ./makepkg_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
+ ${csudo} ./makeclient_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makearbi_kh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ elif [[ "$dbName" == "jh" ]]; then
+ ${csudo} ./makepkg_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
+ ${csudo} ./makeclient_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makearbi_jh.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
fi
else
+ # only make client for Darwin
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
fi
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index 42ceeb791b6154f7d22a477bf3b3c3b8c726869c..b4cf3d1450619f6a43a5303afa45f71c4402c2e7 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -56,10 +56,6 @@ cd ${pkg_dir}
${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
-if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
- sed -i.bak 's/#Requires:/Requires: jansson snappy/g' ${spec_file}
-fi
-
${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, and modify package name, then clean temp dir
diff --git a/packaging/rpm/taosd b/packaging/rpm/taosd
index 46dd712e3139dad69d3db6db8b289d0f2424811a..f8a5a2357ea1e8f399d0692f1b0e0d6398e8f855 100644
--- a/packaging/rpm/taosd
+++ b/packaging/rpm/taosd
@@ -1,10 +1,10 @@
#!/bin/bash
#
-# taosd This shell script takes care of starting and stopping TDEngine.
+# taosd This shell script takes care of starting and stopping TDengine.
#
# chkconfig: 2345 99 01
-# description: TDEngine is a districuted, scalable, high-performance Time Series Database
-# (TSDB). More than just a pure database, TDEngine also provides the ability
+# description: TDengine is a distributed, scalable, high-performance Time Series Database
+# (TSDB). More than just a pure database, TDengine also provides the ability
# to do stream computing, aggregation etc.
#
#
@@ -13,8 +13,8 @@
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Short-Description: start and stop taosd
-# Description: TDEngine is a districuted, scalable, high-performance Time Series Database
-# (TSDB). More than just a pure database, TDEngine also provides the ability
+# Description: TDengine is a distributed, scalable, high-performance Time Series Database
+# (TSDB). More than just a pure database, TDengine also provides the ability
# to do stream computing, aggregation etc.
### END INIT INFO
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index f7b8462dbedc74a270a8560bb51a853e292cff27..31e5e49ab4deef817da89afbefa1b6dd18ad07cd 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -72,9 +72,9 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
-cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
+cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
@@ -82,15 +82,6 @@ cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/conn
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
-if [ -f %{_compiledir}/build/lib/libavro.so.23.0.0 ]; then
- cp %{_compiledir}/build/lib/libavro.so.23.0.0 %{buildroot}%{homepath}/driver
- ln -sf libavro.so.23.0.0 %{buildroot}%{homepath}/driver/libavro.so.23
- ln -sf libavro.so.23 %{buildroot}%{homepath}/driver/libavro.so
-fi
-if [ -f %{_compiledir}/build/lib/libavro.a ]; then
- cp %{_compiledir}/build/lib/libavro.a %{buildroot}%{homepath}/driver
-fi
-
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
mkdir -p %{buildroot}%{userlocalpath}/lib
@@ -206,9 +197,9 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index dcd4a83da8929d76aa61d848985b5c4ffe46b9c5..c0377cf8345b66ed8d588a6b36a4b7cdcdba028d 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -236,6 +236,29 @@ function install_lib() {
${csudo} ldconfig
}
+function install_avro() {
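+    # $1 is the target library directory name ("lib" or "lib64"); copy the bundled libavro
+    # into /usr/local/$1, create the versioned symlinks and refresh the linker cache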
+ if [ "$osType" != "Darwin" ]; then
+ avro_dir=${script_dir}/avro
+ if [ -f "${avro_dir}/lib/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/$1
+ ${csudo} /usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.so.23.0.0 /usr/local/$1
+ ${csudo} ln -sf /usr/local/$1/libavro.so.23.0.0 /usr/local/$1/libavro.so.23
+ ${csudo} ln -sf /usr/local/$1/libavro.so.23 /usr/local/$1/libavro.so
+
+ ${csudo} /usr/bin/install -c -d /usr/local/$1
+ [ -f ${avro_dir}/lib/libavro.a ] &&
+ ${csudo} /usr/bin/install -c -m 755 ${avro_dir}/lib/libavro.a /usr/local/$1
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+ fi
+}
+
function install_jemalloc() {
jemalloc_dir=${script_dir}/jemalloc
@@ -281,7 +304,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -290,9 +313,10 @@ function install_jemalloc() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -848,6 +872,8 @@ function update_TDengine() {
fi
tar -zxf taos.tar.gz
install_jemalloc
+ install_avro lib
+ install_avro lib64
echo -e "${GREEN}Start to update TDengine...${NC}"
# Stop the service if running
@@ -960,6 +986,9 @@ function install_TDengine() {
install_header
install_lib
install_jemalloc
+ install_avro lib
+ install_avro lib64
+
if [ "$pagMode" != "lite" ]; then
install_connector
fi
diff --git a/packaging/tools/install_arbi.sh b/packaging/tools/install_arbi.sh
index 3a5e64153836096268dee2be08919cd774b68ebe..33896a08b3f653dbddc1ad480ddab3bf73a513ef 100755
--- a/packaging/tools/install_arbi.sh
+++ b/packaging/tools/install_arbi.sh
@@ -116,9 +116,10 @@ function install_bin() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -167,7 +168,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
diff --git a/packaging/tools/install_arbi_jh.sh b/packaging/tools/install_arbi_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2403f8fbd79abf4324577fe3dca3a8e0eac8ed01
--- /dev/null
+++ b/packaging/tools/install_arbi_jh.sh
@@ -0,0 +1,286 @@
+#!/bin/bash
+#
+# This file is used to install the arbitrator (tarbitrator) on Linux systems. The operating
+# system is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
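+# detect the init system: service_mod 0 = systemd, 1 = sysvinit, 2 = none (the process is managed manually);
+# with sysvinit, initd_mod selects chkconfig (1), insserv (2) or update-rc.d (3)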
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact jhict.com for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_jh.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install server service
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ kill_tarbitrator
+ fi
+}
+
+function update() {
+ # Start to update
+ echo -e "${GREEN}Start to update jh_iot's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mjh_iot's arbitrator is updated successfully!${NC}"
+}
+
+function install() {
+ # Start to install
+ echo -e "${GREEN}Start to install jh_iot's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mjh_iot's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install server and client
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update
+else
+ install
+fi
+
diff --git a/packaging/tools/install_arbi_kh.sh b/packaging/tools/install_arbi_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9a2542936d935b70b762702f0f2f6ff92b51a4f3
--- /dev/null
+++ b/packaging/tools/install_arbi_kh.sh
@@ -0,0 +1,286 @@
+#!/bin/bash
+#
+# This file is used to install the arbitrator (tarbitrator) on Linux systems. The operating
+# system is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
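+# detect the init system: service_mod 0 = systemd, 1 = sysvinit, 2 = none (the process is managed manually);
+# with sysvinit, initd_mod selects chkconfig (1), insserv (2) or update-rc.d (3)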
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact wellintech.com for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_kh.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+  # Install tarbitratord service
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ kill_tarbitrator
+ fi
+}
+
+function update() {
+ # Start to update
+ echo -e "${GREEN}Start to update KingHistorian's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mKingHistorian's arbitrator is updated successfully!${NC}"
+}
+
+function install() {
+ # Start to install
+ echo -e "${GREEN}Start to install KingHistorian's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mKingHistorian's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install server and client
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update
+else
+ install
+fi
+
diff --git a/packaging/tools/install_arbi_power.sh b/packaging/tools/install_arbi_power.sh
index 883db2b7169d125309125887cb72279c92c4602a..755684d1bdb1ea02781518e9a78ccf1d881fb271 100755
--- a/packaging/tools/install_arbi_power.sh
+++ b/packaging/tools/install_arbi_power.sh
@@ -160,7 +160,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -169,16 +169,14 @@ function install_jemalloc() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function clean_service_on_sysvinit() {
- #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
@@ -279,7 +277,6 @@ function install_service() {
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
- # must manual stop taosd
kill_tarbitrator
fi
}
@@ -306,7 +303,6 @@ function update_PowerDB() {
install_jemalloc
echo
- #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
@@ -329,7 +325,6 @@ function install_PowerDB() {
install_jemalloc
echo
- #echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/install_arbi_pro.sh b/packaging/tools/install_arbi_pro.sh
index 11165dbdd8bdf6afb4659250499cf1d9184c2395..3e80ad8215e3ec709c17a6a46e34f5bd6cf7ac6c 100755
--- a/packaging/tools/install_arbi_pro.sh
+++ b/packaging/tools/install_arbi_pro.sh
@@ -116,16 +116,14 @@ function install_bin() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function clean_service_on_sysvinit() {
- #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
@@ -221,7 +219,6 @@ function install_service() {
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
- # must manual stop taosd
kill_tarbitrator
fi
}
@@ -247,7 +244,6 @@ function update_prodb() {
install_service
echo
- #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
@@ -268,7 +264,6 @@ function install_prodb() {
install_bin
install_service
echo
- #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/install_arbi_tq.sh b/packaging/tools/install_arbi_tq.sh
index bd852dd0ad2c9114f2424193adccf56b0cb40412..8757326605b05aca63a585d1c3e1c66c98f6aaa7 100755
--- a/packaging/tools/install_arbi_tq.sh
+++ b/packaging/tools/install_arbi_tq.sh
@@ -116,16 +116,14 @@ function install_bin() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
- ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function clean_service_on_sysvinit() {
- #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
if pidof tarbitrator &> /dev/null; then
${csudo} service tarbitratord stop || :
fi
@@ -226,7 +224,6 @@ function install_service() {
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
- # must manual stop taosd
kill_tarbitrator
fi
}
@@ -252,7 +249,6 @@ function update_tq() {
install_service
echo
- #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
@@ -273,7 +269,6 @@ function install_tq() {
install_bin
install_service
echo
- #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index 3df7013b197baaf4d78bb0f0ae5d507d6be92715..a73f6ac9718064855a245c0505e179b6376d7c96 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -86,7 +86,6 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@@ -97,7 +96,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
- [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
@@ -128,7 +126,7 @@ function install_lib() {
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
-
+
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
else
@@ -137,9 +135,10 @@ function install_lib() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -188,7 +187,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
diff --git a/packaging/tools/install_client_jh.sh b/packaging/tools/install_client_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cccf0a97be053796099d5b5f4a2c3db018c24955
--- /dev/null
+++ b/packaging/tools/install_client_jh.sh
@@ -0,0 +1,245 @@
+#!/bin/bash
+#
+# This file is used to install the jh_taos client on Linux systems. The operating
+# system is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+
+osType=Linux
+pagMode=full
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir=$(dirname $(readlink -f "$0"))
+ # Dynamic directory
+ data_dir="/var/lib/jh_taos"
+ log_dir="/var/log/jh_taos"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ data_dir="/var/lib/jh_taos"
+ log_dir="~/jh_taos/log"
+fi
+
+log_link_dir="/usr/local/jh_taos/log"
+
+cfg_install_dir="/etc/jh_taos"
+
+if [ "$osType" != "Darwin" ]; then
+ bin_link_dir="/usr/bin"
+ lib_link_dir="/usr/lib"
+ lib64_link_dir="/usr/lib64"
+ inc_link_dir="/usr/include"
+else
+ bin_link_dir="/usr/local/bin"
+ lib_link_dir="/usr/local/lib"
+ inc_link_dir="/usr/local/include"
+fi
+
+#install main path
+install_main_dir="/usr/local/jh_taos"
+
+# old bin dir
+bin_dir="/usr/local/jh_taos/bin"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+function kill_client() {
+ pid=$(ps -ef | grep "jh_taos" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/jh_taos || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${bin_link_dir}/jhdemo || :
+ ${csudo} rm -f ${bin_link_dir}/jh_taosdump || :
+ fi
+ ${csudo} rm -f ${bin_link_dir}/rmjh || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || :
+ if [ "$osType" != "Darwin" ]; then
+ [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || :
+ [ -x ${install_main_dir}/bin/jh_taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taosdump ${bin_link_dir}/jh_taosdump || :
+ fi
+ [ -x ${install_main_dir}/bin/remove_client_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_jh.sh ${bin_link_dir}/rmjh || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ sudo rm -f /usr/lib/libtaos.* || :
+ sudo rm -rf ${lib_dir} || :
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [ -d "${lib64_link_dir}" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+ else
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
+ fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ else
+ mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ fi
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function update() {
+ # Start to update
+ if [ ! -e jh_taos.tar.gz ]; then
+ echo "File jh_taos.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf jh_taos.tar.gz
+
+ echo -e "${GREEN}Start to update jh_iot client...${NC}"
+ # Stop the client shell if running
+ if pidof jh_taos &> /dev/null; then
+ kill_client
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf jh_taos.tar.gz)
+}
+
+function install() {
+ # Start to install
+ if [ ! -e jh_taos.tar.gz ]; then
+ echo "File jh_taos.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf jh_taos.tar.gz
+
+ echo -e "${GREEN}Start to install jh_taos client...${NC}"
+
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}"
+
+ rm -rf $(tar -tf jh_taos.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the client
+# if the server is already installed, there is no need to install the client
+ if [ -e ${bin_dir}/jh_taosd ]; then
+     echo -e "\033[44;32;1mThe jh_iot server is already installed, so there is no need to install the client!${NC}"
+ exit 0
+ fi
+
+ if [ -x ${bin_dir}/jh_taos ]; then
+ update_flag=1
+ update
+ else
+ install
+ fi
diff --git a/packaging/tools/install_client_kh.sh b/packaging/tools/install_client_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..210fd27fb6978527d76f0c915d0293370b93cf3e
--- /dev/null
+++ b/packaging/tools/install_client_kh.sh
@@ -0,0 +1,246 @@
+#!/bin/bash
+#
+# This file is used to install the kinghistorian client on Linux systems. The operating
+# system is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+
+osType=Linux
+pagMode=full
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir=$(dirname $(readlink -f "$0"))
+ # Dynamic directory
+ data_dir="/var/lib/kinghistorian"
+ log_dir="/var/log/kinghistorian"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ data_dir="/var/lib/kinghistorian"
+ log_dir="~/kinghistorian/log"
+fi
+
+log_link_dir="/usr/local/kinghistorian/log"
+
+cfg_install_dir="/etc/kinghistorian"
+
+if [ "$osType" != "Darwin" ]; then
+ bin_link_dir="/usr/bin"
+ lib_link_dir="/usr/lib"
+ lib64_link_dir="/usr/lib64"
+ inc_link_dir="/usr/include"
+else
+ bin_link_dir="/usr/local/bin"
+ lib_link_dir="/usr/local/lib"
+ inc_link_dir="/usr/local/include"
+fi
+
+#install main path
+install_main_dir="/usr/local/kinghistorian"
+
+# old bin dir
+bin_dir="/usr/local/kinghistorian/bin"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+function kill_client() {
+ pid=$(ps -ef | grep "khclient" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/khclient || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${bin_link_dir}/khdemo || :
+ ${csudo} rm -f ${bin_link_dir}/khdump || :
+ fi
+ ${csudo} rm -f ${bin_link_dir}/rmkh || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/khclient ] && ${csudo} ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || :
+ if [ "$osType" != "Darwin" ]; then
+ [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || :
+ [ -x ${install_main_dir}/bin/khdump ] && ${csudo} ln -s ${install_main_dir}/bin/khdump ${bin_link_dir}/khdump || :
+ fi
+ [ -x ${install_main_dir}/bin/remove_client_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_kh.sh ${bin_link_dir}/rmkh || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ sudo rm -f /usr/lib/libtaos.* || :
+ sudo rm -rf ${lib_dir} || :
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [ -d "${lib64_link_dir}" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+ else
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
+ fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo} cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ else
+ mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ fi
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function update() {
+ # Start to update
+ if [ ! -e kinghistorian.tar.gz ]; then
+ echo "File kinghistorian.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf kinghistorian.tar.gz
+
+ echo -e "${GREEN}Start to update KingHistorian client...${NC}"
+ # Stop the client shell if running
+ if pidof khclient &> /dev/null; then
+ kill_client
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf kinghistorian.tar.gz)
+}
+
+function install() {
+ # Start to install
+ if [ ! -e kinghistorian.tar.gz ]; then
+ echo "File kinghistorian.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf kinghistorian.tar.gz
+
+ echo -e "${GREEN}Start to install KingHistorian client...${NC}"
+
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}"
+
+ rm -rf $(tar -tf kinghistorian.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the client
+# if the server is already installed, there is no need to install the client
+ if [ -e ${bin_dir}/khserver ]; then
+     echo -e "\033[44;32;1mThe KingHistorian server is already installed, so there is no need to install the client!${NC}"
+ exit 0
+ fi
+
+ if [ -x ${bin_dir}/khclient ]; then
+ update_flag=1
+ update
+ else
+ install
+ fi
diff --git a/packaging/tools/install_client_power.sh b/packaging/tools/install_client_power.sh
index 31da0d61319045800fe3a454d071118aa3a4768e..f96b0134dc34f61425410360d0b0f935da7b39e5 100755
--- a/packaging/tools/install_client_power.sh
+++ b/packaging/tools/install_client_power.sh
@@ -133,9 +133,10 @@ function install_lib() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -184,7 +185,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -193,16 +194,14 @@ function install_jemalloc() {
}
function install_config() {
- #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
- if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ if [ ! -f ${cfg_install_dir}/power.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/power.cfg ] && ${csudo} cp ${script_dir}/cfg/power.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg
}
diff --git a/packaging/tools/install_client_pro.sh b/packaging/tools/install_client_pro.sh
index fff8ae31200669ee3ab918a873e33fc32ece37c8..c21f9d2e6aa685096eb55dcc03924bf453906b8f 100755
--- a/packaging/tools/install_client_pro.sh
+++ b/packaging/tools/install_client_pro.sh
@@ -109,7 +109,6 @@ function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
- #${csudo} rm -rf ${v15_java_app_dir} || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
@@ -130,23 +129,22 @@ function install_lib() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_config() {
- #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
- if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ if [ ! -f ${cfg_install_dir}/prodb.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo} cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg
}
@@ -235,14 +233,14 @@ function install_prodb() {
## ==============================Main program starts from here============================
# Install or updata client and client
# if server is already install, don't install client
- if [ -e ${bin_dir}/prodbs ]; then
- echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}"
- exit 0
- fi
+if [ -e ${bin_dir}/prodbs ]; then
+ echo -e "\033[44;32;1mThere are already installed ProDB server, so don't need install client!${NC}"
+ exit 0
+fi
- if [ -x ${bin_dir}/prodbc ]; then
- update_flag=1
- update_prodb
- else
- install_prodb
- fi
+if [ -x ${bin_dir}/prodbc ]; then
+ update_flag=1
+ update_prodb
+else
+ install_prodb
+fi
diff --git a/packaging/tools/install_client_tq.sh b/packaging/tools/install_client_tq.sh
index 2537442ee264e9aeb4eb6b3d25a17faf60f4df9a..31a75f4fe74fbb4c68942f226602fa2117e1a01c 100755
--- a/packaging/tools/install_client_tq.sh
+++ b/packaging/tools/install_client_tq.sh
@@ -133,23 +133,22 @@ function install_lib() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
function install_config() {
- #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
- if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ if [ ! -f ${cfg_install_dir}/tq.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo} cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg
}
diff --git a/packaging/tools/install_jh.sh b/packaging/tools/install_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c97a882b37388b3fb364f272140974571291d940
--- /dev/null
+++ b/packaging/tools/install_jh.sh
@@ -0,0 +1,948 @@
+#!/bin/bash
+#
+# This file is used to install the database on Linux systems. The operating system
+# is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+iplist=""
+serverFqdn=""
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+# Dynamic directory
+data_dir="/var/lib/jh_taos"
+log_dir="/var/log/jh_taos"
+
+data_link_dir="/usr/local/jh_taos/data"
+log_link_dir="/usr/local/jh_taos/log"
+
+cfg_install_dir="/etc/jh_taos"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/jh_taos"
+
+# old bin dir
+bin_dir="/usr/local/jh_taos/bin"
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact jhict.com for support."
+ os_type=1
+fi
+
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:e:i:" arg
+do
+ case $arg in
+ e)
+ #echo "interactiveFqdn=$OPTARG"
+ interactiveFqdn=$( echo $OPTARG )
+ ;;
+ v)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
+ i)
+ #echo "initType=$OPTARG"
+ initType=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
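+# Illustrative invocations (a sketch of typical usage based on the flags above; the exact
+# script name is an assumption and may differ in the shipped package):
+#   ./install_jh.sh                  # interactive full server install
+#   ./install_jh.sh -v client -e no  # non-interactive, client-only install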
+
+function kill_process() {
+ pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+# ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+# ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} mkdir -p ${nginx_dir}
+ fi
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/jh_taos || :
+ ${csudo} rm -f ${bin_link_dir}/jh_taosd || :
+ ${csudo} rm -f ${bin_link_dir}/jhdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmjh || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/jh_taos ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taos ${bin_link_dir}/jh_taos || :
+ [ -x ${install_main_dir}/bin/jh_taosd ] && ${csudo} ln -s ${install_main_dir}/bin/jh_taosd ${bin_link_dir}/jh_taosd || :
+ [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || :
+ [ -x ${install_main_dir}/bin/remove_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_jh.sh ${bin_link_dir}/rmjh || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
+ ${csudo} mkdir -p ${nginx_dir}/logs
+ ${csudo} chmod 777 ${nginx_dir}/sbin/nginx
+ fi
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ldconfig
+ else
+ ${csudo} update_dyld_shared_cache
+ fi
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
+function add_newHostname_to_hosts() {
+ localIp="127.0.0.1"
+ OLD_IFS="$IFS"
+ IFS=" "
+ iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
+ arr=($iphost)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$localIp" ]]; then
+ return
+ fi
+ done
+ ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||:
+}
+
+function set_hostname() {
+ echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
+ read newHostname
+ while true; do
+ if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
+ break
+ else
+ read -p "Please enter one hostname(must not be 'localhost'):" newHostname
+ fi
+ done
+
+ ${csudo} hostname $newHostname ||:
+ retval=`echo $?`
+ if [[ $retval != 0 ]]; then
+ echo
+ echo "set hostname fail!"
+ return
+ fi
+
+ #ubuntu/centos /etc/hostname
+ if [[ -e /etc/hostname ]]; then
+ echo $newHostname | ${csudo} tee /etc/hostname > /dev/null ||:
+ fi
+
+ #debian: #HOSTNAME=yourname
+ if [[ -e /etc/sysconfig/network ]]; then
+ ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
+ fi
+
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$newHostname
+
+ if [[ -e /etc/hosts ]]; then
+ add_newHostname_to_hosts $newHostname
+ fi
+}
+
+function is_correct_ipaddr() {
+ newIp=$1
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($iplist)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$newIp" ]]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+function set_ipAsFqdn() {
+ iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
+ if [ -z "$iplist" ]; then
+ iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
+ fi
+
+ if [ -z "$iplist" ]; then
+ echo
+ echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
+ localFqdn="127.0.0.1"
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ echo
+ return
+ fi
+
+ echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
+ echo
+ echo -e -n "${GREEN}$iplist${NC}"
+ echo
+ echo
+ echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
+ read localFqdn
+ while true; do
+ if [ ! -z "$localFqdn" ]; then
+ # Check if correct ip address
+ is_correct_ipaddr $localFqdn
+ retval=`echo $?`
+ if [[ $retval != 0 ]]; then
+ read -p "Please choose an IP from local IP list:" localFqdn
+ else
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ break
+ fi
+ else
+ read -p "Please choose an IP from local IP list:" localFqdn
+ fi
+ done
+}
+
+function local_fqdn_check() {
+ #serverFqdn=$(hostname)
+ echo
+ echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
+ echo
+ if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
+ echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
+ echo
+
+ while true
+ do
+ read -r -p "Set hostname now? [Y/n] " input
+ if [ ! -n "$input" ]; then
+ set_hostname
+ break
+ else
+ case $input in
+ [yY][eE][sS]|[yY])
+ set_hostname
+ break
+ ;;
+
+ [nN][oO]|[nN])
+ set_ipAsFqdn
+ break
+ ;;
+
+ *)
+ echo "Invalid input..."
+ ;;
+ esac
+ fi
+ done
+ fi
+}
+
+function install_config() {
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+ [ ! -z $1 ] && return 0 || : # only install client
+
+ if ((${update_flag}==1)); then
+ return 0
+ fi
+
+ if [ "$interactiveFqdn" == "no" ]; then
+ return 0
+ fi
+
+ local_fqdn_check
+
+ #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
+ #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
+ #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
+ #FQDN_PATTERN=":[0-9]{1,5}$"
+
+ # first fully qualified domain name (FQDN) for the jh_iot cluster system
+ echo
+ echo -e -n "${GREEN}Enter FQDN:port (like h1.jhict.com:6030) of an existing jh_iot cluster node to join${NC}"
+ echo
+ echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
+ read firstEp
+ while true; do
+ if [ ! -z "$firstEp" ]; then
+ # check the format of the firstEp
+ #if [[ $firstEp == $FQDN_PATTERN ]]; then
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ break
+ #else
+ # read -p "Please enter the correct FQDN:port: " firstEp
+ #fi
+ else
+ break
+ fi
+ done
+}
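+# Note: when firstEp above is left blank, this node bootstraps a new cluster; otherwise the
+# entered FQDN:port is written as firstEp into taos.cfg so this node joins the existing cluster.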
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_data() {
+ ${csudo} mkdir -p ${data_dir}
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ if pidof jh_taosd &> /dev/null; then
+ ${csudo} service jh_taosd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/jh_taosd ]; then
+ ${csudo} chkconfig --del jh_taosd || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/jh_taosd ]; then
+ ${csudo} insserv -r jh_taosd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/jh_taosd ]; then
+ ${csudo} update-rc.d -f jh_taosd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/jh_taosd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install jh_taosd service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/jh_taosd.deb ${install_main_dir}/init.d/jh_taosd
+ ${csudo} cp ${script_dir}/init.d/jh_taosd.deb ${service_config_dir}/jh_taosd && ${csudo} chmod a+x ${service_config_dir}/jh_taosd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/jh_taosd.rpm ${install_main_dir}/init.d/jh_taosd
+ ${csudo} cp ${script_dir}/init.d/jh_taosd.rpm ${service_config_dir}/jh_taosd && ${csudo} chmod a+x ${service_config_dir}/jh_taosd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add jh_taosd || :
+ ${csudo} chkconfig --level 2345 jh_taosd on || :
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv jh_taosd || :
+ ${csudo} insserv -d jh_taosd || :
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d jh_taosd defaults || :
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ jh_taosd_service_config="${service_config_dir}/jh_taosd.service"
+ if systemctl is-active --quiet jh_taosd; then
+ echo "jh_iot is running, stopping it..."
+ ${csudo} systemctl stop jh_taosd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable jh_taosd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${jh_taosd_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ if systemctl is-active --quiet nginxd; then
+ echo "Nginx for jh_iot is running, stopping it..."
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+}
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ service_config="${service_config_dir}/jh_taosd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${service_config}"
+ ${csudo} bash -c "echo 'Description=jh_iot server service' >> ${service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${service_config}"
+ ${csudo} bash -c "echo >> ${service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/jh_taosd' >> ${service_config}"
+ ${csudo} bash -c "echo 'ExecStartPre=/usr/local/jh_taos/bin/startPre.sh' >> ${service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${service_config}"
+ ${csudo} bash -c "echo >> ${service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}"
+ ${csudo} systemctl enable jh_taosd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=jh_iot arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Description=Nginx For jh_iot Service' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
+ if ! ${csudo} systemctl enable nginxd &> /dev/null; then
+ ${csudo} systemctl daemon-reexec
+ ${csudo} systemctl enable nginxd
+ fi
+ ${csudo} systemctl start nginxd
+ fi
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must manually stop jh_taosd
+ kill_process jh_taosd
+ fi
+}
+
+vercomp () {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]
+ then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]}))
+ then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]}))
+ then
+ return 2
+ fi
+ done
+ return 0
+}
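+# Worked example of vercomp's return codes: "vercomp 2.0.20 2.0.4" returns 1 (first is newer),
+# "vercomp 2.0.4 2.0.20" returns 2 (first is older), and identical versions return 0.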
+
+function is_version_compatible() {
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/jh_taosd -V | head -1 | cut -d ' ' -f 5)
+ fi
+
+ vercomp $curr_version $min_compatible_version
+ case $? in
+ 0) return 0;;
+ 1) return 0;;
+ 2) return 1;;
+ esac
+}
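+# is_version_compatible returns 0 when the packaged libtaos driver version is not older than
+# the minimum compatible version (read from driver/vercomp.txt, or reported by jh_taosd -V).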
+
+function update() {
+ # Start to update
+ if [ ! -e jh_taos.tar.gz ]; then
+ echo "File jh_taos.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf jh_taos.tar.gz
+ install_jemalloc
+
+ # Check if version compatible
+ if ! is_version_compatible; then
+ echo -e "${RED}Version incompatible${NC}"
+ return 1
+ fi
+
+ echo -e "${GREEN}Start to update jh_iot...${NC}"
+ # Stop the service if running
+ if pidof jh_taosd &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop jh_taosd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service jh_taosd stop || :
+ else
+ kill_process jh_taosd
+ fi
+ sleep 1
+ fi
+ if [ "$verMode" == "cluster" ]; then
+ if pidof nginx &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop nginxd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service nginxd stop || :
+ else
+ kill_process nginx
+ fi
+ sleep 1
+ fi
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+# if [ "$pagMode" != "lite" ]; then
+# install_connector
+# fi
+# install_examples
+ if [ -z $1 ]; then
+ install_bin
+ install_service
+ install_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if openresty is installed
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for jh_iot is updated successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for jh_iot does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ #echo
+ #echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} systemctl start jh_taosd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} service jh_taosd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start jh_iot ${NC}: ./jh_taosd${NC}"
+ fi
+
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access jh_iot ${NC}: use ${GREEN_UNDERLINE}jh_taos -h $serverFqdn${NC} in shell${NC}"
+ fi
+
+ echo
+ echo -e "\033[44;32;1mjh_iot is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mjh_iot client is updated successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf jh_taos.tar.gz)
+}
+
+function install() {
+ # Start to install
+ if [ ! -e jh_taos.tar.gz ]; then
+ echo "File jh_taos.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf jh_taos.tar.gz
+
+ echo -e "${GREEN}Start to install jh_iot...${NC}"
+
+ install_main_path
+
+ if [ -z $1 ]; then
+ install_data
+ fi
+
+ install_log
+ install_header
+ install_lib
+ install_jemalloc
+# if [ "$pagMode" != "lite" ]; then
+# install_connector
+# fi
+# install_examples
+
+ if [ -z $1 ]; then # install service and client
+ # For installing new
+ install_bin
+ install_service
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for jh_iot is installed successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for jh_iot does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ install_config
+
+ # Ask if to start the service
+ #echo
+ #echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure jh_iot ${NC}: edit /etc/jh_taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} systemctl start jh_taosd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start jh_iot ${NC}: ${csudo} service jh_taosd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start jh_iot ${NC}: jh_taosd${NC}"
+ fi
+
+ if [ ! -z "$firstEp" ]; then
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]];then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]];then
+ echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
+ else
+ echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ elif [ ! -z "$serverFqdn" ]; then
+ echo -e "${GREEN_DARK}To access jh_iot ${NC}: jh_taos -h $serverFqdn${GREEN_DARK} to login into jh_iot server${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1mjh_iot is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mjh_iot client is installed successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf jh_taos.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+serverFqdn=$(hostname)
+if [ "$verType" == "server" ]; then
+ # Install server and client
+ if [ -x ${bin_dir}/jh_taosd ]; then
+ update_flag=1
+ update
+ else
+ install
+ fi
+elif [ "$verType" == "client" ]; then
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/jh_taos ]; then
+ update_flag=1
+ update client
+ else
+ install client
+ fi
+else
+ echo "please input correct verType"
+fi
diff --git a/packaging/tools/install_kh.sh b/packaging/tools/install_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..db7481d5acc88c3bc00db99163023da22e43da36
--- /dev/null
+++ b/packaging/tools/install_kh.sh
@@ -0,0 +1,948 @@
+#!/bin/bash
+#
+# This file is used to install the database on Linux systems. The operating system
+# is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+iplist=""
+serverFqdn=""
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+# Dynamic directory
+data_dir="/var/lib/kinghistorian"
+log_dir="/var/log/kinghistorian"
+
+data_link_dir="/usr/local/kinghistorian/data"
+log_link_dir="/usr/local/kinghistorian/log"
+
+cfg_install_dir="/etc/kinghistorian"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/kinghistorian"
+
+# old bin dir
+bin_dir="/usr/local/kinghistorian/bin"
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact wellintech.com for support."
+ os_type=1
+fi
+
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:e:i:" arg
+do
+ case $arg in
+ e)
+ #echo "interactiveFqdn=$OPTARG"
+ interactiveFqdn=$( echo $OPTARG )
+ ;;
+ v)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
+ i)
+ #echo "initType=$OPTARG"
+ initType=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
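+# Illustrative invocations (assumed, mirroring install_jh.sh):
+#   ./install_kh.sh                  # interactive full server install
+#   ./install_kh.sh -v client -e no  # non-interactive, client-only install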
+
+function kill_process() {
+ pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+# ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+# ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} mkdir -p ${nginx_dir}
+ fi
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/khclient || :
+ ${csudo} rm -f ${bin_link_dir}/khserver || :
+ ${csudo} rm -f ${bin_link_dir}/khdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmkh || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/khclient ] && ${csudo} ln -s ${install_main_dir}/bin/khclient ${bin_link_dir}/khclient || :
+ [ -x ${install_main_dir}/bin/khserver ] && ${csudo} ln -s ${install_main_dir}/bin/khserver ${bin_link_dir}/khserver || :
+ [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || :
+ [ -x ${install_main_dir}/bin/remove_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_kh.sh ${bin_link_dir}/rmkh || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
+ ${csudo} mkdir -p ${nginx_dir}/logs
+ ${csudo} chmod 777 ${nginx_dir}/sbin/nginx
+ fi
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ldconfig
+ else
+ ${csudo} update_dyld_shared_cache
+ fi
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
+function add_newHostname_to_hosts() {
+ localIp="127.0.0.1"
+ OLD_IFS="$IFS"
+ IFS=" "
+ iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
+ arr=($iphost)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$localIp" ]]; then
+ return
+ fi
+ done
+ ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||:
+}
+
+function set_hostname() {
+ echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
+ read newHostname
+ while true; do
+ if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
+ break
+ else
+ read -p "Please enter one hostname(must not be 'localhost'):" newHostname
+ fi
+ done
+
+ ${csudo} hostname $newHostname ||:
+ retval=`echo $?`
+ if [[ $retval != 0 ]]; then
+ echo
+ echo "set hostname fail!"
+ return
+ fi
+
+ #ubuntu/centos /etc/hostname
+ if [[ -e /etc/hostname ]]; then
+ echo $newHostname | ${csudo} tee /etc/hostname > /dev/null ||:
+ fi
+
+ #debian: #HOSTNAME=yourname
+ if [[ -e /etc/sysconfig/network ]]; then
+ ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
+ fi
+
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/kinghistorian.cfg
+ serverFqdn=$newHostname
+
+ if [[ -e /etc/hosts ]]; then
+ add_newHostname_to_hosts $newHostname
+ fi
+}
+
+function is_correct_ipaddr() {
+ newIp=$1
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($iplist)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$newIp" ]]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+function set_ipAsFqdn() {
+ iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
+ if [ -z "$iplist" ]; then
+ iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
+ fi
+
+ if [ -z "$iplist" ]; then
+ echo
+ echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
+ localFqdn="127.0.0.1"
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg
+ serverFqdn=$localFqdn
+ echo
+ return
+ fi
+
+ echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
+ echo
+ echo -e -n "${GREEN}$iplist${NC}"
+ echo
+ echo
+ echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:"
+ read localFqdn
+ while true; do
+ if [ ! -z "$localFqdn" ]; then
+ # Check if correct ip address
+ is_correct_ipaddr $localFqdn
+ retval=`echo $?`
+ if [[ $retval != 0 ]]; then
+ read -p "Please choose an IP from local IP list:" localFqdn
+ else
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/kinghistorian.cfg
+ serverFqdn=$localFqdn
+ break
+ fi
+ else
+ read -p "Please choose an IP from local IP list:" localFqdn
+ fi
+ done
+}
+
+function local_fqdn_check() {
+ #serverFqdn=$(hostname)
+ echo
+ echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
+ echo
+ if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
+ echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
+ echo
+
+ while true
+ do
+ read -r -p "Set hostname now? [Y/n] " input
+ if [ ! -n "$input" ]; then
+ set_hostname
+ break
+ else
+ case $input in
+ [yY][eE][sS]|[yY])
+ set_hostname
+ break
+ ;;
+
+ [nN][oO]|[nN])
+ set_ipAsFqdn
+ break
+ ;;
+
+ *)
+ echo "Invalid input..."
+ ;;
+ esac
+ fi
+ done
+ fi
+}
+
+function install_config() {
+ if [ ! -f ${cfg_install_dir}/kinghistorian.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/kinghistorian.cfg ] && ${csudo} cp ${script_dir}/cfg/kinghistorian.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/kinghistorian.cfg ${install_main_dir}/cfg/kinghistorian.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/kinghistorian.cfg ${install_main_dir}/cfg
+
+ [ ! -z $1 ] && return 0 || : # only install client
+
+ if ((${update_flag}==1)); then
+ return 0
+ fi
+
+ if [ "$interactiveFqdn" == "no" ]; then
+ return 0
+ fi
+
+ local_fqdn_check
+
+ #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
+ #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
+ #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
+ #FQDN_PATTERN=":[0-9]{1,5}$"
+
+ # first fully qualified domain name (FQDN) for the KingHistorian cluster system
+ echo
+ echo -e -n "${GREEN}Enter FQDN:port (like h1.wellintech.com:6030) of an existing KingHistorian cluster node to join${NC}"
+ echo
+ echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
+ read firstEp
+ while true; do
+ if [ ! -z "$firstEp" ]; then
+ # check the format of the firstEp
+ #if [[ $firstEp == $FQDN_PATTERN ]]; then
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/kinghistorian.cfg
+ break
+ #else
+ # read -p "Please enter the correct FQDN:port: " firstEp
+ #fi
+ else
+ break
+ fi
+ done
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_data() {
+ ${csudo} mkdir -p ${data_dir}
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ if pidof khserver &> /dev/null; then
+ ${csudo} service khserver stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/khserver ]; then
+ ${csudo} chkconfig --del khserver || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/khserver ]; then
+ ${csudo} insserv -r khserver || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/khserver ]; then
+ ${csudo} update-rc.d -f khserver remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/khserver || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install khserver service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/khserver.deb ${install_main_dir}/init.d/khserver
+ ${csudo} cp ${script_dir}/init.d/khserver.deb ${service_config_dir}/khserver && ${csudo} chmod a+x ${service_config_dir}/khserver
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/khserver.rpm ${install_main_dir}/init.d/khserver
+ ${csudo} cp ${script_dir}/init.d/khserver.rpm ${service_config_dir}/khserver && ${csudo} chmod a+x ${service_config_dir}/khserver
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add khserver || :
+ ${csudo} chkconfig --level 2345 khserver on || :
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv khserver || :
+ ${csudo} insserv -d khserver || :
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d khserver defaults || :
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ khserver_service_config="${service_config_dir}/khserver.service"
+ if systemctl is-active --quiet khserver; then
+ echo "KingHistorian is running, stopping it..."
+ ${csudo} systemctl stop khserver &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable khserver &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${khserver_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ if systemctl is-active --quiet nginxd; then
+ echo "Nginx for KingHistorian is running, stopping it..."
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+}
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ service_config="${service_config_dir}/khserver.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${service_config}"
+ ${csudo} bash -c "echo 'Description=KingHistorian server service' >> ${service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${service_config}"
+ ${csudo} bash -c "echo >> ${service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/khserver' >> ${service_config}"
+ ${csudo} bash -c "echo 'ExecStartPre=/usr/local/kinghistorian/bin/startPre.sh' >> ${service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${service_config}"
+ ${csudo} bash -c "echo >> ${service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${service_config}"
+ ${csudo} systemctl enable khserver
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=KingHistorian arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Description=Nginx For KingHistorian Service' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
+ if ! ${csudo} systemctl enable nginxd &> /dev/null; then
+ ${csudo} systemctl daemon-reexec
+ ${csudo} systemctl enable nginxd
+ fi
+ ${csudo} systemctl start nginxd
+ fi
+}
+
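+# Register the service with the detected manager: 0 = systemd, 1 = sysvinit, otherwise no service manager (khserver is run manually).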
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must stop khserver manually
+ kill_process khserver
+ fi
+}
+
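+# Compare two dotted version strings: return 0 if equal, 1 if $1 is newer, 2 if $1 is older.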
+vercomp () {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]
+ then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]}))
+ then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]}))
+ then
+ return 2
+ fi
+ done
+ return 0
+}
+
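+# Succeed only when the packaged driver version is not older than the minimum compatible version (from vercomp.txt or khserver -V).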
+function is_version_compatible() {
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/khserver -V | head -1 | cut -d ' ' -f 5)
+ fi
+
+ vercomp $curr_version $min_compatible_version
+ case $? in
+ 0) return 0;;
+ 1) return 0;;
+ 2) return 1;;
+ esac
+}
+
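+# In-place upgrade: extract the package, check version compatibility, stop running services, then reinstall files, service and config.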
+function update() {
+ # Start to update
+ if [ ! -e kinghistorian.tar.gz ]; then
+ echo "File kinghistorian.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf kinghistorian.tar.gz
+ install_jemalloc
+
+ # Check if version compatible
+ if ! is_version_compatible; then
+ echo -e "${RED}Version incompatible${NC}"
+ return 1
+ fi
+
+ echo -e "${GREEN}Start to update KingHistorian...${NC}"
+ # Stop the service if running
+ if pidof khserver &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop khserver || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service khserver stop || :
+ else
+ kill_process khserver
+ fi
+ sleep 1
+ fi
+ if [ "$verMode" == "cluster" ]; then
+ if pidof nginx &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop nginxd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service nginxd stop || :
+ else
+ kill_process nginx
+ fi
+ sleep 1
+ fi
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+# if [ "$pagMode" != "lite" ]; then
+# install_connector
+# fi
+# install_examples
+ if [ -z $1 ]; then
+ install_bin
+ install_service
+ install_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx (openresty) is installed and serving successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for KingHistorian is updated successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for KingHistorian does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ #echo
+ #echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} systemctl start khserver${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} service khserver start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ./khserver${NC}"
+ fi
+
+ if [ "${openresty_work}" = 'true' ]; then
+ echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access KingHistorian ${NC}: use ${GREEN_UNDERLINE}khclient -h $serverFqdn${NC} in shell${NC}"
+ fi
+
+ echo
+ echo -e "\033[44;32;1mKingHistorian is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mKingHistorian client is updated successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf kinghistorian.tar.gz)
+}
+
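+# Fresh install: extract the package and install data/log directories, headers, libraries, binaries, service and config.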
+function install() {
+ # Start to install
+ if [ ! -e kinghistorian.tar.gz ]; then
+ echo "File kinghistorian.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf kinghistorian.tar.gz
+
+ echo -e "${GREEN}Start to install KingHistorian...${NC}"
+
+ install_main_path
+
+ if [ -z $1 ]; then
+ install_data
+ fi
+
+ install_log
+ install_header
+ install_lib
+ install_jemalloc
+# if [ "$pagMode" != "lite" ]; then
+# install_connector
+# fi
+# install_examples
+
+ if [ -z $1 ]; then # install service and client
+ # For a fresh install
+ install_bin
+ install_service
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for KingHistorian is installed successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for KingHistorian does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ install_config
+
+ # Ask whether to start the service
+ #echo
+ #echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure KingHistorian ${NC}: edit /etc/kinghistorian/kinghistorian.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} systemctl start khserver${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start KingHistorian ${NC}: ${csudo} service khserver start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start KingHistorian ${NC}: khserver${NC}"
+ fi
+
+ if [ ! -z "$firstEp" ]; then
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]];then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]];then
+ echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
+ else
+ echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ elif [ ! -z "$serverFqdn" ]; then
+ echo -e "${GREEN_DARK}To access KingHistorian ${NC}: khclient -h $serverFqdn${GREEN_DARK} to login into KingHistorian server${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1mKingHistorian is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mKingHistorian client is installed successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf kinghistorian.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+serverFqdn=$(hostname)
+if [ "$verType" == "server" ]; then
+ # Install server and client
+ if [ -x ${bin_dir}/khserver ]; then
+ update_flag=1
+ update
+ else
+ install
+ fi
+elif [ "$verType" == "client" ]; then
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/khclient ]; then
+ update_flag=1
+ update client
+ else
+ install client
+ fi
+else
+ echo "please input correct verType"
+fi
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index 0e0ee7ba31f4715b2c5585dd040727d604aa90b1..58096dc040e57ab5ae76cbf9f00e580ed35a8f78 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -210,13 +210,6 @@ function install_lib() {
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
- #if [ "$verMode" == "cluster" ]; then
- # # Compatible with version 1.5
- # ${csudo} mkdir -p ${v15_java_app_dir}
- # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
- # ${csudo} chmod 777 ${v15_java_app_dir} || :
- #fi
-
${csudo} ldconfig
}
@@ -265,7 +258,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -274,9 +267,10 @@ function install_jemalloc() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -328,7 +322,7 @@ function set_hostname() {
${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
fi
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/power.cfg
serverFqdn=$newHostname
if [[ -e /etc/hosts ]]; then
@@ -363,7 +357,7 @@ function set_ipAsFqdn() {
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
localFqdn="127.0.0.1"
# Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg
serverFqdn=$localFqdn
echo
return
@@ -385,7 +379,7 @@ function set_ipAsFqdn() {
read -p "Please choose an IP from local IP list:" localFqdn
else
# Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/power.cfg
serverFqdn=$localFqdn
break
fi
@@ -432,16 +426,14 @@ function local_fqdn_check() {
}
function install_config() {
- #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
- if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ if [ ! -f ${cfg_install_dir}/power.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/power.cfg ] && ${csudo} cp ${script_dir}/cfg/power.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/cfg/power.cfg ${install_main_dir}/cfg/power.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/power.cfg ${install_main_dir}/cfg
[ ! -z $1 ] && return 0 || : # only install client
@@ -471,7 +463,7 @@ function install_config() {
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then
# Write the first FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/power.cfg
break
#else
# read -p "Please enter the correct FQDN:port: " firstEp
@@ -607,7 +599,7 @@ function clean_service_on_systemd() {
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
if systemctl is-active --quiet nginxd; then
- echo "Nginx for TDengine is running, stopping it..."
+ echo "Nginx for PowerDB is running, stopping it..."
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
@@ -646,7 +638,7 @@ function install_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
@@ -828,7 +820,7 @@ function update_PowerDB() {
#echo
#echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
echo
- echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg"
+ echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}"
elif ((${service_mod}==1)); then
@@ -905,7 +897,7 @@ function install_PowerDB() {
#echo
#echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
echo
- echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg"
+ echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/power.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh
index e5675b858066148df07508ad2438b0f00d7ce7bf..fbd38875f7dffa651f26ca5969cc06f61b3d0737 100755
--- a/packaging/tools/install_pro.sh
+++ b/packaging/tools/install_pro.sh
@@ -212,9 +212,10 @@ function install_lib() {
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -263,7 +264,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -316,7 +317,7 @@ function set_hostname() {
${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
fi
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/prodb.cfg
serverFqdn=$newHostname
if [[ -e /etc/hosts ]]; then
@@ -351,7 +352,7 @@ function set_ipAsFqdn() {
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
localFqdn="127.0.0.1"
# Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg
serverFqdn=$localFqdn
echo
return
@@ -373,7 +374,7 @@ function set_ipAsFqdn() {
read -p "Please choose an IP from local IP list:" localFqdn
else
# Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/prodb.cfg
serverFqdn=$localFqdn
break
fi
@@ -420,14 +421,14 @@ function local_fqdn_check() {
}
function install_config() {
- if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ if [ ! -f ${cfg_install_dir}/prodb.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/prodb.cfg ] && ${csudo} cp ${script_dir}/cfg/prodb.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/cfg/prodb.cfg ${install_main_dir}/cfg/prodb.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/prodb.cfg ${install_main_dir}/cfg
[ ! -z $1 ] && return 0 || : # only install client
@@ -457,7 +458,7 @@ function install_config() {
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then
# Write the first FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/prodb.cfg
break
#else
# read -p "Please enter the correct FQDN:port: " firstEp
@@ -805,7 +806,7 @@ function update_prodb() {
#echo
#echo -e "\033[44;32;1mProDB is updated successfully!${NC}"
echo
- echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg"
+ echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}"
elif ((${service_mod}==1)); then
@@ -882,7 +883,7 @@ function install_prodb() {
#echo
#echo -e "\033[44;32;1mProDB is installed successfully!${NC}"
echo
- echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg"
+ echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/prodb.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh
index ef5fb8c05a4a98a55918ee217125bd0f0a09b955..d335c759008e3df3eab249b3abc1cc9b2291bb53 100755
--- a/packaging/tools/install_tq.sh
+++ b/packaging/tools/install_tq.sh
@@ -210,20 +210,14 @@ function install_lib() {
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
- #if [ "$verMode" == "cluster" ]; then
- # # Compatible with version 1.5
- # ${csudo} mkdir -p ${v15_java_app_dir}
- # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
- # ${csudo} chmod 777 ${v15_java_app_dir} || :
- #fi
-
${csudo} ldconfig
}
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
}
@@ -272,7 +266,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -328,7 +322,7 @@ function set_hostname() {
${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
fi
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/tq.cfg
serverFqdn=$newHostname
if [[ -e /etc/hosts ]]; then
@@ -363,7 +357,7 @@ function set_ipAsFqdn() {
echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}"
localFqdn="127.0.0.1"
# Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg
serverFqdn=$localFqdn
echo
return
@@ -385,7 +379,7 @@ function set_ipAsFqdn() {
read -p "Please choose an IP from local IP list:" localFqdn
else
# Write the local FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/tq.cfg
serverFqdn=$localFqdn
break
fi
@@ -432,16 +426,14 @@ function local_fqdn_check() {
}
function install_config() {
- #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
- if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ if [ ! -f ${cfg_install_dir}/tq.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/tq.cfg ] && ${csudo} cp ${script_dir}/cfg/tq.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ ${csudo} cp -f ${script_dir}/cfg/tq.cfg ${install_main_dir}/cfg/tq.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/tq.cfg ${install_main_dir}/cfg
[ ! -z $1 ] && return 0 || : # only install client
@@ -471,7 +463,7 @@ function install_config() {
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then
# Write the first FQDN to configuration file
- ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/tq.cfg
break
#else
# read -p "Please enter the correct FQDN:port: " firstEp
@@ -607,7 +599,7 @@ function clean_service_on_systemd() {
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
if systemctl is-active --quiet nginxd; then
- echo "Nginx for TDengine is running, stopping it..."
+ echo "Nginx for TQ is running, stopping it..."
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
@@ -646,7 +638,7 @@ function install_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
- ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
@@ -828,7 +820,7 @@ function update_tq() {
#echo
#echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
echo
- echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg"
+ echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}"
elif ((${service_mod}==1)); then
@@ -905,7 +897,7 @@ function install_tq() {
#echo
#echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
echo
- echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg"
+ echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/tq.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 093b2bb0a7ea8033b7509e231200b8b4ad6901be..07a88f30162b4fb4d6cb6c0bd31cac29420e0bf3 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -4,7 +4,7 @@
# is required to use systemd to manage services at boot
set -e
-# set -x
+#set -x
# -----------------------Variables definition
source_dir=$1
@@ -232,7 +232,7 @@ function install_jemalloc() {
/usr/local/lib/pkgconfig
fi
if [ -d /etc/ld.so.conf.d ]; then
- echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/jemalloc.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -253,7 +253,7 @@ function install_jemalloc() {
function install_avro() {
if [ "$osType" != "Darwin" ]; then
- if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then
+ if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ] && [ -d /usr/local/$1 ]; then
${csudo} /usr/bin/install -c -d /usr/local/$1
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
${csudo} ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
@@ -263,7 +263,7 @@ function install_avro() {
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
if [ -d /etc/ld.so.conf.d ]; then
- echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf
+ echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf > /dev/null || echo -e "failed to write /etc/ld.so.conf.d/libavro.conf"
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -329,15 +329,16 @@ function install_lib() {
function install_header() {
if [ "$osType" != "Darwin" ]; then
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
- ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h \
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \
${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
else
- ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h \
+ ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \
${install_main_dir}/include \
- || ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h \
+ || ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taosdef.h ${source_dir}/src/inc/taoserror.h \
${install_main_2_dir}/include \
&& ${csudo} chmod 644 ${install_main_dir}/include/* \
|| ${csudo} chmod 644 ${install_main_2_dir}/include/*
@@ -345,9 +346,7 @@ function install_header() {
}
function install_config() {
- #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
-
- if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
[ -f ${script_dir}/../cfg/taos.cfg ] &&
${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh
index 6dcabc2a0622e5fec67431c8663541a2b40048e1..d654910480e52b99e040df09e1fb9ecedbe5cad5 100755
--- a/packaging/tools/makearbi.sh
+++ b/packaging/tools/makearbi.sh
@@ -34,7 +34,7 @@ fi
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi.sh"
install_files="${script_dir}/install_arbi.sh"
-#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
diff --git a/packaging/tools/makearbi_jh.sh b/packaging/tools/makearbi_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5457b163599421d0a5917156efde1c8814a6f514
--- /dev/null
+++ b/packaging/tools/makearbi_jh.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Generate the arbitrator's tar.gz setup package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/jh_iot-enterprise-arbitrator-${version}"
+else
+ install_dir="${release_dir}/jh_iot-arbitrator-${version}"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_jh.sh"
+install_files="${script_dir}/install_arbi_jh.sh"
+
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_jh.sh || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+exitcode=0
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makearbi_kh.sh b/packaging/tools/makearbi_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c7fa40eb4f1fc4003e6a584bdc5c4534616754d6
--- /dev/null
+++ b/packaging/tools/makearbi_kh.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+#
+# Generate the arbitrator's tar.gz setup package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/KingHistorian-enterprise-arbitrator-${version}"
+else
+ install_dir="${release_dir}/KingHistorian-arbitrator-${version}"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_kh.sh"
+install_files="${script_dir}/install_arbi_kh.sh"
+
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_kh.sh || :
+#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+exitcode=0
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makearbi_power.sh b/packaging/tools/makearbi_power.sh
index fd50ecd43878de08e7bb94249da8cb64c3630e6e..a942a7860dd4fd0a6590fceadc00abfc19815414 100755
--- a/packaging/tools/makearbi_power.sh
+++ b/packaging/tools/makearbi_power.sh
@@ -34,7 +34,6 @@ fi
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh"
install_files="${script_dir}/install_arbi_power.sh"
-#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
@@ -45,7 +44,7 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
-cd ${release_dir}
+cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
@@ -58,8 +57,8 @@ fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
-elif [ "$verType" == "stable" ]; then
- pkg_name=${pkg_name}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
diff --git a/packaging/tools/makearbi_pro.sh b/packaging/tools/makearbi_pro.sh
index 6ce3765e44acc408ced9730c54b793338eb37b38..c432e97d4762da7a5a68672c46e118f76c59ae20 100755
--- a/packaging/tools/makearbi_pro.sh
+++ b/packaging/tools/makearbi_pro.sh
@@ -34,7 +34,6 @@ fi
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh"
install_files="${script_dir}/install_arbi_pro.sh"
-#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
@@ -45,7 +44,7 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
-cd ${release_dir}
+cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
@@ -58,8 +57,8 @@ fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
-elif [ "$verType" == "stable" ]; then
- pkg_name=${pkg_name}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
diff --git a/packaging/tools/makearbi_tq.sh b/packaging/tools/makearbi_tq.sh
index c10dfec255d411965a3887942e5d2aded4635979..3460696b08c11815a68edc12a61d53f2651d699a 100755
--- a/packaging/tools/makearbi_tq.sh
+++ b/packaging/tools/makearbi_tq.sh
@@ -34,7 +34,6 @@ fi
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_tq.sh"
install_files="${script_dir}/install_arbi_tq.sh"
-#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
@@ -45,7 +44,7 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
-cd ${release_dir}
+cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${osType}-${cpuType}
@@ -58,8 +57,8 @@ fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
-elif [ "$verType" == "stable" ]; then
- pkg_name=${pkg_name}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 39a35e384fffdd4f319e72fbeb819fe08f7871b8..13172c46ee0458272b7ecb83fcde7a762ccdae2a 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
- bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo \
+ bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo \
${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
@@ -54,7 +54,7 @@ else
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
diff --git a/packaging/tools/makeclient_jh.sh b/packaging/tools/makeclient_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..267d78b953be33c567f8175a369b13f326ee3f5a
--- /dev/null
+++ b/packaging/tools/makeclient_jh.sh
@@ -0,0 +1,180 @@
+#!/bin/bash
+#
+# Generate tar.gz package for the Linux client on all OS systems
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/../..)"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/../..
+fi
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/jh_iot-enterprise-client-${version}"
+else
+ install_dir="${release_dir}/jh_iot-client-${version}"
+fi
+
+# Directories and files.
+
+if [ "$osType" != "Darwin" ]; then
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_jh.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+fi
+
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+
+install_files="${script_dir}/install_client_jh.sh"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg
+sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg
+sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg
+
+mkdir -p ${install_dir}/bin
+if [ "$osType" != "Darwin" ]; then
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos
+ cp ${script_dir}/remove_jh.sh ${install_dir}/bin
+ else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos
+ cp ${script_dir}/remove_jh.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+ fi
+else
+ cp ${bin_files} ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+ mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+ cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+ if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+ cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/bin/jeprof ]; then
+ cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+ cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+ cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+ ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
+ fi
+ if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
+ fi
+ if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
+ cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
+ fi
+fi
+
+cd ${install_dir}
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f jh_taos.tar.gz * --remove-files || :
+else
+ tar -zcv -f jh_taos.tar.gz * || :
+ mv jh_taos.tar.gz ..
+ rm -rf ./*
+ mv ../jh_taos.tar.gz .
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$osType" == "Darwin" ]; then
+ sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh
+ mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_jh.sh >> install_client_jh_temp.sh
+ mv install_client_jh_temp.sh ${install_dir}/install_client_jh.sh
+fi
+chmod a+x ${install_dir}/install_client_jh.sh
+
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stable or beta"
+ exit 1
+fi
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+else
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
+ mv "$(basename ${pkg_name}).tar.gz" ..
+ rm -rf ./*
+ mv ../"$(basename ${pkg_name}).tar.gz" .
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient_kh.sh b/packaging/tools/makeclient_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b991e2a6fae69a72d678cf5ff8751429a9f88fc6
--- /dev/null
+++ b/packaging/tools/makeclient_kh.sh
@@ -0,0 +1,180 @@
+#!/bin/bash
+#
+# Generate tar.gz package for the Linux client on all OS systems
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/../..)"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/../..
+fi
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/KingHistorian-enterprise-client-${version}"
+else
+ install_dir="${release_dir}/KingHistorian-client-${version}"
+fi
+
+# Directories and files.
+
+if [ "$osType" != "Darwin" ]; then
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_kh.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+fi
+
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+
+install_files="${script_dir}/install_client_kh.sh"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg
+
+sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg
+sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg
+
+mkdir -p ${install_dir}/bin
+if [ "$osType" != "Darwin" ]; then
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/khclient
+ cp ${script_dir}/remove_kh.sh ${install_dir}/bin
+ else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/khclient
+ cp ${script_dir}/remove_kh.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+ fi
+else
+ cp ${bin_files} ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+ mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+ cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+ if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+ cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/bin/jeprof ]; then
+ cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+ cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+ cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+ ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
+ fi
+ if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
+ fi
+ if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
+ cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
+ fi
+fi
+
+cd ${install_dir}
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f kinghistorian.tar.gz * --remove-files || :
+else
+ tar -zcv -f kinghistorian.tar.gz * || :
+ mv kinghistorian.tar.gz ..
+ rm -rf ./*
+ mv ../kinghistorian.tar.gz .
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$osType" == "Darwin" ]; then
+ sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh
+ mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_kh.sh >> install_client_kh_temp.sh
+ mv install_client_kh_temp.sh ${install_dir}/install_client_kh.sh
+fi
+chmod a+x ${install_dir}/install_client_kh.sh
+
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stable or beta"
+ exit 1
+fi
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+else
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
+ mv "$(basename ${pkg_name}).tar.gz" ..
+ rm -rf ./*
+ mv ../"$(basename ${pkg_name}).tar.gz" .
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 19e24b3dafb7f1f95832e637e181449e4c381faf..07dc9d30d21e130aff15f1c84a3db7e209867f88 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -53,7 +53,7 @@ else
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh
index 4a0b033d30e6478f37a62f9cc896aee0903d39c9..0c5033d87dd5815e9e3ec309e6b1bb9abe98ca42 100755
--- a/packaging/tools/makeclient_pro.sh
+++ b/packaging/tools/makeclient_pro.sh
@@ -46,7 +46,7 @@ else
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@@ -58,11 +58,11 @@ install_files="${script_dir}/install_client_pro.sh"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
-mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg
-sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
-sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
-sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg
+sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg
+sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg
+sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg
mkdir -p ${install_dir}/bin
if [ "$osType" != "Darwin" ]; then
diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh
index 1cc7003661a7491b1df625916dd289de32434ee9..3ed97520939be51f2f634f8955f16ecf9a46821b 100755
--- a/packaging/tools/makeclient_tq.sh
+++ b/packaging/tools/makeclient_tq.sh
@@ -40,20 +40,13 @@ fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
-# if [ "$pagMode" == "lite" ]; then
-# strip ${build_dir}/bin/tqd
-# strip ${build_dir}/bin/tq
-# bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh"
-# else
-# bin_files="${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${script_dir}/remove_client_tq.sh ${script_dir}/set_core.sh"
-# fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@@ -65,11 +58,11 @@ install_files="${script_dir}/install_client_tq.sh"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
-mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg
-sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
-sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
-sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
+sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg
+sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg
+sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg
mkdir -p ${install_dir}/bin
if [ "$osType" != "Darwin" ]; then
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 7ad703be86016bd0c0ce55c80b76bf34914c54bb..0f226dbb21232047ba0c19d2141958c2111f8c57 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -51,7 +51,7 @@ else
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@@ -92,6 +92,12 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
+ mkdir -p ${install_dir}/avro/{lib,lib/pkgconfig}
+ cp ${build_dir}/lib/libavro.* ${install_dir}/avro/lib
+ cp ${build_dir}/lib/pkgconfig/avro-c.pc ${install_dir}/avro/lib/pkgconfig
+fi
+
if [ -f ${build_dir}/bin/jemalloc-config ]; then
mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
diff --git a/packaging/tools/makepkg_jh.sh b/packaging/tools/makepkg_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d7b7746849578b0e2774ddf442566196102ea561
--- /dev/null
+++ b/packaging/tools/makepkg_jh.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+versionComp=$9
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+# package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/jh_iot-enterprise-server-${version}"
+else
+ install_dir="${release_dir}/jh_iot-server-${version}"
+fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+install_files="${script_dir}/install_jh.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+mkdir -p ${install_dir}/bin
+
+# bin
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+else
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump
+ cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/startPre.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+fi
+cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos
+cp ${build_dir}/bin/taosd ${install_dir}/bin/jh_taosd
+cp ${script_dir}/remove_jh.sh ${install_dir}/bin
+chmod a+x ${install_dir}/bin/* || :
+
+# cluster
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_jh.sh >> remove_jh_temp.sh
+ mv remove_jh_temp.sh ${install_dir}/bin/remove_jh.sh
+
+ mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
+ cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
+ rm -rf ${install_dir}/nginxd/png
+
+ # replace the OEM name
+ sed -i -e 's/www.taosdata.com/www.jhict.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
+ sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
+ sed -i -e 's/TAOS Data/Jinheng Technology/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
+ sed -i -e 's/taosd/jh_taosd/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq`
+  sed -i -e 's/taosd<\/th>/jh_taosd<\/th>/g' ${install_dir}/nginxd/admin/monitor.html
+ sed -i -e "s/data:\['taosd', 'system'\],/data:\['jh_taosd', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html
+ sed -i -e "s/name: 'taosd',/name: 'jh_taosd',/g" ${install_dir}/nginxd/admin/monitor.html
+ sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/jh_iot/g" ${install_dir}/nginxd/admin/js/*.js
+
+ if [ "$cpuType" == "aarch64" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
+ elif [ "$cpuType" == "aarch32" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
+ fi
+ rm -rf ${install_dir}/nginxd/sbin/arm
+fi
+
+sed -i '/dataDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg
+sed -i '/logDir/ {s/taos/jh_taos/g}' ${install_dir}/cfg/taos.cfg
+sed -i "s/TDengine/jh_iot/g" ${install_dir}/cfg/taos.cfg
+sed -i "s/support@taosdata.com/jhkj@njsteel.com.cn/g" ${install_dir}/cfg/taos.cfg
+sed -i "s/taos client/client/g" ${install_dir}/cfg/taos.cfg
+sed -i "s/taosd/server/g" ${install_dir}/cfg/taos.cfg
+
+cd ${install_dir}
+tar -zcv -f jh_taos.tar.gz * --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar jh_taos.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_jh.sh >> install_jh_temp.sh
+ mv install_jh_temp.sh ${install_dir}/install_jh.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+  sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/jh_taos_history/g" ${install_dir}/install_jh.sh >> install_jh_temp.sh
+ mv install_jh_temp.sh ${install_dir}/install_jh.sh
+fi
+
+sed -i "/install_connector$/d" ${install_dir}/install_jh.sh
+sed -i "/install_examples$/d" ${install_dir}/install_jh.sh
+chmod a+x ${install_dir}/install_jh.sh
+
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+  echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+  echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg_kh.sh b/packaging/tools/makepkg_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..80b7a06041c6c9dca926ecc0f87c8c8a7958c639
--- /dev/null
+++ b/packaging/tools/makepkg_kh.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+versionComp=$9
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+# package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/KingHistorian-enterprise-server-${version}"
+else
+ install_dir="${release_dir}/KingHistorian-server-${version}"
+fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+install_files="${script_dir}/install_kh.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/kinghistorian.cfg
+mkdir -p ${install_dir}/bin
+
+# bin
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+else
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump
+ cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/startPre.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+fi
+cp ${build_dir}/bin/taos ${install_dir}/bin/khclient
+cp ${build_dir}/bin/taosd ${install_dir}/bin/khserver
+cp ${script_dir}/remove_kh.sh ${install_dir}/bin
+chmod a+x ${install_dir}/bin/* || :
+
+# cluster
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_kh.sh >> remove_kh_temp.sh
+ mv remove_kh_temp.sh ${install_dir}/bin/remove_kh.sh
+
+ mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
+ cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
+ rm -rf ${install_dir}/nginxd/png
+
+  # replace the OEM name, added by yangzy@2021-09-22
+ sed -i -e 's/www.taosdata.com/www.wellintech.com/g' $(grep -r 'www.taosdata.com' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
+ sed -i -e 's/2017/2021/g' $(grep -r '2017' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
+ sed -i -e 's/TAOS Data/Wellintech/g' $(grep -r 'TAOS Data' ${install_dir}/nginxd | sed -r "s/(.*\.html):\s*(.*)/\1/g")
+ sed -i -e 's/taosd/khserver/g' `grep -r 'taosd' ${install_dir}/nginxd | grep -E '*\.js\s*.*' | sed -r -e 's/(.*\.js):\s*(.*)/\1/g' | sort | uniq`
+  sed -i -e 's/taosd<\/th>/khserver<\/th>/g' ${install_dir}/nginxd/admin/monitor.html
+ sed -i -e "s/data:\['taosd', 'system'\],/data:\['khserver', 'system'\],/g" ${install_dir}/nginxd/admin/monitor.html
+ sed -i -e "s/name: 'taosd',/name: 'khserver',/g" ${install_dir}/nginxd/admin/monitor.html
+ sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/KingHistorian/g" ${install_dir}/nginxd/admin/js/*.js
+
+ if [ "$cpuType" == "aarch64" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
+ elif [ "$cpuType" == "aarch32" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
+ fi
+ rm -rf ${install_dir}/nginxd/sbin/arm
+fi
+
+sed -i '/dataDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg
+sed -i '/logDir/ {s/taos/kinghistorian/g}' ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/TDengine/KingHistorian/g" ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/support@taosdata.com/support@wellintech.com/g" ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/taos client/khclient/g" ${install_dir}/cfg/kinghistorian.cfg
+sed -i "s/taosd/khserver/g" ${install_dir}/cfg/kinghistorian.cfg
+
+cd ${install_dir}
+tar -zcv -f kinghistorian.tar.gz * --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar kinghistorian.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_kh.sh >> install_kh_temp.sh
+ mv install_kh_temp.sh ${install_dir}/install_kh.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+  sed -e "s/pagMode=full/pagMode=lite/g" -e "s/taos_history/kh_history/g" ${install_dir}/install_kh.sh >> install_kh_temp.sh
+ mv install_kh_temp.sh ${install_dir}/install_kh.sh
+fi
+
+sed -i "/install_connector$/d" ${install_dir}/install_kh.sh
+sed -i "/install_examples$/d" ${install_dir}/install_kh.sh
+chmod a+x ${install_dir}/install_kh.sh
+
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+  echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+  echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index 65200ddd047358f92f8e3a612c08eedb60053311..dadd42c64a990da74d0b696737300b84891df232 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -31,18 +31,8 @@ else
install_dir="${release_dir}/PowerDB-server-${version}"
fi
-# Directories and files.
-#if [ "$pagMode" == "lite" ]; then
-# strip ${build_dir}/bin/taosd
-# strip ${build_dir}/bin/taos
-# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
-#else
-# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh\
-# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
-#fi
-
lib_files="${build_dir}/lib/libtaos.so.${version}"
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@@ -51,13 +41,6 @@ fi
install_files="${script_dir}/install_power.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
-# Init file
-#init_dir=${script_dir}/deb
-#if [ $package_type = "centos" ]; then
-# init_dir=${script_dir}/rpm
-#fi
-#init_files=${init_dir}/powerd
-# temp use rpm's powerd. TODO: later modify according to os type
init_file_deb=${script_dir}/../deb/powerd
init_file_rpm=${script_dir}/../rpm/powerd
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
@@ -66,7 +49,7 @@ init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
-mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/power.cfg
#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/bin
@@ -109,9 +92,9 @@ if [ "$verMode" == "cluster" ]; then
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js
- sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
- sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
- sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
+ sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg
+ sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/power.cfg
+ sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/power.cfg
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh
index 457cb0de6f02f7000dc7437cde61bfec28c7205c..3896390068f8152a270e5c00b1b1f402be76bcc8 100755
--- a/packaging/tools/makepkg_pro.sh
+++ b/packaging/tools/makepkg_pro.sh
@@ -32,7 +32,7 @@ else
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@@ -44,7 +44,7 @@ nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
-mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/prodb.cfg
mkdir -p ${install_dir}/bin
# bin
@@ -94,12 +94,12 @@ if [ "$verMode" == "cluster" ]; then
rm -rf ${install_dir}/nginxd/sbin/arm
fi
-sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
-sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
-sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg
-sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/taos.cfg
-sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/taos.cfg
-sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/taos.cfg
+sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg
+sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/prodb.cfg
+sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/prodb.cfg
+sed -i "s/support@taosdata.com/support@hanatech.com.cn/g" ${install_dir}/cfg/prodb.cfg
+sed -i "s/taos client/prodbc/g" ${install_dir}/cfg/prodb.cfg
+sed -i "s/taosd/prodbs/g" ${install_dir}/cfg/prodb.cfg
cd ${install_dir}
tar -zcv -f prodb.tar.gz * --remove-files || :
@@ -124,50 +124,9 @@ sed -i "/install_connector$/d" ${install_dir}/install_pro.sh
sed -i "/install_examples$/d" ${install_dir}/install_pro.sh
chmod a+x ${install_dir}/install_pro.sh
-# Copy example code
-#mkdir -p ${install_dir}/examples
-#examples_dir="${top_dir}/tests/examples"
-#cp -r ${examples_dir}/c ${install_dir}/examples
-#sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
-#sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
-#
-#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
-# cp -r ${examples_dir}/JDBC ${install_dir}/examples
-# cp -r ${examples_dir}/matlab ${install_dir}/examples
-# mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m
-# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m
-# cp -r ${examples_dir}/python ${install_dir}/examples
-# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py
-# cp -r ${examples_dir}/R ${install_dir}/examples
-# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt
-# cp -r ${examples_dir}/go ${install_dir}/examples
-# mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go
-# sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go
-#fi
-
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
-# Copy connector
-#connector_dir="${code_dir}/connector"
-#mkdir -p ${install_dir}/connector
-#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
-# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
-
-# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
-# cp -r ${connector_dir}/go ${install_dir}/connector
-# else
-# echo "WARNING: go connector not found, please check if want to use it!"
-# fi
-# cp -r ${connector_dir}/python ${install_dir}/connector/
-# mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb
-# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py
-
-# sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py
-
-# sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py
-#fi
-
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh
index 07032379d7e4bab2636f3685b6edb620780a124a..94c563ab45e2e82f8cbd485fa17f11541323af17 100755
--- a/packaging/tools/makepkg_tq.sh
+++ b/packaging/tools/makepkg_tq.sh
@@ -31,18 +31,8 @@ else
install_dir="${release_dir}/TQ-server-${version}"
fi
-# Directories and files.
-#if [ "$pagMode" == "lite" ]; then
-# strip ${build_dir}/bin/taosd
-# strip ${build_dir}/bin/taos
-# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
-#else
-# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh\
-# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
-#fi
-
lib_files="${build_dir}/lib/libtaos.so.${version}"
-header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taosdef.h ${code_dir}/inc/taoserror.h"
if [ "$verMode" == "cluster" ]; then
cfg_dir="${top_dir}/../enterprise/packaging/cfg"
else
@@ -51,40 +41,24 @@ fi
install_files="${script_dir}/install_tq.sh"
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
-# Init file
-#init_dir=${script_dir}/deb
-#if [ $package_type = "centos" ]; then
-# init_dir=${script_dir}/rpm
-#fi
-#init_files=${init_dir}/tqd
-# temp use rpm's tqd. TODO: later modify according to os type
-#init_file_deb=${script_dir}/../deb/tqd
-#init_file_rpm=${script_dir}/../rpm/tqd
-#init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
-#init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
-
# make directories.
mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
-mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/tq.cfg
-#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/bin
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
-# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/tq
cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
cp ${script_dir}/remove_tq.sh ${install_dir}/bin
else
-# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh ${script_dir}/set_core.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/tq
cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
cp ${script_dir}/remove_tq.sh ${install_dir}/bin
cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||:
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
- cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
@@ -93,11 +67,6 @@ else
fi
chmod a+x ${install_dir}/bin/* || :
-#mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/tqd.deb
-#mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/tqd.rpm
-#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
-#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
-
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh >> remove_tq_temp.sh
mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh
@@ -109,9 +78,9 @@ if [ "$verMode" == "cluster" ]; then
sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html
sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js
- sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
- sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
- sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
+ sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg
+ sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/tq.cfg
+ sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/tq.cfg
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
@@ -181,10 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py
fi
-# Copy release note
-# cp ${script_dir}/release_note ${install_dir}
-
-# exit 1
cd ${release_dir}
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index 20ef425f56fb5d8f5d90b7a8cc4ef4a6da7a1b9c..7a90435d8f1ce8dc190e0f1513aee080838e4645 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -81,29 +81,12 @@ function kill_taosd() {
}
function install_include() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h|| :
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h|| :
${csudo} ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h
${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
}
-function install_avro_lib() {
- ${csudo} rm -f ${lib_link_dir}/libavro* || :
- ${csudo} rm -f ${lib64_link_dir}/libavro* || :
-
- if [[ -f ${lib_dir}/libavro.so.23.0.0 ]]; then
- ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23.0.0
- ${csudo} ln -s ${lib_link_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23
- ${csudo} ln -s ${lib_link_dir}/libavro.so.23 ${lib_link_dir}/libavro.so
-
- if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libavro.so ]]; then
- ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23.0.0 || :
- ${csudo} ln -s ${lib64_link_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23 || :
- ${csudo} ln -s ${lib64_link_dir}/libavro.so.23 ${lib64_link_dir}/libavro.so || :
- fi
- fi
-
- ${csudo} ldconfig
-}
function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos* || :
${csudo} rm -f ${lib64_link_dir}/libtaos* || :
@@ -503,7 +486,6 @@ function install_TDengine() {
# Install include, lib, binary and service
install_include
install_lib
- install_avro_lib
install_bin
install_config
install_taosadapter_config
diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh
index d2d36364208f23492d2ba6aefa783c85ad6d5572..43fead76ba675dfa8ee45422d9f9dc8166b2488d 100755
--- a/packaging/tools/preun.sh
+++ b/packaging/tools/preun.sh
@@ -122,10 +122,10 @@ ${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
-${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${cfg_link_dir}/*.new || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
+${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index 07a8362b2c45676986513020da668ff9235f00fa..6241dd9eabdc065425ec6753273c2e269f65a42d 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -96,6 +96,7 @@ function clean_lib() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
diff --git a/packaging/tools/remove_arbi.sh b/packaging/tools/remove_arbi.sh
index 68fd9275fb922d3eb0ab82bc010262c6c61b2962..4495f25f3617c3c28c6dbd22ddeeda93cdf5423a 100755
--- a/packaging/tools/remove_arbi.sh
+++ b/packaging/tools/remove_arbi.sh
@@ -57,6 +57,7 @@ function clean_bin() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
diff --git a/packaging/tools/remove_arbi_jh.sh b/packaging/tools/remove_arbi_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8b690771c761ac51772dac83cafec46360a16be3
--- /dev/null
+++ b/packaging/tools/remove_arbi_jh.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall jh_iot's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "jh_iot's tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "jh_iot's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+    # must stop manually
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}jh_iot's arbitrator is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_arbi_kh.sh b/packaging/tools/remove_arbi_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ec3254b01649add57f9485c59878059e086b2669
--- /dev/null
+++ b/packaging/tools/remove_arbi_kh.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall KingHistorian's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "KingHistorian's tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "KingHistorian's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+    # must stop manually
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}KingHistorian's arbitrator is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_arbi_power.sh b/packaging/tools/remove_arbi_power.sh
index 077b19ec7d4208c604c2042c2aa1eacab2033c5b..27b08a47e87c28395faa004515702d9e1b51492a 100755
--- a/packaging/tools/remove_arbi_power.sh
+++ b/packaging/tools/remove_arbi_power.sh
@@ -57,6 +57,7 @@ function clean_bin() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
@@ -127,4 +128,4 @@ clean_log
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}"
-echo
\ No newline at end of file
+echo
diff --git a/packaging/tools/remove_arbi_pro.sh b/packaging/tools/remove_arbi_pro.sh
index ff10478881628bdaf027c618a1b89f204ebbdb35..4bb435cc97e7b31341007ac56c6eb1bbe75a9fda 100755
--- a/packaging/tools/remove_arbi_pro.sh
+++ b/packaging/tools/remove_arbi_pro.sh
@@ -57,6 +57,7 @@ function clean_bin() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
diff --git a/packaging/tools/remove_arbi_tq.sh b/packaging/tools/remove_arbi_tq.sh
index 3d99b6d41a74938d74383df3d8cdfc75c2ebb7c8..e0b401255f1071a21f0a5e09cd9b6e0e307ec5ba 100755
--- a/packaging/tools/remove_arbi_tq.sh
+++ b/packaging/tools/remove_arbi_tq.sh
@@ -57,6 +57,7 @@ function clean_bin() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
@@ -127,4 +128,4 @@ clean_log
${csudo} rm -rf ${install_main_dir}
echo -e "${GREEN}TQ's arbitrator is removed successfully!${NC}"
-echo
\ No newline at end of file
+echo
diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh
index 7579162dc60e290754e71ed6a71c10cfaee5537b..aad8d67d948d566b72820625391ba7592859c079 100755
--- a/packaging/tools/remove_client.sh
+++ b/packaging/tools/remove_client.sh
@@ -52,13 +52,14 @@ function clean_lib() {
function clean_header() {
# Remove link
- ${csudo} rm -f ${inc_link_dir}/taos.h || :
- ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
- ${csudo} rm -f ${cfg_link_dir}/* || :
+ ${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
diff --git a/packaging/tools/remove_client_jh.sh b/packaging/tools/remove_client_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a3f5dfd10debb0a28211b3682becd083d49ca9c6
--- /dev/null
+++ b/packaging/tools/remove_client_jh.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+#
+# Script to stop and uninstall the jh_iot client, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/jh_taos"
+
+log_link_dir="/usr/local/jh_taos/log"
+cfg_link_dir="/usr/local/jh_taos/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+ if [ -n "$(pidof jh_taos)" ]; then
+    ${csudo} kill -9 $(pidof jh_taos) || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/jh_taos || :
+ ${csudo} rm -f ${bin_link_dir}/jhdemo || :
+ ${csudo} rm -f ${bin_link_dir}/jh_taosdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmjh || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}jh_iot client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_client_kh.sh b/packaging/tools/remove_client_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6a44e875e3426b14400508b1bdbd7510c2ae49cb
--- /dev/null
+++ b/packaging/tools/remove_client_kh.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+#
+# Script to stop and uninstall the KingHistorian client, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/kinghistorian"
+
+log_link_dir="/usr/local/kinghistorian/log"
+cfg_link_dir="/usr/local/kinghistorian/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+ if [ -n "$(pidof khclient)" ]; then
+    ${csudo} kill -9 $(pidof khclient) || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/khclient || :
+ ${csudo} rm -f ${bin_link_dir}/khdemo || :
+ ${csudo} rm -f ${bin_link_dir}/khdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmkh || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}KingHistorian client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_client_power.sh b/packaging/tools/remove_client_power.sh
index 580c46e2077d7f21e06d4d4a8f69dcd5b6bbf51d..75e9717e54b6e02ad5a5d8b28244caf89ab570fb 100755
--- a/packaging/tools/remove_client_power.sh
+++ b/packaging/tools/remove_client_power.sh
@@ -52,13 +52,14 @@ function clean_lib() {
function clean_header() {
# Remove link
- ${csudo} rm -f ${inc_link_dir}/taos.h || :
- ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
- ${csudo} rm -f ${cfg_link_dir}/* || :
+ ${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
diff --git a/packaging/tools/remove_client_pro.sh b/packaging/tools/remove_client_pro.sh
index 59e4e8997620af035821df5a975fe58f1357c9dc..b7ddb27bf48c3e416523c021d42c6ae468fb04e2 100755
--- a/packaging/tools/remove_client_pro.sh
+++ b/packaging/tools/remove_client_pro.sh
@@ -46,13 +46,14 @@ function clean_lib() {
function clean_header() {
# Remove link
- ${csudo} rm -f ${inc_link_dir}/taos.h || :
- ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
- ${csudo} rm -f ${cfg_link_dir}/* || :
+ ${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
diff --git a/packaging/tools/remove_client_tq.sh b/packaging/tools/remove_client_tq.sh
index ad8056c18cc32623edb8b77bf6aa17070acc1cbc..d701217c77e671a4ad24234bdfb4a196f5545970 100755
--- a/packaging/tools/remove_client_tq.sh
+++ b/packaging/tools/remove_client_tq.sh
@@ -52,13 +52,14 @@ function clean_lib() {
function clean_header() {
# Remove link
- ${csudo} rm -f ${inc_link_dir}/taos.h || :
- ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
- ${csudo} rm -f ${cfg_link_dir}/* || :
+ ${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
diff --git a/packaging/tools/remove_jh.sh b/packaging/tools/remove_jh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b962b824fdeeda632be28eeeaa97199adcd6ca2c
--- /dev/null
+++ b/packaging/tools/remove_jh.sh
@@ -0,0 +1,209 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall jh_taos, but retain the config, data and log files.
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/jh_taos"
+data_link_dir="/usr/local/jh_taos/data"
+log_link_dir="/usr/local/jh_taos/log"
+cfg_link_dir="/usr/local/jh_taos/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+install_nginxd_dir="/usr/local/nginxd"
+
+service_config_dir="/etc/systemd/system"
+service_name="jh_taosd"
+tarbitrator_service_name="tarbitratord"
+nginx_service_name="nginxd"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_process() {
+ pid=$(ps -ef | grep "jh_taosd" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/jh_taos || :
+ ${csudo} rm -f ${bin_link_dir}/jh_taosd || :
+ ${csudo} rm -f ${bin_link_dir}/jhdemo || :
+ ${csudo} rm -f ${bin_link_dir}/jh_taosdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmjh || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+function clean_service_on_systemd() {
+ service_config="${service_config_dir}/${service_name}.service"
+ if systemctl is-active --quiet ${service_name}; then
+ echo "jh_iot's jh_taosd is running, stopping it..."
+ ${csudo} systemctl stop ${service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${service_config}
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "jh_iot's tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+ if [ -d ${bin_dir}/web ]; then
+ if systemctl is-active --quiet ${nginx_service_name}; then
+ echo "Nginx for jh_iot is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ if pidof jh_taosd &> /dev/null; then
+ echo "jh_iot's jh_taosd is running, stopping it..."
+ ${csudo} service jh_taosd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ echo "jh_iot's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/jh_taosd ]; then
+ ${csudo} chkconfig --del jh_taosd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/jh_taosd ]; then
+ ${csudo} insserv -r jh_taosd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/jh_taosd ]; then
+ ${csudo} update-rc.d -f jh_taosd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/jh_taosd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ kill_process
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+# Remove data link directory
+${csudo} rm -rf ${data_link_dir} || :
+
+${csudo} rm -rf ${install_main_dir}
+${csudo} rm -rf ${install_nginxd_dir}
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
+
+echo -e "${GREEN}jh_iot is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_kh.sh b/packaging/tools/remove_kh.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0055e043e74a3fa287cebbd325f83c7f8f98ca8a
--- /dev/null
+++ b/packaging/tools/remove_kh.sh
@@ -0,0 +1,209 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall kinghistorian, but retain the config, data and log files.
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/kinghistorian"
+data_link_dir="/usr/local/kinghistorian/data"
+log_link_dir="/usr/local/kinghistorian/log"
+cfg_link_dir="/usr/local/kinghistorian/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+install_nginxd_dir="/usr/local/nginxd"
+
+service_config_dir="/etc/systemd/system"
+service_name="khserver"
+tarbitrator_service_name="tarbitratord"
+nginx_service_name="nginxd"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_process() {
+ pid=$(ps -ef | grep "khserver" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/khclient || :
+ ${csudo} rm -f ${bin_link_dir}/khserver || :
+ ${csudo} rm -f ${bin_link_dir}/khdemo || :
+ ${csudo} rm -f ${bin_link_dir}/khdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmkh || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+function clean_service_on_systemd() {
+ service_config="${service_config_dir}/${service_name}.service"
+ if systemctl is-active --quiet ${service_name}; then
+ echo "KingHistorian's khserver is running, stopping it..."
+ ${csudo} systemctl stop ${service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${service_config}
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "KingHistorian's tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+ if [ -d ${bin_dir}/web ]; then
+ if systemctl is-active --quiet ${nginx_service_name}; then
+ echo "Nginx for KingHistorian is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ if pidof khserver &> /dev/null; then
+ echo "KingHistorian's khserver is running, stopping it..."
+ ${csudo} service khserver stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ echo "KingHistorian's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/khserver ]; then
+ ${csudo} chkconfig --del khserver || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/khserver ]; then
+ ${csudo} insserv -r khserver || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/khserver ]; then
+ ${csudo} update-rc.d -f khserver remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/khserver || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ kill_process
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+# Remove data link directory
+${csudo} rm -rf ${data_link_dir} || :
+
+${csudo} rm -rf ${install_main_dir}
+${csudo} rm -rf ${install_nginxd_dir}
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
+
+echo -e "${GREEN}KingHistorian is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_power.sh b/packaging/tools/remove_power.sh
index 816869cf444d8001e0c0aae30840d2c40a9e6af4..1953458d1040a3f67e27a4cac9c380651b3ec949 100755
--- a/packaging/tools/remove_power.sh
+++ b/packaging/tools/remove_power.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Script to stop the service and uninstall TDengine, but retain the config, data and log files.
+# Script to stop the service and uninstall PowerDB, but retain the config, data and log files.
set -e
#set -x
@@ -88,6 +88,7 @@ function clean_lib() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
@@ -112,7 +113,7 @@ function clean_service_on_systemd() {
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
- echo "TDengine tarbitrator is running, stopping it..."
+ echo "PowerDB tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
@@ -122,7 +123,7 @@ function clean_service_on_systemd() {
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
- echo "Nginx for TDengine is running, stopping it..."
+ echo "Nginx for PowerDB is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
@@ -133,9 +134,6 @@ function clean_service_on_systemd() {
}
function clean_service_on_sysvinit() {
- #restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
- #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
-
if pidof powerd &> /dev/null; then
echo "PowerDB powerd is running, stopping it..."
${csudo} service powerd stop || :
@@ -183,7 +181,6 @@ function clean_service() {
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
- # must manual stop taosd
kill_powerd
kill_tarbitrator
fi
diff --git a/packaging/tools/remove_pro.sh b/packaging/tools/remove_pro.sh
index f6dad22bc21b02a9d717d530c50bc19c5a718478..f32b4204225b83287c818b80025e4544f75782d6 100755
--- a/packaging/tools/remove_pro.sh
+++ b/packaging/tools/remove_pro.sh
@@ -85,6 +85,7 @@ function clean_lib() {
function clean_header() {
# Remove link
${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
@@ -177,7 +178,6 @@ function clean_service() {
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
- # must manual stop taosd
kill_prodbs
kill_tarbitrator
fi
diff --git a/packaging/tools/remove_tq.sh b/packaging/tools/remove_tq.sh
index 211eed4dff09ab5da00d5c475cd93148b5ce1b24..e08ac47f7d866edf9c2e1478947822f350410b85 100755
--- a/packaging/tools/remove_tq.sh
+++ b/packaging/tools/remove_tq.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Script to stop the service and uninstall TDengine, but retain the config, data and log files.
+# Script to stop the service and uninstall TQ, but retain the config, data and log files.
set -e
#set -x
@@ -87,13 +87,14 @@ function clean_lib() {
function clean_header() {
# Remove link
- ${csudo} rm -f ${inc_link_dir}/taos.h || :
- ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
}
function clean_config() {
# Remove link
- ${csudo} rm -f ${cfg_link_dir}/* || :
+ ${csudo} rm -f ${cfg_link_dir}/* || :
}
function clean_log() {
@@ -109,10 +110,10 @@ function clean_service_on_systemd() {
fi
${csudo} systemctl disable ${tq_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${tq_service_config}
-
+
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
- echo "TDengine tarbitrator is running, stopping it..."
+ echo "TQ tarbitrator is running, stopping it..."
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
@@ -122,7 +123,7 @@ function clean_service_on_systemd() {
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
- echo "Nginx for TDengine is running, stopping it..."
+ echo "Nginx for TQ is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
@@ -183,7 +184,6 @@ function clean_service() {
elif ((${service_mod}==1)); then
clean_service_on_sysvinit
else
- # must manual stop taosd
kill_tqd
kill_tarbitrator
fi
@@ -212,16 +212,5 @@ else
osinfo=""
fi
-#if echo $osinfo | grep -qwi "ubuntu" ; then
-## echo "this is ubuntu system"
-# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
-#elif echo $osinfo | grep -qwi "debian" ; then
-## echo "this is debian system"
-# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
-#elif echo $osinfo | grep -qwi "centos" ; then
-## echo "this is centos system"
-# ${csudo} rpm -e --noscripts tdengine || :
-#fi
-
echo -e "${GREEN}TQ is removed successfully!${NC}"
echo
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index e508b66a16a0c14f99ac6cbd14445882f42513c3..48d3fe8675f7c5ab0e1f0678a269b03bc154a337 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -26,7 +26,7 @@ IF (TD_LINUX)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt cJson)
IF (TD_LINUX_64)
- TARGET_LINK_LIBRARIES(taos lua cJson)
+ TARGET_LINK_LIBRARIES(taos ${LINK_LUA} cJson)
ENDIF ()
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
@@ -45,13 +45,13 @@ ELSEIF (TD_DARWIN)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua cJson)
+ TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m ${LINK_LUA} cJson)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua cJson)
+ TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m ${LINK_LUA} cJson)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
@@ -77,7 +77,7 @@ ELSEIF (TD_WINDOWS)
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
- TARGET_LINK_LIBRARIES(taos trpc tutil query lua cJson)
+ TARGET_LINK_LIBRARIES(taos trpc tutil query ${LINK_LUA} cJson)
ELSEIF (TD_DARWIN)
SET(CMAKE_MACOSX_RPATH 1)
@@ -85,12 +85,12 @@ ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua cJson)
+ TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m ${LINK_LUA} cJson)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua cJson)
+ TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m ${LINK_LUA} cJson)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 4df28118f2745890ea845b76908857f9962f8764..921adf1a5fd72f65b370185089917179de67eaf4 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -3318,13 +3318,15 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
+ if (pToken->n == 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
if (isTablenameToken(pToken)) {
pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX;
} else if (strlen(DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == pToken->n &&
strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) {
pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest
- } else if (pToken->n == 0) {
- pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest
} else {
// not specify the table name, try to locate the table index by column name
if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) {
@@ -3708,6 +3710,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
int32_t aggNum = 0;
int32_t scalarFuncNum = 0;
int32_t funcCompatFactor = INT_MAX;
+ int32_t countTbname = 0;
size_t numOfExpr = tscNumOfExprs(pQueryInfo);
assert(numOfExpr > 0);
@@ -3772,9 +3775,14 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || twQuery || !groupbyTagsOrNull(pQueryInfo))) {
return false;
}
+
+ if (functionId == TSDB_FUNC_COUNT && (pExpr1->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX || TSDB_COL_IS_TAG(pExpr1->base.colInfo.flag))) {
+ ++countTbname;
+ }
}
- aggNum = (int32_t)size - prjNum - scalarFuncNum - aggUdf - scalarUdf;
+
+ aggNum = (int32_t)size - prjNum - scalarFuncNum - aggUdf - scalarUdf - countTbname;
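+ // count(tbname)/count(tag) expressions are tallied separately in countTbname and excluded from
+ // aggNum; mixing them with projections, other aggregates, or UDFs is rejected below.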
assert(aggNum >= 0);
@@ -3790,6 +3798,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
return false;
}
+ if (countTbname && (prjNum > 0 || aggNum > 0 || scalarUdf > 0 || aggUdf > 0)) {
+ return false;
+ }
+
return true;
}
@@ -6014,10 +6026,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
* for table query, there is only one or none order option is allowed, which is the
* ts or values(top/bottom) order is supported.
*
- * for super table query, the order option must be less than 3.
+ * for super table query, there must be fewer than 3 order by columns, and the second one must be ts.
+ *
+ * order by supports 5 situations:
+ * 1. from stable group by tag1 order by tag1 [ts]
+ * 2. from stable group by tbname order by tbname [ts]
+ * 3. from stable/table group by column1 order by column1
+ * 4. from stable/table order by ts
+ * 5. select stable/table top(column2,1) ... order by column2
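+ *
+ * illustrative examples (assuming a super table st with tag t1 and columns c1, c2):
+ *   1. select count(*) from st group by t1 order by t1;
+ *   2. select count(*) from st group by tbname order by tbname;
+ *   3. select count(*) from st group by c1 order by c1;
+ *   4. select * from st order by ts;
+ *   5. select top(c2, 1) from st order by c2;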
*/
size_t size = taosArrayGetSize(pSortOrder);
- if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
+ if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
if (size > 1) {
return invalidOperationMsg(pMsgBuf, msg0);
}
@@ -6027,15 +6046,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
}
}
if (size > 0 && pQueryInfo->distinct) {
- return invalidOperationMsg(pMsgBuf, msg10);
+ return invalidOperationMsg(pMsgBuf, msg10);
}
// handle the first part of order by
tVariant* pVar = taosArrayGet(pSortOrder, 0);
- // e.g., order by 1 asc, return directly with out further check.
- if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
- return TSDB_CODE_SUCCESS;
+ if (pVar->nType != TSDB_DATA_TYPE_BINARY){
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
SStrToken columnName = {pVar->nLen, pVar->nType, pVar->pz};
@@ -6044,7 +6062,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (pQueryInfo->pUdfInfo && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) {
int32_t usize = (int32_t)taosArrayGetSize(pQueryInfo->pUdfInfo);
-
+
for (int32_t i = 0; i < usize; ++i) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, i);
if (pUdfInfo->funcType == TSDB_UDF_TYPE_SCALAR) {
@@ -6063,9 +6081,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
bool orderByTS = false;
bool orderByGroupbyCol = false;
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+ if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // order by tag1
int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
-
+
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
return invalidOperationMsg(pMsgBuf, msg4);
@@ -6074,26 +6092,29 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (relTagIndex == pColIndex->colIndex) {
orderByTags = true;
}
- } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- orderByTags = true;
- }
-
- if (PRIMARYKEY_TIMESTAMP_COL_INDEX == index.columnIndex) {
+ } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // order by tbname
+ // it is a tag column
+ if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
+ return invalidOperationMsg(pMsgBuf, msg4);
+ }
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
+ if (TSDB_TBNAME_COLUMN_INDEX == pColIndex->colIndex) {
+ orderByTags = true;
+ }
+ } else if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // order by ts
orderByTS = true;
- }
-
- SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
- if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
- SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
- if (PRIMARYKEY_TIMESTAMP_COL_INDEX != index.columnIndex && pColIndex->colIndex == index.columnIndex) {
- orderByGroupbyCol = true;
+ } else { // order by normal column
+ SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
+ if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
+ SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
+ if (pColIndex->colIndex == index.columnIndex) {
+ orderByGroupbyCol = true;
+ }
}
}
if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) {
return invalidOperationMsg(pMsgBuf, msg3);
- } else { // order by top/bottom result value column is not supported in case of interval query.
- assert(!(orderByTags && orderByTS && orderByGroupbyCol));
}
size_t s = taosArrayGetSize(pSortOrder);
@@ -6228,7 +6249,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
- if (pColIndex->colIndex == index.columnIndex) {
+ if (pColIndex->colIndex != index.columnIndex) {
return invalidOperationMsg(pMsgBuf, msg8);
}
} else {
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 8000ccaacc47b1d392eacadf91b8d5f2867c1bbc..d2c8c10ec0b2c9a5e719e667a8cd4f48ae6424bf 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -2293,7 +2293,15 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
if (row[i] == NULL) {
setNull(p + offset, pExpr->base.resType, pExpr->base.resBytes);
} else {
- memcpy(p + offset, row[i], length[i]);
+ if(pExpr->base.resType == TSDB_DATA_TYPE_NCHAR){
+ int32_t output = 0;
+ bool ret = taosMbsToUcs4(row[i], length[i], p + offset, pExpr->base.resBytes, &output);
+ if (!ret) {
+ tscError("stddev convert tag error:%d", ret);
+ }
+ }else{
+ memcpy(p + offset, row[i], length[i]);
+ }
}
offset += pExpr->base.resBytes;
}
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index 3b3770dae978f079576799190771b9c628cafb31..bbd448e2d8b5069fae438d7adb9de14a31446d1b 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -47,7 +47,17 @@ int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
int32_t tscNumOfThreads = 1; // num of rpc threads
+#ifdef _TD_POWER_
+char tscLogFileName[12] = "powerlog";
+#elif (_TD_TQ_ == true)
+char tscLogFileName[12] = "tqlog";
+#elif (_TD_PRO_ == true)
+char tscLogFileName[12] = "prolog";
+#elif (_TD_KH_ == true)
+char tscLogFileName[12] = "khclientlog";
+#else
char tscLogFileName[12] = "taoslog";
+#endif
int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
@@ -107,7 +117,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
rpcObj.pDnodeConn = rpcOpen(&rpcInit);
if (rpcObj.pDnodeConn == NULL) {
pthread_mutex_unlock(&rpcObjMutex);
- tscError("failed to init connection to TDengine");
+ tscError("failed to init connection to server");
return -1;
}
@@ -213,7 +223,7 @@ void taos_init_imp(void) {
#ifdef LUA_EMBEDDED
scriptEnvPoolInit();
#endif
- tscDebug("starting to initialize TAOS client ...");
+ tscDebug("starting to initialize client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}
diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt
index d9c4a84234184b14d272854838625e023dd55cea..ac91b498309bd951fab5860b223162ef40aff606 100644
--- a/src/dnode/CMakeLists.txt
+++ b/src/dnode/CMakeLists.txt
@@ -19,10 +19,14 @@ ENDIF ()
ADD_EXECUTABLE(taosd ${SRC})
+IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
+ ADD_DEPENDENCIES(taosd jemalloc)
+ENDIF ()
+
IF (TD_BUILD_HTTP)
-TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson ${LINK_LUA} lz4 balance sync ${LINK_JEMALLOC})
ELSE ()
-TARGET_LINK_LIBRARIES(taosd mnode monitor tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(taosd mnode monitor tsdb twal vnode cJson ${LINK_LUA} lz4 balance sync ${LINK_JEMALLOC})
ENDIF ()
IF (TD_SOMODE_STATIC)
diff --git a/src/inc/taos.h b/src/inc/taos.h
index 6cd62d3177d2490c5c89bf910e258c956c2f69fc..910ec8c7d83b1f01ce4b14dcdcc718cc0fdbc1f9 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -208,6 +208,8 @@ DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision);
+DLL_EXPORT int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 9d48ed59cecfffe1ea36971fa502ed9dae3fb0bc..f05d1466371f32358e3442f53735028b20641d16 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -80,12 +80,17 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_DATA_NULL_STR_L "null"
#define TSDB_DEFAULT_USER "root"
+
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
#elif (_TD_TQ_ == true)
#define TSDB_DEFAULT_PASS "tqueue"
#elif (_TD_PRO_ == true)
#define TSDB_DEFAULT_PASS "prodb"
+#elif (_TD_KH_ == true)
+#define TSDB_DEFAULT_PASS "khroot"
+#elif (_TD_JH_ == true)
+#define TSDB_DEFAULT_PASS "jhdata"
#else
#define TSDB_DEFAULT_PASS "taosdata"
#endif
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index 6bc22e5fc8ddcdae1ebd42e400c1c6707b959fea..8293a09231e638748c885f68bde3f6c64285f763 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -3,5 +3,10 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
-ADD_SUBDIRECTORY(taosdump)
+
+IF (TD_TAOS_TOOLS)
+ INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/src/kit/taos_tools/deps/avro/lang/c/src)
+ ADD_SUBDIRECTORY(taos-tools)
+ENDIF ()
+
ADD_SUBDIRECTORY(taospack)
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index d69a267707470e7a5df4edfa85764aae580a13a6..db572c9d310709dc6fe024b351126679ea9805e1 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -19,9 +19,9 @@ ELSE ()
ENDIF ()
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(shell taos_static cJson lua ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(shell taos_static cJson ${LINK_LUA} ${LINK_JEMALLOC})
ELSE ()
- TARGET_LINK_LIBRARIES(shell taos cJson lua ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(shell taos cJson ${LINK_LUA} ${LINK_JEMALLOC})
ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
@@ -36,8 +36,14 @@ ELSEIF (TD_WINDOWS)
IF (TD_POWER)
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power)
+ ELSEIF (TD_TQ)
+ SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME tq)
ELSEIF (TD_PRO)
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME prodbc)
+ ELSEIF (TD_KH)
+ SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME khclient)
+ ELSEIF (TD_JH)
+ SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME jh_taos)
ELSE ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
ENDIF ()
diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h
index 03ccfe2d576df76407bc7a22cf17d884dd2bad51..9c5794278c5bd9545fb6260e4f8442d8c9e8cad9 100644
--- a/src/kit/shell/inc/shell.h
+++ b/src/kit/shell/inc/shell.h
@@ -28,8 +28,16 @@
#define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 1048586
-#ifdef _TD_PRO_
+#ifdef _TD_POWER_
+ #define HISTORY_FILE ".power_history"
+#elif (_TD_TQ_ == true)
+ #define HISTORY_FILE ".tq_history"
+#elif (_TD_PRO_ == true)
#define HISTORY_FILE ".prodb_history"
+#elif (_TD_KH_ == true)
+ #define HISTORY_FILE ".kh_history"
+#elif (_TD_JH_ == true)
+ #define HISTORY_FILE ".jh_taos_history"
#else
#define HISTORY_FILE ".taos_history"
#endif
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index 8ab9bfcf4e7685081cd6f09990f5365d94c4094b..e3e86e518690c8c4f773471b93bdb381a5bb8dc1 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -34,28 +34,36 @@
char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by PowerDB, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "power> ";
-
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 7;
#elif (_TD_TQ_ == true)
char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "tq> ";
-
-char CONTINUE_PROMPT[] = " -> ";
+char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 4;
#elif (_TD_PRO_ == true)
char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by Hanatech, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "ProDB> ";
-
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 7;
+#elif (_TD_KH_ == true)
+char CLIENT_VERSION[] = "Welcome to the KingHistorian shell from %s, Client Version:%s\n"
+ "Copyright (c) 2021 by Hanatech, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "kh> ";
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 4;
+#elif (_TD_JH_ == true)
+char CLIENT_VERSION[] = "Welcome to the jh_iot shell from %s, Client Version:%s\n"
+ "Copyright (c) 2021 by jinheng, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "jh_taos> ";
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 9;
#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
char PROMPT_HEADER[] = "taos> ";
-
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 6;
#endif
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index 0301fe6df2a6a1fbf8a75507193dfacb55385895..0babd88333c846c1f0b5dbe4baede4a6d38cbcdd 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -17,6 +17,8 @@
#include "taos.h"
#include "shellCommand.h"
+#define SHELL_INPUT_MAX_COMMAND_SIZE 500000
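+// Maximum number of wide characters fetched from the console in a single ReadConsole() call (see shellReadCommand).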
+
extern char configDir[];
char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
@@ -274,32 +276,35 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
// Read input.
void *console = GetStdHandle(STD_INPUT_HANDLE);
unsigned long read;
- wchar_t c;
+ wchar_t *c= (wchar_t *)calloc(SHELL_INPUT_MAX_COMMAND_SIZE, sizeof(wchar_t));
char mbStr[16];
while (1) {
- int ret = ReadConsole(console, &c, 1, &read, NULL);
- int size = WideCharToMultiByte(CP_UTF8, 0, &c, read, mbStr, sizeof(mbStr), NULL, NULL);
- mbStr[size] = 0;
- switch (c) {
- case '\n':
- if (isReadyGo(&cmd)) {
- sprintf(command, "%s%s", cmd.buffer, cmd.command);
- free(cmd.buffer);
- cmd.buffer = NULL;
- free(cmd.command);
- cmd.command = NULL;
- return 0;
- } else {
- shellPrintContinuePrompt();
- updateBuffer(&cmd);
- }
- break;
- case '\r':
- break;
- default:
- for (int i = 0; i < size; ++i) {
- insertChar(&cmd, mbStr[i]);
- }
+ int ret = ReadConsole(console, c, SHELL_INPUT_MAX_COMMAND_SIZE, &read, NULL);
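+ // A single ReadConsole() call may return many characters (e.g. pasted input); convert and
+ // process each wide character individually.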
+ for (int input_index = 0; input_index < read; input_index++) {
+ int size = WideCharToMultiByte(CP_UTF8, 0, &c[input_index], 1, mbStr, sizeof(mbStr), NULL, NULL);
+ mbStr[size] = 0;
+ switch (c[input_index]) {
+ case '\n':
+ if (isReadyGo(&cmd)) {
+ sprintf(command, "%s%s", cmd.buffer, cmd.command);
+ free(cmd.buffer);
+ cmd.buffer = NULL;
+ free(cmd.command);
+ cmd.command = NULL;
+ free(c);
+ return 0;
+ } else {
+ shellPrintContinuePrompt();
+ updateBuffer(&cmd);
+ }
+ break;
+ case '\r':
+ break;
+ default:
+ for (int i = 0; i < size; ++i) {
+ insertChar(&cmd, mbStr[i]);
+ }
+ }
}
}
@@ -327,6 +332,20 @@ void *shellLoopQuery(void *arg) {
return NULL;
}
-void get_history_path(char *history) { sprintf(history, "C:/TDengine/%s", HISTORY_FILE); }
+void get_history_path(char *history) {
+#ifdef _TD_POWER_
+ sprintf(history, "C:/PowerDB/%s", HISTORY_FILE);
+#elif (_TD_TQ_ == true)
+ sprintf(history, "C:/TQueue/%s", HISTORY_FILE);
+#elif (_TD_PRO_ == true)
+ sprintf(history, "C:/ProDB/%s", HISTORY_FILE);
+#elif (_TD_KH_ == true)
+ sprintf(history, "C:/KingHistorian/%s", HISTORY_FILE);
+#elif (_TD_JH_ == true)
+ sprintf(history, "C:/jh_iot/%s", HISTORY_FILE);
+#else
+ sprintf(history, "C:/TDengine/%s", HISTORY_FILE);
+#endif
+}
void exitShell() { exit(EXIT_SUCCESS); }
diff --git a/src/kit/taos-tools b/src/kit/taos-tools
new file mode 160000
index 0000000000000000000000000000000000000000..7ed1e6b485d04cc93c4bb0bc9e844086da1b4714
--- /dev/null
+++ b/src/kit/taos-tools
@@ -0,0 +1 @@
+Subproject commit 7ed1e6b485d04cc93c4bb0bc9e844086da1b4714
diff --git a/src/kit/taosdemo/inc/demo.h b/src/kit/taosdemo/inc/demo.h
index 9208c8f0c25fe4ab2672a1d42660fc42eb2b781d..37dd01449da7e67e9165e0b22d7160d508595e52 100644
--- a/src/kit/taosdemo/inc/demo.h
+++ b/src/kit/taosdemo/inc/demo.h
@@ -158,6 +158,7 @@ extern char configDir[];
#define DEFAULT_TOTAL_INSERT 0
#define DEFAULT_TOTAL_AFFECT 0
#define DEFAULT_DEMO_MODE true
+#define DEFAULT_CHINESE_OPT false
#define DEFAULT_CREATE_BATCH 10
#define DEFAULT_SUB_INTERVAL 10000
#define DEFAULT_QUERY_INTERVAL 10000
@@ -329,6 +330,7 @@ typedef struct SArguments_S {
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
bool demo_mode; // use default column name and semi-random data
+ bool chinese;
} SArguments;
typedef struct SColumn_S {
diff --git a/src/kit/taosdemo/src/demoCommandOpt.c b/src/kit/taosdemo/src/demoCommandOpt.c
index ba7f524e61f92c5e2e7b4058babc018a22acb3e8..3300c9ab889fc9b4664740f09e30de377fb33494 100644
--- a/src/kit/taosdemo/src/demoCommandOpt.c
+++ b/src/kit/taosdemo/src/demoCommandOpt.c
@@ -736,10 +736,12 @@ int parse_args(int argc, char *argv[]) {
g_args.columnCount = MAX_NUM_COLUMNS;
}
- for (int col = DEFAULT_DATATYPE_NUM; col < g_args.columnCount;
+ for (int col = 0; col < g_args.columnCount;
col++) {
- g_args.dataType[col] = "INT";
- g_args.data_type[col] = TSDB_DATA_TYPE_INT;
+ if (g_args.data_type[col] == TSDB_DATA_TYPE_NULL) {
+ g_args.dataType[col] = "INT";
+ g_args.data_type[col] = TSDB_DATA_TYPE_INT;
+ }
}
for (int col = g_args.columnCount; col < MAX_NUM_COLUMNS; col++) {
g_args.dataType[col] = NULL;
@@ -830,8 +832,10 @@ int parse_args(int argc, char *argv[]) {
g_args.data_type[0] = TSDB_DATA_TYPE_NULL;
}
g_args.dataType[0] = dataType;
- g_args.dataType[1] = NULL;
- g_args.data_type[1] = TSDB_DATA_TYPE_NULL;
+ if (g_args.data_type[1] != TSDB_DATA_TYPE_INT) {
+ g_args.dataType[1] = NULL;
+ g_args.data_type[1] = TSDB_DATA_TYPE_NULL;
+ }
} else {
// more than one col
int index = 0;
@@ -899,8 +903,10 @@ int parse_args(int argc, char *argv[]) {
token = strsep(&running, ",");
if (index >= MAX_NUM_COLUMNS) break;
}
- g_args.dataType[index] = NULL;
- g_args.data_type[index] = TSDB_DATA_TYPE_NULL;
+ if (g_args.data_type[index] != TSDB_DATA_TYPE_INT) {
+ g_args.dataType[index] = NULL;
+ g_args.data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
}
} else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) ||
(0 ==
@@ -972,6 +978,10 @@ int parse_args(int argc, char *argv[]) {
(0 == strncmp(argv[i], "--escape-character",
strlen("--escape-character")))) {
g_args.escapeChar = true;
+ } else if ((0 == strncmp(argv[i], "-C", strlen("-C"))) ||
+ (0 == strncmp(argv[i], "--chinese",
+ strlen("--chinese")))) {
+ g_args.chinese = true;
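+ // e.g. "taosdemo -C" (or "--chinese") fills binary/nchar columns with random Chinese characters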
} else if ((strcmp(argv[i], "-N") == 0) ||
(0 == strcmp(argv[i], "--normal-table"))) {
g_args.demo_mode = false;
diff --git a/src/kit/taosdemo/src/demoData.c b/src/kit/taosdemo/src/demoData.c
index ae796a111cb0aa30e78cca20bab6929fe9a56557..59806864306b9ecf5c3ed35c9be631c50244da58 100644
--- a/src/kit/taosdemo/src/demoData.c
+++ b/src/kit/taosdemo/src/demoData.c
@@ -238,16 +238,81 @@ float UNUSED_FUNC demo_phase_float() {
360);
}
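+// Encode a single Unicode code point (unic) as UTF-8 into p; returns the number of bytes written (1-6), or 0 if out of range.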
+static int usc2utf8(char* p, int unic) {
+ if ( unic <= 0x0000007F )
+ {
+ *p = (unic & 0x7F);
+ return 1;
+ }
+ else if ( unic >= 0x00000080 && unic <= 0x000007FF )
+ {
+ *(p+1) = (unic & 0x3F) | 0x80;
+ *p = ((unic >> 6) & 0x1F) | 0xC0;
+ return 2;
+ }
+ else if ( unic >= 0x00000800 && unic <= 0x0000FFFF )
+ {
+ *(p+2) = (unic & 0x3F) | 0x80;
+ *(p+1) = ((unic >> 6) & 0x3F) | 0x80;
+ *p = ((unic >> 12) & 0x0F) | 0xE0;
+ return 3;
+ }
+ else if ( unic >= 0x00010000 && unic <= 0x001FFFFF )
+ {
+ *(p+3) = (unic & 0x3F) | 0x80;
+ *(p+2) = ((unic >> 6) & 0x3F) | 0x80;
+ *(p+1) = ((unic >> 12) & 0x3F) | 0x80;
+ *p = ((unic >> 18) & 0x07) | 0xF0;
+ return 4;
+ }
+ else if ( unic >= 0x00200000 && unic <= 0x03FFFFFF )
+ {
+ *(p+4) = (unic & 0x3F) | 0x80;
+ *(p+3) = ((unic >> 6) & 0x3F) | 0x80;
+ *(p+2) = ((unic >> 12) & 0x3F) | 0x80;
+ *(p+1) = ((unic >> 18) & 0x3F) | 0x80;
+ *p = ((unic >> 24) & 0x03) | 0xF8;
+ return 5;
+ }
+ else if ( unic >= 0x04000000 && unic <= 0x7FFFFFFF )
+ {
+ *(p+5) = (unic & 0x3F) | 0x80;
+ *(p+4) = ((unic >> 6) & 0x3F) | 0x80;
+ *(p+3) = ((unic >> 12) & 0x3F) | 0x80;
+ *(p+2) = ((unic >> 18) & 0x3F) | 0x80;
+ *(p+1) = ((unic >> 24) & 0x3F) | 0x80;
+ *p = ((unic >> 30) & 0x01) | 0xFC;
+ return 6;
+ }
+ return 0;
+}
+
void rand_string(char *str, int size) {
- str[0] = 0;
- if (size > 0) {
- //--size;
- int n;
- for (n = 0; n < size; n++) {
- int key = abs(taosRandom()) % (int)(sizeof(charset) - 1);
- str[n] = charset[key];
+ if (g_args.chinese) {
+ char* pstr = str;
+ int move = 0;
+ while (size > 0) {
+ // A Chinese character needs 3 bytes of space
+ if (size < 3) {
+ break;
+ }
+ // Basic Chinese characters occupy Unicode code points 0x4e00 to 0x9fa5
+ int unic = 0x4e00 + rand() % (0x9fa5 - 0x4e00);
+ move = usc2utf8(pstr, unic);
+ pstr += move;
+ size -= move;
+ }
+ } else {
+ str[0] = 0;
+ if (size > 0) {
+ //--size;
+ int n;
+ for (n = 0; n < size; n++) {
+ int key = abs(taosRandom()) % (int)(sizeof(charset) - 1);
+ str[n] = charset[key];
+ }
+ str[n] = 0;
}
- str[n] = 0;
}
}
diff --git a/src/kit/taosdemo/src/demoInsert.c b/src/kit/taosdemo/src/demoInsert.c
index db300c2dfee0daf857ef01084b39653d9f139731..fe36aff5644c452ccb4216b75c6d7057ee6cee76 100644
--- a/src/kit/taosdemo/src/demoInsert.c
+++ b/src/kit/taosdemo/src/demoInsert.c
@@ -1138,7 +1138,17 @@ void postFreeResource() {
tmfree(g_randfloat_buff);
tmfree(g_rand_current_buff);
tmfree(g_rand_phase_buff);
-
+ tmfree(g_randdouble_buff);
+ tmfree(g_randuint_buff);
+ tmfree(g_randutinyint_buff);
+ tmfree(g_randusmallint_buff);
+ tmfree(g_randubigint_buff);
+ tmfree(g_randint);
+ tmfree(g_randuint);
+ tmfree(g_randbigint);
+ tmfree(g_randubigint);
+ tmfree(g_randfloat);
+ tmfree(g_randdouble);
tmfree(g_sampleDataBuf);
for (int l = 0; l < g_args.columnCount; l++) {
diff --git a/src/kit/taosdemo/src/demoJsonOpt.c b/src/kit/taosdemo/src/demoJsonOpt.c
index e74d2906c8f3294f0531145c8f13e5ce776e444f..b8d75ccacac07d225788946611d521b8b79a5c10 100644
--- a/src/kit/taosdemo/src/demoJsonOpt.c
+++ b/src/kit/taosdemo/src/demoJsonOpt.c
@@ -450,6 +450,25 @@ int getMetaFromInsertJsonFile(cJSON *root) {
goto PARSE_OVER;
}
+ cJSON *chineseOpt = cJSON_GetObjectItem(root, "chinese"); // yes, no,
+ if (chineseOpt && chineseOpt->type == cJSON_String &&
+ chineseOpt->valuestring != NULL) {
+ if (0 == strncasecmp(chineseOpt->valuestring, "yes", 3)) {
+ g_args.chinese = true;
+ } else if (0 == strncasecmp(chineseOpt->valuestring, "no", 2)) {
+ g_args.chinese = false;
+ } else {
+ g_args.chinese = DEFAULT_CHINESE_OPT;
+ }
+ } else if (!chineseOpt) {
+ g_args.chinese = DEFAULT_CHINESE_OPT;
+ } else {
+ errorPrint(
+ "%s",
+ "failed to read json, chinese input mistake\n");
+ goto PARSE_OVER;
+ }
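+ // Illustrative insert meta-file snippet (top level of the JSON): { ..., "chinese": "yes", ... }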
+
cJSON *answerPrompt =
cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
if (answerPrompt && answerPrompt->type == cJSON_String &&
diff --git a/src/kit/taosdemo/src/demoMain.c b/src/kit/taosdemo/src/demoMain.c
index 9251dc2ada07aa7b2708afa8280e4eadde117fe3..9941f608efa7c0d8a72171383bade43e309fede0 100644
--- a/src/kit/taosdemo/src/demoMain.c
+++ b/src/kit/taosdemo/src/demoMain.c
@@ -66,6 +66,7 @@ SArguments g_args = {
DEFAULT_TOTAL_INSERT, // totalInsertRows;
DEFAULT_TOTAL_AFFECT, // totalAffectedRows;
DEFAULT_DEMO_MODE, // demo_mode;
+ DEFAULT_CHINESE_OPT // chinese
};
int main(int argc, char *argv[]) {
diff --git a/src/kit/taosdemo/src/demoOutput.c b/src/kit/taosdemo/src/demoOutput.c
index 026673ca86edb67d752f3cee58de8ea5a6769247..c253967f8fe270dfbacd3f9dccbfd74bde4487c1 100644
--- a/src/kit/taosdemo/src/demoOutput.c
+++ b/src/kit/taosdemo/src/demoOutput.c
@@ -313,6 +313,8 @@ void printHelp() {
"Table prefix name. By default use 'd'.");
printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t",
"Use escape character for Both Stable and normmal table name");
+ printf("%s%s%s%s\n", indent, "-C, --chinese", "\t",
+ "Use Chinese characters as the data source for binary/nchar data");
printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t",
"The select sql file.");
printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t",
@@ -414,6 +416,8 @@ void printfInsertMeta() {
printf("number of records per req: \033[33m%u\033[0m\n", g_args.reqPerReq);
printf("max sql length: \033[33m%" PRIu64 "\033[0m\n",
g_args.max_sql_len);
+ printf("random prepare data: \033[33m%" PRId64 "\033[0m\n", g_args.prepared_rand);
+ printf("chinese: \033[33m%s\033[0m\n", g_args.chinese?"yes":"no");
printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
new file mode 100644
index 0000000000000000000000000000000000000000..0a573b799ae184042f1126cb179e3909c6a2249b
--- /dev/null
+++ b/src/kit/taosdemo/taosdemo.c
@@ -0,0 +1,12273 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/*
+ When a query in one thread returns an error, that thread returns instead of exiting; otherwise other threads may core dump.
+ */
+
+#include
+#include
+#include
+#define _GNU_SOURCE
+#define CURL_STATICLIB
+
+#ifdef LINUX
+#include
+#include
+#ifndef _ALPINE
+#include
+#endif
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#else
+#include
+#include
+#endif
+
+#include
+#include
+#include "cJSON.h"
+
+#include "os.h"
+#include "taos.h"
+#include "taoserror.h"
+#include "tutil.h"
+
+#define REQ_EXTRA_BUF_LEN 1024
+#define RESP_BUF_LEN 4096
+
+extern char configDir[];
+
+#define STR_INSERT_INTO "INSERT INTO "
+
+#define MAX_RECORDS_PER_REQ 32766
+
+#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into ..
+
+#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN
+#define COND_BUF_LEN (BUFFER_SIZE - 30)
+#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
+
+#define MAX_USERNAME_SIZE 64
+#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html
+#define MAX_TB_NAME_SIZE 64
+#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
+#define OPT_ABORT 1 /* –abort */
+#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
+#define MAX_PATH_LEN 4096
+
+#define DEFAULT_START_TIME 1500000000000
+
+#define MAX_PREPARED_RAND 1000000
+#define INT_BUFF_LEN 12
+#define BIGINT_BUFF_LEN 21
+#define SMALLINT_BUFF_LEN 7
+#define TINYINT_BUFF_LEN 5
+#define BOOL_BUFF_LEN 6
+#define FLOAT_BUFF_LEN 22
+#define DOUBLE_BUFF_LEN 42
+#define TIMESTAMP_BUFF_LEN 21
+
+#define MAX_SAMPLES 10000
+#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
+
+#define MAX_DB_COUNT 8
+#define MAX_SUPER_TABLE_COUNT 200
+
+#define MAX_QUERY_SQL_COUNT 100
+
+#define MAX_DATABASE_COUNT 256
+#define INPUT_BUF_LEN 256
+
+#define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq
+#define SMALL_BUFF_LEN 8
+#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3)
+#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)
+
+#define DEFAULT_NTHREADS 8
+#define DEFAULT_TIMESTAMP_STEP 1
+#define DEFAULT_INTERLACE_ROWS 0
+#define DEFAULT_DATATYPE_NUM 1
+#define DEFAULT_CHILDTABLES 10000
+
+#define STMT_BIND_PARAM_BATCH 1
+
+char* g_sampleDataBuf = NULL;
+#if STMT_BIND_PARAM_BATCH == 1
+ // bind param batch
+char* g_sampleBindBatchArray = NULL;
+#endif
+
+enum TEST_MODE {
+ INSERT_TEST, // 0
+ QUERY_TEST, // 1
+ SUBSCRIBE_TEST, // 2
+ INVAID_TEST
+};
+
+typedef enum CREATE_SUB_TABLE_MOD_EN {
+ PRE_CREATE_SUBTBL,
+ AUTO_CREATE_SUBTBL,
+ NO_CREATE_SUBTBL
+} CREATE_SUB_TABLE_MOD_EN;
+
+typedef enum TABLE_EXISTS_EN {
+ TBL_NO_EXISTS,
+ TBL_ALREADY_EXISTS,
+ TBL_EXISTS_BUTT
+} TABLE_EXISTS_EN;
+
+enum enumSYNC_MODE {
+ SYNC_MODE,
+ ASYNC_MODE,
+ MODE_BUT
+};
+
+enum enum_TAOS_INTERFACE {
+ TAOSC_IFACE,
+ REST_IFACE,
+ STMT_IFACE,
+ INTERFACE_BUT
+};
+
+typedef enum enumQUERY_CLASS {
+ SPECIFIED_CLASS,
+ STABLE_CLASS,
+ CLASS_BUT
+} QUERY_CLASS;
+
+typedef enum enum_PROGRESSIVE_OR_INTERLACE {
+ PROGRESSIVE_INSERT_MODE,
+ INTERLACE_INSERT_MODE,
+ INVALID_INSERT_MODE
+} PROG_OR_INTERLACE_MODE;
+
+typedef enum enumQUERY_TYPE {
+ NO_INSERT_TYPE,
+ INSERT_TYPE,
+ QUERY_TYPE_BUT
+} QUERY_TYPE;
+
+enum _show_db_index {
+ TSDB_SHOW_DB_NAME_INDEX,
+ TSDB_SHOW_DB_CREATED_TIME_INDEX,
+ TSDB_SHOW_DB_NTABLES_INDEX,
+ TSDB_SHOW_DB_VGROUPS_INDEX,
+ TSDB_SHOW_DB_REPLICA_INDEX,
+ TSDB_SHOW_DB_QUORUM_INDEX,
+ TSDB_SHOW_DB_DAYS_INDEX,
+ TSDB_SHOW_DB_KEEP_INDEX,
+ TSDB_SHOW_DB_CACHE_INDEX,
+ TSDB_SHOW_DB_BLOCKS_INDEX,
+ TSDB_SHOW_DB_MINROWS_INDEX,
+ TSDB_SHOW_DB_MAXROWS_INDEX,
+ TSDB_SHOW_DB_WALLEVEL_INDEX,
+ TSDB_SHOW_DB_FSYNC_INDEX,
+ TSDB_SHOW_DB_COMP_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_UPDATE_INDEX,
+ TSDB_SHOW_DB_STATUS_INDEX,
+ TSDB_MAX_SHOW_DB
+};
+
+// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
+enum _show_stables_index {
+ TSDB_SHOW_STABLES_NAME_INDEX,
+ TSDB_SHOW_STABLES_CREATED_TIME_INDEX,
+ TSDB_SHOW_STABLES_COLUMNS_INDEX,
+ TSDB_SHOW_STABLES_METRIC_INDEX,
+ TSDB_SHOW_STABLES_UID_INDEX,
+ TSDB_SHOW_STABLES_TID_INDEX,
+ TSDB_SHOW_STABLES_VGID_INDEX,
+ TSDB_MAX_SHOW_STABLES
+};
+
+enum _describe_table_index {
+ TSDB_DESCRIBE_METRIC_FIELD_INDEX,
+ TSDB_DESCRIBE_METRIC_TYPE_INDEX,
+ TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
+ TSDB_DESCRIBE_METRIC_NOTE_INDEX,
+ TSDB_MAX_DESCRIBE_METRIC
+};
+
+/* Used by main to communicate with parse_opt. */
+static char *g_dupstr = NULL;
+
+typedef struct SArguments_S {
+ char *metaFile;
+ uint32_t test_mode;
+ char *host;
+ uint16_t port;
+ uint16_t iface;
+ char * user;
+ char password[SHELL_MAX_PASSWORD_LEN];
+ char * database;
+ int replica;
+ char * tb_prefix;
+ char * sqlFile;
+ bool use_metric;
+ bool drop_database;
+ bool aggr_func;
+ bool answer_yes;
+ bool debug_print;
+ bool verbose_print;
+ bool performance_print;
+ char * output_file;
+ bool async_mode;
+ char data_type[MAX_NUM_COLUMNS+1];
+ char *dataType[MAX_NUM_COLUMNS+1];
+ uint32_t binwidth;
+ uint32_t columnCount;
+ uint64_t lenOfOneRow;
+ uint32_t nthreads;
+ uint64_t insert_interval;
+ uint64_t timestamp_step;
+ int64_t query_times;
+ int64_t prepared_rand;
+ uint32_t interlaceRows;
+ uint32_t reqPerReq; // num_of_records_per_req
+ uint64_t max_sql_len;
+ int64_t ntables;
+ int64_t insertRows;
+ int abort;
+ uint32_t disorderRatio; // 0: no disorder, >0: x%
+ int disorderRange; // ms, us or ns. according to database precision
+ uint32_t method_of_delete;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
+ bool demo_mode; // use default column name and semi-random data
+} SArguments;
+
+typedef struct SColumn_S {
+ char field[TSDB_COL_NAME_LEN];
+ char data_type;
+ char dataType[DATATYPE_BUFF_LEN];
+ uint32_t dataLen;
+ char note[NOTE_BUFF_LEN];
+} StrColumn;
+
+typedef struct SSuperTable_S {
+ char stbName[TSDB_TABLE_NAME_LEN];
+ char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample
+ char childTblPrefix[TBNAME_PREFIX_LEN];
+ uint16_t childTblExists;
+ int64_t childTblCount;
+ uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
+ uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
+ uint16_t iface; // 0: taosc, 1: rest, 2: stmt
+ int64_t childTblLimit;
+ uint64_t childTblOffset;
+
+ // int multiThreadWriteOneTbl; // 0: no, 1: yes
+ uint32_t interlaceRows; //
+ int disorderRatio; // 0: no disorder, >0: x%
+ int disorderRange; // ms, us or ns. according to database precision
+ uint64_t maxSqlLen; //
+
+ uint64_t insertInterval; // insert interval, will override global insert interval
+ int64_t insertRows;
+ int64_t timeStampStep;
+ char startTimestamp[MAX_TB_NAME_SIZE];
+ char sampleFormat[SMALL_BUFF_LEN]; // csv, json
+ char sampleFile[MAX_FILE_NAME_LEN];
+ char tagsFile[MAX_FILE_NAME_LEN];
+
+ uint32_t columnCount;
+ StrColumn columns[TSDB_MAX_COLUMNS];
+ uint32_t tagCount;
+ StrColumn tags[TSDB_MAX_TAGS];
+
+ char* childTblName;
+ char* colsOfCreateChildTable;
+ uint64_t lenOfOneRow;
+ uint64_t lenOfTagOfOneRow;
+
+ char* sampleDataBuf;
+ bool useSampleTs;
+
+ uint32_t tagSource; // 0: rand, 1: tag sample
+ char* tagDataBuf;
+ uint32_t tagSampleCount;
+ uint32_t tagUsePos;
+
+#if STMT_BIND_PARAM_BATCH == 1
+ // bind param batch
+ char *sampleBindBatchArray;
+#endif
+ // statistics
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
+} SSuperTable;
+
+typedef struct {
+ char name[TSDB_DB_NAME_LEN];
+ char create_time[32];
+ int64_t ntables;
+ int32_t vgroups;
+ int16_t replica;
+ int16_t quorum;
+ int16_t days;
+ char keeplist[64];
+ int32_t cache; //MB
+ int32_t blocks;
+ int32_t minrows;
+ int32_t maxrows;
+ int8_t wallevel;
+ int32_t fsync;
+ int8_t comp;
+ int8_t cachelast;
+ char precision[SMALL_BUFF_LEN]; // time resolution
+ int8_t update;
+ char status[16];
+} SDbInfo;
+
+typedef struct SDbCfg_S {
+ // int maxtablesPerVnode;
+ uint32_t minRows; // 0 means default
+ uint32_t maxRows; // 0 means default
+ int comp;
+ int walLevel;
+ int cacheLast;
+ int fsync;
+ int replica;
+ int update;
+ int keep;
+ int days;
+ int cache;
+ int blocks;
+ int quorum;
+ char precision[SMALL_BUFF_LEN];
+} SDbCfg;
+
+typedef struct SDataBase_S {
+ char dbName[TSDB_DB_NAME_LEN];
+ bool drop; // 0: use exists, 1: if exists, drop then new create
+ SDbCfg dbCfg;
+ uint64_t superTblCount;
+ SSuperTable* superTbls;
+} SDataBase;
+
+typedef struct SDbs_S {
+ char cfgDir[MAX_FILE_NAME_LEN];
+ char host[MAX_HOSTNAME_SIZE];
+ struct sockaddr_in serv_addr;
+
+ uint16_t port;
+ char user[MAX_USERNAME_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
+ char resultFile[MAX_FILE_NAME_LEN];
+ bool use_metric;
+ bool aggr_func;
+ bool asyncMode;
+
+ uint32_t threadCount;
+ uint32_t threadCountForCreateTbl;
+ uint32_t dbCount;
+ // statistics
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
+
+ SDataBase* db;
+} SDbs;
+
+typedef struct SpecifiedQueryInfo_S {
+ uint64_t queryInterval; // 0: unlimited > 0 loop/s
+ uint32_t concurrent;
+ int sqlCount;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
+ uint64_t queryTimes;
+ bool subscribeRestart;
+ int subscribeKeepProgress;
+ char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
+ int resubAfterConsume[MAX_QUERY_SQL_COUNT];
+ int endAfterConsume[MAX_QUERY_SQL_COUNT];
+ TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
+ char topic[MAX_QUERY_SQL_COUNT][32];
+ int consumed[MAX_QUERY_SQL_COUNT];
+ TAOS_RES* res[MAX_QUERY_SQL_COUNT];
+ uint64_t totalQueried;
+} SpecifiedQueryInfo;
+
+typedef struct SuperQueryInfo_S {
+ char stbName[TSDB_TABLE_NAME_LEN];
+ uint64_t queryInterval; // 0: unlimited > 0 loop/s
+ uint32_t threadCnt;
+ uint32_t asyncMode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
+ bool subscribeRestart;
+ int subscribeKeepProgress;
+ uint64_t queryTimes;
+ int64_t childTblCount;
+ char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq
+ int sqlCount;
+ char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1];
+ char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
+ int resubAfterConsume;
+ int endAfterConsume;
+ TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
+
+ char* childTblName;
+ uint64_t totalQueried;
+} SuperQueryInfo;
+
+typedef struct SQueryMetaInfo_S {
+ char cfgDir[MAX_FILE_NAME_LEN];
+ char host[MAX_HOSTNAME_SIZE];
+ uint16_t port;
+ struct sockaddr_in serv_addr;
+ char user[MAX_USERNAME_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
+ char dbName[TSDB_DB_NAME_LEN];
+ char queryMode[SMALL_BUFF_LEN]; // taosc, rest
+
+ SpecifiedQueryInfo specifiedQueryInfo;
+ SuperQueryInfo superQueryInfo;
+ uint64_t totalQueried;
+} SQueryMetaInfo;
+
+typedef struct SThreadInfo_S {
+ TAOS * taos;
+ TAOS_STMT *stmt;
+ int64_t *bind_ts;
+
+#if STMT_BIND_PARAM_BATCH == 1
+ int64_t *bind_ts_array;
+ char *bindParams;
+ char *is_null;
+#else
+ char* sampleBindArray;
+#endif
+
+ int threadID;
+ char db_name[TSDB_DB_NAME_LEN];
+ uint32_t time_precision;
+ char filePath[4096];
+ FILE *fp;
+ char tb_prefix[TSDB_TABLE_NAME_LEN];
+ uint64_t start_table_from;
+ uint64_t end_table_to;
+ int64_t ntables;
+ int64_t tables_created;
+ uint64_t data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
+ SSuperTable* stbInfo;
+ char *buffer; // sql cmd buffer
+
+ // for async insert
+ tsem_t lock_sem;
+ int64_t counter;
+ uint64_t st;
+ uint64_t et;
+ uint64_t lastTs;
+
+ // sample data
+ int64_t samplePos;
+ // statistics
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
+
+ // insert delay statistics
+ uint64_t cntDelay;
+ uint64_t totalDelay;
+ uint64_t avgDelay;
+ uint64_t maxDelay;
+ uint64_t minDelay;
+
+ // seq of query or subscribe
+ uint64_t querySeq; // sequence number of sql command
+ TAOS_SUB* tsub;
+
+ int sockfd;
+} threadInfo;
+
+#ifdef WINDOWS
+#define _CRT_RAND_S
+
+#include
+#include
+
+typedef unsigned __int32 uint32_t;
+
+#pragma comment ( lib, "ws2_32.lib" )
+// Some old MinGW/CYGWIN distributions don't define this:
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING
+
+static HANDLE g_stdoutHandle;
+static DWORD g_consoleMode;
+
+static void setupForAnsiEscape(void) {
+ DWORD mode = 0;
+ g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ if(g_stdoutHandle == INVALID_HANDLE_VALUE) {
+ exit(GetLastError());
+ }
+
+ if(!GetConsoleMode(g_stdoutHandle, &mode)) {
+ exit(GetLastError());
+ }
+
+ g_consoleMode = mode;
+
+ // Enable ANSI escape codes
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+
+ if(!SetConsoleMode(g_stdoutHandle, mode)) {
+ exit(GetLastError());
+ }
+}
+
+static void resetAfterAnsiEscape(void) {
+ // Reset colors
+ printf("\x1b[0m");
+
+ // Reset console mode
+ if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) {
+ exit(GetLastError());
+ }
+}
+
+static int taosRandom()
+{
+ int number;
+ rand_s(&number);
+
+ return number;
+}
+#else // Not windows
+static void setupForAnsiEscape(void) {}
+
+static void resetAfterAnsiEscape(void) {
+ // Reset colors
+ printf("\x1b[0m");
+}
+
+#include
+
+static int taosRandom()
+{
+ return rand();
+}
+
+#endif // ifdef Windows
+
+static void prompt();
+static int createDatabasesAndStables();
+static void createChildTables();
+static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
+static int postProceSql(char *host, uint16_t port, char* sqlstr, threadInfo *pThreadInfo);
+static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
+ int disorderRatio, int disorderRange);
+static bool getInfoFromJsonFile(char* file);
+static void init_rand_data();
+static int regexMatch(const char *s, const char *reg, int cflags);
+
+/* ************ Global variables ************ */
+
+int32_t* g_randint;
+uint32_t* g_randuint;
+int64_t* g_randbigint;
+uint64_t* g_randubigint;
+float* g_randfloat;
+double* g_randdouble;
+
+char *g_randbool_buff = NULL;
+char *g_randint_buff = NULL;
+char *g_randuint_buff = NULL;
+char *g_rand_voltage_buff = NULL;
+char *g_randbigint_buff = NULL;
+char *g_randubigint_buff = NULL;
+char *g_randsmallint_buff = NULL;
+char *g_randusmallint_buff = NULL;
+char *g_randtinyint_buff = NULL;
+char *g_randutinyint_buff = NULL;
+char *g_randfloat_buff = NULL;
+char *g_rand_current_buff = NULL;
+char *g_rand_phase_buff = NULL;
+char *g_randdouble_buff = NULL;
+
+char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)",
+ "max(current)", "min(current)", "first(current)", "last(current)"};
+
+char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)",
+ "max(C0)", "min(C0)", "first(C0)", "last(C0)"};
+
+SArguments g_args = {
+ NULL, // metaFile
+ 0, // test_mode
+ "localhost", // host
+ 6030, // port
+ INTERFACE_BUT, // iface
+ "root", // user
+#ifdef _TD_POWER_
+ "powerdb", // password
+#elif (_TD_TQ_ == true)
+ "tqueue", // password
+#elif (_TD_PRO_ == true)
+ "prodb", // password
+#elif (_TD_KH_ == true)
+ "khroot", // password
+#elif (_TD_JH_ == true)
+ "jhdata", // password
+#else
+ "taosdata", // password
+#endif
+ "test", // database
+ 1, // replica
+ "d", // tb_prefix
+ NULL, // sqlFile
+ true, // use_metric
+ true, // drop_database
+ false, // aggr_func
+ false, // debug_print
+ false, // verbose_print
+ false, // performance statistic print
+ false, // answer_yes;
+ "./output.txt", // output_file
+ 0, // mode : sync or async
+ {TSDB_DATA_TYPE_FLOAT,
+ TSDB_DATA_TYPE_INT,
+ TSDB_DATA_TYPE_FLOAT},
+ {
+ "FLOAT", // dataType
+ "INT", // dataType
+ "FLOAT", // dataType. demo mode has 3 columns
+ },
+ 64, // binwidth
+ 4, // columnCount, timestamp + float + int + float
+ 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow
+ DEFAULT_NTHREADS,// nthreads
+ 0, // insert_interval
+ DEFAULT_TIMESTAMP_STEP, // timestamp_step
+ 1, // query_times
+ 10000, // prepared_rand
+ DEFAULT_INTERLACE_ROWS, // interlaceRows;
+ 30000, // reqPerReq
+ (1024*1024), // max_sql_len
+ DEFAULT_CHILDTABLES, // ntables
+ 10000, // insertRows
+ 0, // abort
+ 0, // disorderRatio
+ 1000, // disorderRange
+ 1, // method_of_delete
+ 0, // totalInsertRows;
+ 0, // totalAffectedRows;
+ true, // demo_mode;
+};
+
+static SDbs g_Dbs;
+static int64_t g_totalChildTables = DEFAULT_CHILDTABLES;
+static int64_t g_actualChildTables = 0;
+static SQueryMetaInfo g_queryInfo;
+static FILE * g_fpOfInsertResult = NULL;
+
+#if _MSC_VER <= 1900
+#define __func__ __FUNCTION__
+#endif
+
+#define debugPrint(fmt, ...) \
+ do { if (g_args.debug_print || g_args.verbose_print) \
+ fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
+
+#define verbosePrint(fmt, ...) \
+ do { if (g_args.verbose_print) \
+ fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
+
+#define performancePrint(fmt, ...) \
+ do { if (g_args.performance_print) \
+ fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
+
+#define errorPrint(fmt, ...) \
+ do {\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\
+ fprintf(stderr, " \033[0m");\
+ } while(0)
+
+#define errorPrint2(fmt, ...) \
+ do {\
+ struct tm Tm, *ptm;\
+ struct timeval timeSecs; \
+ time_t curTime;\
+ gettimeofday(&timeSecs, NULL); \
+ curTime = timeSecs.tv_sec;\
+ ptm = localtime_r(&curTime, &Tm);\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ",\
+ ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\
+ ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\
+ taosGetSelfPthreadId());\
+ fprintf(stderr, " \033[0m");\
+ errorPrint(fmt, __VA_ARGS__);\
+ } while(0)
+
+// for strncpy buffer overflow
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+
+///////////////////////////////////////////////////
+
+static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); }
+
+#ifndef TAOSDEMO_COMMIT_SHA1
+#define TAOSDEMO_COMMIT_SHA1 "unknown"
+#endif
+
+#ifndef TD_VERNUMBER
+#define TD_VERNUMBER "unknown"
+#endif
+
+#ifndef TAOSDEMO_STATUS
+#define TAOSDEMO_STATUS "unknown"
+#endif
+
+static void printVersion() {
+ char tdengine_ver[] = TD_VERNUMBER;
+ char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1;
+ char taosdemo_status[] = TAOSDEMO_STATUS;
+
+ if (strlen(taosdemo_status) == 0) {
+ printf("taosdemo version %s-%s\n",
+ tdengine_ver, taosdemo_ver);
+ } else {
+ printf("taosdemo version %s-%s, status:%s\n",
+ tdengine_ver, taosdemo_ver, taosdemo_status);
+ }
+}
+
+static void printHelp() {
+ char indent[10] = " ";
+ printf("%s\n\n", "Usage: taosdemo [OPTION...]");
+ printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t",
+ "The meta file to the execution procedure.");
+ printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t",
+ "The user name to use when connecting to the server.");
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server.");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+ "Configuration directory.");
+ printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t",
+ "TDengine server FQDN to connect. The default host is localhost.");
+ printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t",
+ "The TCP/IP port number to use for the connection.");
+ printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t",
+ "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'.");
+ printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t",
+ "Destination database. By default is 'test'.");
+ printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t",
+ "Set the replica parameter of the database. By default use 1, min: 1, max: 3.");
+ printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t",
+ "Table prefix name. By default use 'd'.");
+ printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t",
+ "Use escape character for both STable and normal table name");
+ printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t",
+ "The select sql file.");
+ printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag.");
+ printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t",
+ "Direct output to the named file. By default use './output.txt'.");
+ printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t",
+ "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC.");
+ printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t",
+ "The data type of columns. By default use: FLOAT,INT,FLOAT. NCHAR and BINARY can also use a custom length, e.g. NCHAR(16),BINARY(8)");
+ printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t",
+ "The width of data_type 'BINARY' or 'NCHAR'. By default use ",
+ g_args.binwidth);
+ printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t",
+ "The number of columns per record. Demo mode by default is ",
+ DEFAULT_DATATYPE_NUM,
+ " (float, int, float). Max values is ",
+ MAX_NUM_COLUMNS);
+ printf("%s%s%s%s\n", indent, indent, indent,
+ "\t\t\t\tAll of the new column(s) are of type INT. If -b is used to specify column types, -l will be ignored.");
+ printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t",
+ "The number of threads. By default use ", DEFAULT_NTHREADS);
+ printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t",
+ "The sleep time (ms) between insertion. By default is 0.");
+ printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t",
+ "The timestamp step between insertion. By default is ",
+ DEFAULT_TIMESTAMP_STEP);
+ printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t",
+ "The interlace rows of insertion. By default is ",
+ DEFAULT_INTERLACE_ROWS);
+ printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t",
+ "The number of records per request. By default is 30000.");
+ printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t",
+ "The number of tables. By default is 10000.");
+ printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t",
+ "The number of records per table. By default is 10000.");
+ printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t",
+ "The values of the generated records are totally random.");
+ printf("%s\n", "\t\t\t\tBy default, data simulates a power equipment scenario.");
+ printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t",
+ "Test aggregation functions after insertion.");
+ printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt.");
+ printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t",
+ "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default is in order.");
+ printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t",
+ "Out of order data's range. Unit is ms. By default is 1000.");
+ printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t",
+ "Print debug info.");
+ printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t",
+ "Give this help list");
+ printf("%s%s%s%s\n", indent, " --usage\t", "\t\t",
+ "Give a short usage message");
+ printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version.");
+ /* printf("%s%s%s%s\n", indent, "-D", indent,
+ "Delete database if exists. 0: no, 1: yes, default is 1");
+ */
+ printf("\nMandatory or optional arguments to long options are also mandatory or optional\n\
+for any corresponding short options.\n\
+\n\
+Report bugs to .\n");
+}
+
+static bool isStringNumber(char *input)
+{
+ int len = strlen(input);
+ if (0 == len) {
+ return false;
+ }
+
+ for (int i = 0; i < len; i++) {
+ if (!isdigit(input[i]))
+ return false;
+ }
+
+ return true;
+}
+
+static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value)
+{
+ fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorUnrecognized(char *program, char *wrong_arg)
+{
+ fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires an argument -- '%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg2(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires a number argument '-%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg3(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option '%s' requires an argument\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void parse_args(int argc, char *argv[], SArguments *arguments) {
+
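+ // Each option is accepted in four forms: "-x value", "-xvalue",
+ // "--long-option value" and "--long-option=value"; unknown or
+ // malformed options terminate the program with a usage hint.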
+ for (int i = 1; i < argc; i++) {
+ if ((0 == strncmp(argv[i], "-f", strlen("-f")))
+ || (0 == strncmp(argv[i], "--file", strlen("--file")))) {
+ arguments->demo_mode = false;
+
+ if (2 == strlen(argv[i])) {
+ if (i+1 == argc) {
+ errorPrintReqArg(argv[0], "f");
+ exit(EXIT_FAILURE);
+ }
+ arguments->metaFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) {
+ arguments->metaFile = (char *)(argv[i] + strlen("-f"));
+ } else if (strlen("--file") == strlen(argv[i])) {
+ if (i+1 == argc) {
+ errorPrintReqArg3(argv[0], "--file");
+ exit(EXIT_FAILURE);
+ }
+ arguments->metaFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) {
+ arguments->metaFile = (char *)(argv[i] + strlen("--file="));
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-c", strlen("-c")))
+ || (0 == strncmp(argv[i], "--config-dir", strlen("--config-dir")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "c");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) {
+ tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), TSDB_FILENAME_LEN);
+ } else if (strlen("--config-dir") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--config-dir");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) {
+ tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-h", strlen("-h")))
+ || (0 == strncmp(argv[i], "--host", strlen("--host")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "h");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) {
+ arguments->host = (char *)(argv[i] + strlen("-h"));
+ } else if (strlen("--host") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--host");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) {
+ arguments->host = (char *)(argv[i] + strlen("--host="));
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if (strcmp(argv[i], "-PP") == 0) {
+ arguments->performance_print = true;
+ } else if ((0 == strncmp(argv[i], "-P", strlen("-P")))
+ || (0 == strncmp(argv[i], "--port", strlen("--port")))) {
+ uint64_t port;
+ char strPort[BIGINT_BUFF_LEN];
+
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "P");
+ exit(EXIT_FAILURE);
+ } else if (isStringNumber(argv[i+1])) {
+ tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN);
+ } else {
+ errorPrintReqArg2(argv[0], "P");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--port=")))) {
+ tstrncpy(strPort, (char *)(argv[i]+strlen("--port=")), BIGINT_BUFF_LEN);
+ } else {
+ errorPrintReqArg2(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-P")))) {
+ tstrncpy(strPort, (char *)(argv[i]+strlen("-P")), BIGINT_BUFF_LEN);
+ } else {
+ errorPrintReqArg2(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--port") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ } else if (isStringNumber(argv[i+1])) {
+ tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN);
+ } else {
+ errorPrintReqArg2(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ port = atoi(strPort);
+ if (port > 65535) {
+ errorWrongValue("taosdump", "-P or --port", strPort);
+ exit(EXIT_FAILURE);
+ }
+ arguments->port = (uint16_t)port;
+
+ } else if ((0 == strncmp(argv[i], "-I", strlen("-I")))
+ || (0 == strncmp(argv[i], "--interface", strlen("--interface")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "I");
+ exit(EXIT_FAILURE);
+ }
+ if (0 == strcasecmp(argv[i+1], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "-I", argv[i+1]);
+ exit(EXIT_FAILURE);
+ }
+ i++;
+ } else if (0 == strncmp(argv[i], "--interface=", strlen("--interface="))) {
+ if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorPrintReqArg3(argv[0], "--interface");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) {
+ if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "-I",
+ (char *)(argv[i] + strlen("-I")));
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--interface") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--interface");
+ exit(EXIT_FAILURE);
+ }
+ if (0 == strcasecmp(argv[i+1], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "--interface", argv[i+1]);
+ exit(EXIT_FAILURE);
+ }
+ i++;
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-u", strlen("-u")))
+ || (0 == strncmp(argv[i], "--user", strlen("--user")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "u");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) {
+ arguments->user = (char *)(argv[i++] + strlen("-u"));
+ } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) {
+ arguments->user = (char *)(argv[i++] + strlen("--user="));
+ } else if (strlen("--user") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--user");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-p", strlen("-p")))
+ || (0 == strcmp(argv[i], "--password"))) {
+ if ((strlen(argv[i]) == 2) || (0 == strcmp(argv[i], "--password"))) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", arguments->password) > 1) {
+ fprintf(stderr, "password read error!\n");
+ }
+ taosSetConsoleEcho(true);
+ } else {
+ tstrncpy(arguments->password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ }
+ } else if ((0 == strncmp(argv[i], "-o", strlen("-o")))
+ || (0 == strncmp(argv[i], "--output", strlen("--output")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--output");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else if (0 == strncmp(argv[i], "--output=", strlen("--output="))) {
+ arguments->output_file = (char *)(argv[i++] + strlen("--output="));
+ } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) {
+ arguments->output_file = (char *)(argv[i++] + strlen("-o"));
+ } else if (strlen("--output") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--output");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-s", strlen("-s")))
+ || (0 == strncmp(argv[i], "--sql-file", strlen("--sql-file")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "s");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) {
+ arguments->sqlFile = (char *)(argv[i++] + strlen("--sql-file="));
+ } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) {
+ arguments->sqlFile = (char *)(argv[i++] + strlen("-s"));
+ } else if (strlen("--sql-file") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--sql-file");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-q", strlen("-q")))
+ || (0 == strncmp(argv[i], "--query-mode", strlen("--query-mode")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "q");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "q");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--query-mode=", strlen("--query-mode="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--query-mode=")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("--query-mode=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-q")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("-q")));
+ } else {
+ errorPrintReqArg2(argv[0], "-q");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--query-mode") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-T", strlen("-T")))
+ || (0 == strncmp(argv[i], "--threads", strlen("--threads")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "T");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "T");
+ exit(EXIT_FAILURE);
+ }
+ arguments->nthreads = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) {
+ arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-T")))) {
+ arguments->nthreads = atoi((char *)(argv[i]+strlen("-T")));
+ } else {
+ errorPrintReqArg2(argv[0], "-T");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--threads") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ }
+ arguments->nthreads = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-i", strlen("-i")))
+ || (0 == strncmp(argv[i], "--insert-interval", strlen("--insert-interval")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "i");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "i");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--insert-interval=", strlen("--insert-interval="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--insert-interval=")))) {
+ arguments->insert_interval = atoi((char *)(argv[i]+strlen("--insert-interval=")));
+ } else {
+ errorPrintReqArg3(argv[0], "--insert-innterval");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-i")))) {
+ arguments->insert_interval = atoi((char *)(argv[i]+strlen("-i")));
+ } else {
+ errorPrintReqArg3(argv[0], "-i");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--insert-interval")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--insert-interval");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--insert-interval");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-S", strlen("-S")))
+ || (0 == strncmp(argv[i], "--time-step", strlen("--time-step")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "S");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "S");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--time-step=", strlen("--time-step="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--time-step=")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("--time-step=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-S")))) {
+ arguments->timestamp_step = atoi((char *)(argv[i]+strlen("-S")));
+ } else {
+ errorPrintReqArg2(argv[0], "-S");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--time-step") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if (strcmp(argv[i], "-qt") == 0) {
+ if ((argc == i+1)
+ || (!isStringNumber(argv[i+1]))) {
+ printHelp();
+ errorPrint("%s", "\n\t-qt need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->query_times = atoi(argv[++i]);
+ } else if ((0 == strncmp(argv[i], "-B", strlen("-B")))
+ || (0 == strncmp(argv[i], "--interlace-rows", strlen("--interlace-rows")))) {
+ if (strlen("-B") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "B");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "B");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlaceRows = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) {
+ arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-B")))) {
+ arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B")));
+ } else {
+ errorPrintReqArg2(argv[0], "-B");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--interlace-rows")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlaceRows = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-r", strlen("-r")))
+ || (0 == strncmp(argv[i], "--rec-per-req", 13))) {
+ if (strlen("-r") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "r");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "r");
+ exit(EXIT_FAILURE);
+ }
+ arguments->reqPerReq = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) {
+ arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-r")))) {
+ arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r")));
+ } else {
+ errorPrintReqArg2(argv[0], "-r");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--rec-per-req")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ }
+ arguments->reqPerReq = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-t", strlen("-t")))
+ || (0 == strncmp(argv[i], "--tables", strlen("--tables")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "t");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "t");
+ exit(EXIT_FAILURE);
+ }
+ arguments->ntables = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) {
+ arguments->ntables = atoi((char *)(argv[i]+strlen("--tables=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-t")))) {
+ arguments->ntables = atoi((char *)(argv[i]+strlen("-t")));
+ } else {
+ errorPrintReqArg2(argv[0], "-t");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--tables") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ }
+ arguments->ntables = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ g_totalChildTables = arguments->ntables;
+ } else if ((0 == strncmp(argv[i], "-n", strlen("-n")))
+ || (0 == strncmp(argv[i], "--records", strlen("--records")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "n");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insertRows = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--records=")))) {
+ arguments->insertRows = atoi((char *)(argv[i]+strlen("--records=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-n")))) {
+ arguments->insertRows = atoi((char *)(argv[i]+strlen("-n")));
+ } else {
+ errorPrintReqArg2(argv[0], "-n");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--records") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insertRows = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-d", strlen("-d")))
+ || (0 == strncmp(argv[i], "--database", strlen("--database")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "d");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else if (0 == strncmp(argv[i], "--database=", strlen("--database="))) {
+ arguments->output_file = (char *)(argv[i] + strlen("--database="));
+ } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) {
+ arguments->output_file = (char *)(argv[i] + strlen("-d"));
+ } else if (strlen("--database") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--database");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-l", strlen("-l")))
+ || (0 == strncmp(argv[i], "--columns", strlen("--columns")))) {
+ arguments->demo_mode = false;
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "l");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "l");
+ exit(EXIT_FAILURE);
+ }
+ arguments->columnCount = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) {
+ arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-l")))) {
+ arguments->columnCount = atoi((char *)(argv[i]+strlen("-l")));
+ } else {
+ errorPrintReqArg2(argv[0], "-l");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--columns")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ }
+ arguments->columnCount = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (arguments->columnCount > MAX_NUM_COLUMNS) {
+ printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS);
+ prompt();
+ arguments->columnCount = MAX_NUM_COLUMNS;
+ }
+
+ for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) {
+ arguments->dataType[col] = "INT";
+ arguments->data_type[col] = TSDB_DATA_TYPE_INT;
+ }
+ for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) {
+ arguments->dataType[col] = NULL;
+ arguments->data_type[col] = TSDB_DATA_TYPE_NULL;
+ }
+ } else if ((0 == strncmp(argv[i], "-b", strlen("-b")))
+ || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) {
+ arguments->demo_mode = false;
+
+ char *dataType;
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "b");
+ exit(EXIT_FAILURE);
+ }
+ dataType = argv[++i];
+ } else if (0 == strncmp(argv[i], "--data-type=", strlen("--data-type="))) {
+ dataType = (char *)(argv[i] + strlen("--data-type="));
+ } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) {
+ dataType = (char *)(argv[i] + strlen("-b"));
+ } else if (strlen("--data-type") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--data-type");
+ exit(EXIT_FAILURE);
+ }
+ dataType = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (strstr(dataType, ",") == NULL) {
+ // only one col
+ if (strcasecmp(dataType, "INT")
+ && strcasecmp(dataType, "FLOAT")
+ && strcasecmp(dataType, "TINYINT")
+ && strcasecmp(dataType, "BOOL")
+ && strcasecmp(dataType, "SMALLINT")
+ && strcasecmp(dataType, "BIGINT")
+ && strcasecmp(dataType, "DOUBLE")
+ && strcasecmp(dataType, "TIMESTAMP")
+ && !regexMatch(dataType,
+ "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$",
+ REG_ICASE | REG_EXTENDED)
+ && strcasecmp(dataType, "UTINYINT")
+ && strcasecmp(dataType, "USMALLINT")
+ && strcasecmp(dataType, "UINT")
+ && strcasecmp(dataType, "UBIGINT")) {
+ printHelp();
+ errorPrint("%s", "-b: Invalid data_type!\n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->dataType[0] = dataType;
+ if (0 == strcasecmp(dataType, "INT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_INT;
+ } else if (0 == strcasecmp(dataType, "TINYINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strcasecmp(dataType, "SMALLINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strcasecmp(dataType, "BIGINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strcasecmp(dataType, "FLOAT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strcasecmp(dataType, "DOUBLE")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE;
+ } else if (1 == regexMatch(dataType,
+ "^BINARY(\\([1-9][0-9]*\\))?$",
+ REG_ICASE | REG_EXTENDED)) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BINARY;
+ } else if (1 == regexMatch(dataType,
+ "^NCHAR(\\([1-9][0-9]*\\))?$",
+ REG_ICASE | REG_EXTENDED)) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strcasecmp(dataType, "BOOL")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strcasecmp(dataType, "TIMESTAMP")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP;
+ } else if (0 == strcasecmp(dataType, "UTINYINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_UTINYINT;
+ } else if (0 == strcasecmp(dataType, "USMALLINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_USMALLINT;
+ } else if (0 == strcasecmp(dataType, "UINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_UINT;
+ } else if (0 == strcasecmp(dataType, "UBIGINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_UBIGINT;
+ } else {
+ arguments->data_type[0] = TSDB_DATA_TYPE_NULL;
+ }
+ arguments->dataType[1] = NULL;
+ arguments->data_type[1] = TSDB_DATA_TYPE_NULL;
+ } else {
+ // more than one col
+ int index = 0;
+ g_dupstr = strdup(dataType);
+ char *running = g_dupstr;
+ char *token = strsep(&running, ",");
+ while(token != NULL) {
+ if (strcasecmp(token, "INT")
+ && strcasecmp(token, "FLOAT")
+ && strcasecmp(token, "TINYINT")
+ && strcasecmp(token, "BOOL")
+ && strcasecmp(token, "SMALLINT")
+ && strcasecmp(token, "BIGINT")
+ && strcasecmp(token, "DOUBLE")
+ && strcasecmp(token, "TIMESTAMP")
+ && !regexMatch(token, "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", REG_ICASE | REG_EXTENDED)
+ && strcasecmp(token, "UTINYINT")
+ && strcasecmp(token, "USMALLINT")
+ && strcasecmp(token, "UINT")
+ && strcasecmp(token, "UBIGINT")) {
+ printHelp();
+ free(g_dupstr);
+ errorPrint("%s", "-b: Invalid data_type!\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (0 == strcasecmp(token, "INT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_INT;
+ } else if (0 == strcasecmp(token, "FLOAT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strcasecmp(token, "SMALLINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strcasecmp(token, "BIGINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strcasecmp(token, "DOUBLE")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strcasecmp(token, "TINYINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT;
+ } else if (1 == regexMatch(token, "^BINARY(\\([1-9][0-9]*\\))?$", REG_ICASE |
+ REG_EXTENDED)) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BINARY;
+ } else if (1 == regexMatch(token, "^NCHAR(\\([1-9][0-9]*\\))?$", REG_ICASE |
+ REG_EXTENDED)) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strcasecmp(token, "BOOL")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strcasecmp(token, "TIMESTAMP")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP;
+ } else if (0 == strcasecmp(token, "UTINYINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_UTINYINT;
+ } else if (0 == strcasecmp(token, "USMALLINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_USMALLINT;
+ } else if (0 == strcasecmp(token, "UINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_UINT;
+ } else if (0 == strcasecmp(token, "UBIGINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_UBIGINT;
+ } else {
+ arguments->data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
+ arguments->dataType[index] = token;
+ index ++;
+ token = strsep(&running, ",");
+ if (index >= MAX_NUM_COLUMNS) break;
+ }
+ arguments->dataType[index] = NULL;
+ arguments->data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
+ } else if ((0 == strncmp(argv[i], "-w", strlen("-w")))
+ || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "w");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "w");
+ exit(EXIT_FAILURE);
+ }
+ arguments->binwidth = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) {
+ arguments->binwidth = atoi((char *)(argv[i]+strlen("--binwidth=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-w")))) {
+ arguments->binwidth = atoi((char *)(argv[i]+strlen("-w")));
+ } else {
+ errorPrintReqArg2(argv[0], "-w");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--binwidth") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ }
+ arguments->binwidth = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-m", strlen("-m")))
+ || (0 == strncmp(argv[i], "--table-prefix", strlen("--table-prefix")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "m");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else if (0 == strncmp(argv[i], "--table-prefix=", strlen("--table-prefix="))) {
+ arguments->tb_prefix = (char *)(argv[i] + strlen("--table-prefix="));
+ } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) {
+ arguments->tb_prefix = (char *)(argv[i] + strlen("-m"));
+ } else if (strlen("--table-prefix") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--table-prefix");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((strcmp(argv[i], "-N") == 0)
+ || (0 == strcmp(argv[i], "--normal-table"))) {
+ arguments->demo_mode = false;
+ arguments->use_metric = false;
+ } else if ((strcmp(argv[i], "-M") == 0)
+ || (0 == strcmp(argv[i], "--random"))) {
+ arguments->demo_mode = false;
+ } else if ((strcmp(argv[i], "-x") == 0)
+ || (0 == strcmp(argv[i], "--aggr-func"))) {
+ arguments->aggr_func = true;
+ } else if ((strcmp(argv[i], "-y") == 0)
+ || (0 == strcmp(argv[i], "--answer-yes"))) {
+ arguments->answer_yes = true;
+ } else if ((strcmp(argv[i], "-g") == 0)
+ || (0 == strcmp(argv[i], "--debug"))) {
+ arguments->debug_print = true;
+ } else if (strcmp(argv[i], "-gg") == 0) {
+ arguments->verbose_print = true;
+ } else if ((0 == strncmp(argv[i], "-R", strlen("-R")))
+ || (0 == strncmp(argv[i], "--disorder-range",
+ strlen("--disorder-range")))) {
+ if (strlen("-R") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "R");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "R");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRange = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--disorder-range=",
+ strlen("--disorder-range="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) {
+ arguments->disorderRange =
+ atoi((char *)(argv[i]+strlen("--disorder-range=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-R")))) {
+ arguments->disorderRange =
+ atoi((char *)(argv[i]+strlen("-R")));
+ } else {
+ errorPrintReqArg2(argv[0], "-R");
+ exit(EXIT_FAILURE);
+ }
+
+ if (arguments->disorderRange < 0) {
+ errorPrint("Invalid disorder range %d, will be set to %d\n",
+ arguments->disorderRange, 1000);
+ arguments->disorderRange = 1000;
+ }
+ } else if (strlen("--disorder-range") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRange = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-O", strlen("-O")))
+ || (0 == strncmp(argv[i], "--disorder", strlen("--disorder")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "O");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "O");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRatio = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--disorder=", strlen("--disorder="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) {
+ arguments->disorderRatio = atoi((char *)(argv[i]+strlen("--disorder=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-O")))) {
+ arguments->disorderRatio = atoi((char *)(argv[i]+strlen("-O")));
+ } else {
+ errorPrintReqArg2(argv[0], "-O");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--disorder") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRatio = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (arguments->disorderRatio > 50) {
+ errorPrint("Invalid disorder ratio %d, will be set to %d\n",
+ arguments->disorderRatio, 50);
+ arguments->disorderRatio = 50;
+ }
+ } else if ((0 == strncmp(argv[i], "-a", strlen("-a")))
+ || (0 == strncmp(argv[i], "--replica",
+ strlen("--replica")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "a");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "a");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--replica=",
+ strlen("--replica="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) {
+ arguments->replica =
+ atoi((char *)(argv[i]+strlen("--replica=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-a")))) {
+ arguments->replica =
+ atoi((char *)(argv[i]+strlen("-a")));
+ } else {
+ errorPrintReqArg2(argv[0], "-a");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--replica") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (arguments->replica > 3 || arguments->replica < 1) {
+ errorPrint("Invalid replica value %d, will be set to %d\n",
+ arguments->replica, 1);
+ arguments->replica = 1;
+ }
+ } else if (strcmp(argv[i], "-D") == 0) {
+ arguments->method_of_delete = atoi(argv[++i]);
+ if (arguments->method_of_delete > 3) {
+ errorPrint("%s", "\n\t-D need a value (0~3) number following!\n");
+ exit(EXIT_FAILURE);
+ }
+ } else if ((strcmp(argv[i], "--version") == 0)
+ || (strcmp(argv[i], "-V") == 0)) {
+ printVersion();
+ exit(0);
+ } else if ((strcmp(argv[i], "--help") == 0)
+ || (strcmp(argv[i], "-?") == 0)) {
+ printHelp();
+ exit(0);
+ } else if (strcmp(argv[i], "--usage") == 0) {
+ printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\
+ [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\
+ [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\
+ [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\
+ [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\
+ [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-g]\n\
+ [--help] [--usage] [--version]\n");
+ exit(0);
+ } else {
+ // to simulate argp_option output
+ if (strlen(argv[i]) > 2) {
+ if (0 == strncmp(argv[i], "--", 2)) {
+ fprintf(stderr, "%s: unrecognized options '%s'\n", argv[0], argv[i]);
+ } else if (0 == strncmp(argv[i], "-", 1)) {
+ char tmp[2] = {0};
+ tstrncpy(tmp, argv[i]+1, 2);
+ fprintf(stderr, "%s: invalid options -- '%s'\n", argv[0], tmp);
+ } else {
+ fprintf(stderr, "%s: Too many arguments\n", argv[0]);
+ }
+ } else {
+ fprintf(stderr, "%s invalid options -- '%s'\n", argv[0],
+ (char *)((char *)argv[i])+1);
+ }
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ int columnCount;
+ for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) {
+ if (g_args.dataType[columnCount] == NULL) {
+ break;
+ }
+ }
+
+ if (0 == columnCount) {
+ ERROR_EXIT("data type error!");
+ }
+ g_args.columnCount = columnCount;
+
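+ // Estimate the textual length of one row: start with the timestamp
+ // and add the printed width of every configured column type.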
+ g_args.lenOfOneRow = 20; // timestamp
+ for (int c = 0; c < g_args.columnCount; c++) {
+ switch(g_args.data_type[c]) {
+ case TSDB_DATA_TYPE_BINARY:
+ g_args.lenOfOneRow += g_args.binwidth + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ g_args.lenOfOneRow += g_args.binwidth + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ g_args.lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ g_args.lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ g_args.lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ g_args.lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ g_args.lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ g_args.lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ g_args.lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ errorPrint2("get error data type : %s\n", g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (((arguments->debug_print) && (NULL != arguments->metaFile))
+ || arguments->verbose_print) {
+ printf("###################################################################\n");
+ printf("# meta file: %s\n", arguments->metaFile);
+ printf("# Server IP: %s:%hu\n",
+ arguments->host == NULL ? "localhost" : arguments->host,
+ arguments->port );
+ printf("# User: %s\n", arguments->user);
+ printf("# Password: %s\n", arguments->password);
+ printf("# Use metric: %s\n",
+ arguments->use_metric ? "true" : "false");
+ if (*(arguments->dataType)) {
+ printf("# Specified data type: ");
+ for (int c = 0; c < MAX_NUM_COLUMNS; c++)
+ if (arguments->dataType[c])
+ printf("%s,", arguments->dataType[c]);
+ else
+ break;
+ printf("\n");
+ }
+ printf("# Insertion interval: %"PRIu64"\n",
+ arguments->insert_interval);
+ printf("# Number of records per req: %u\n",
+ arguments->reqPerReq);
+ printf("# Max SQL length: %"PRIu64"\n",
+ arguments->max_sql_len);
+ printf("# Length of Binary: %d\n", arguments->binwidth);
+ printf("# Number of Threads: %d\n", arguments->nthreads);
+ printf("# Number of Tables: %"PRId64"\n",
+ arguments->ntables);
+ printf("# Number of Data per Table: %"PRId64"\n",
+ arguments->insertRows);
+ printf("# Database name: %s\n", arguments->database);
+ printf("# Table prefix: %s\n", arguments->tb_prefix);
+ if (arguments->disorderRatio) {
+ printf("# Data order: %d\n", arguments->disorderRatio);
+ printf("# Data out of order rate: %d\n", arguments->disorderRange);
+ }
+ printf("# Delete method: %d\n", arguments->method_of_delete);
+ printf("# Answer yes when prompt: %d\n", arguments->answer_yes);
+ printf("# Print debug info: %d\n", arguments->debug_print);
+ printf("# Print verbose info: %d\n", arguments->verbose_print);
+ printf("###################################################################\n");
+
+ prompt();
+ }
+}
+
+static void tmfclose(FILE *fp) {
+ if (NULL != fp) {
+ fclose(fp);
+ }
+}
+
+static void tmfree(void *buf) {
+ if (NULL != buf) {
+ free(buf);
+ buf = NULL;
+ }
+}
+
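+// Execute a SQL command. Returns the number of affected rows for INSERT
+// statements, 0 for other successful statements, and -1 on failure.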
+static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
+
+ verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
+
+ TAOS_RES *res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+
+ if (code != 0) {
+ if (!quiet) {
+ errorPrint2("Failed to execute <%s>, reason: %s\n",
+ command, taos_errstr(res));
+ }
+ taos_free_result(res);
+ //taos_close(taos);
+ return -1;
+ }
+
+ if (INSERT_TYPE == type) {
+ int affectedRows = taos_affected_rows(res);
+ taos_free_result(res);
+ return affectedRows;
+ }
+
+ taos_free_result(res);
+ return 0;
+}
+
+static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
+{
+ pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
+ if (pThreadInfo->fp == NULL) {
+ errorPrint2(
+ "%s() LN%d, failed to open result file: %s, result will not save to file\n",
+ __func__, __LINE__, pThreadInfo->filePath);
+ return;
+ }
+
+ fprintf(pThreadInfo->fp, "%s", resultBuf);
+ tmfclose(pThreadInfo->fp);
+ pThreadInfo->fp = NULL;
+}
+
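+// Fetch all rows of a query result into a 100 MB staging buffer and,
+// when a result file path is set, flush the buffer to that file.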
+static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
+ TAOS_ROW row = NULL;
+ int num_rows = 0;
+ int num_fields = taos_field_count(res);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ char* databuf = (char*) calloc(1, 100*1024*1024);
+ if (databuf == NULL) {
+ errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
+ __func__, __LINE__);
+ return ;
+ }
+
+ int64_t totalLen = 0;
+
+ // fetch the records row by row
+ while((row = taos_fetch_row(res))) {
+ if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) {
+ if (strlen(pThreadInfo->filePath) > 0)
+ appendResultBufToFile(databuf, pThreadInfo);
+ totalLen = 0;
+ memset(databuf, 0, 100*1024*1024);
+ }
+ num_rows++;
+ char temp[HEAD_BUFF_LEN] = {0};
+ int len = taos_print_row(temp, row, fields, num_fields);
+ len += sprintf(temp + len, "\n");
+ //printf("query result:%s\n", temp);
+ memcpy(databuf + totalLen, temp, len);
+ totalLen += len;
+ verbosePrint("%s() LN%d, totalLen: %"PRId64"\n",
+ __func__, __LINE__, totalLen);
+ }
+
+ verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
+ __func__, __LINE__, databuf, pThreadInfo->filePath);
+ if (strlen(pThreadInfo->filePath) > 0) {
+ appendResultBufToFile(databuf, pThreadInfo);
+ }
+ free(databuf);
+}
+
+static void selectAndGetResult(
+ threadInfo *pThreadInfo, char *command)
+{
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return;
+ }
+
+ fetchResult(res, pThreadInfo);
+ taos_free_result(res);
+
+ } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ int retCode = postProceSql(
+ g_queryInfo.host, g_queryInfo.port,
+ command,
+ pThreadInfo);
+ if (0 != retCode) {
+ printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
+ }
+
+ } else {
+ errorPrint2("%s() LN%d, unknown query mode: %s\n",
+ __func__, __LINE__, g_queryInfo.queryMode);
+ }
+}
+
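+// The rand_*/demo_* helpers below walk a static cursor over buffers of
+// pre-generated random values (see init_rand_data()), so per-row data
+// generation only costs an index lookup instead of a fresh random call.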
+static char *rand_bool_str() {
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randbool_buff + ((cursor % g_args.prepared_rand) * BOOL_BUFF_LEN);
+}
+
+static int32_t rand_bool() {
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randint[cursor % g_args.prepared_rand] % 2;
+}
+
+static char *rand_tinyint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randtinyint_buff +
+ ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN);
+}
+
+static int32_t rand_tinyint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randint[cursor % g_args.prepared_rand] % 128;
+}
+
+static char *rand_utinyint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randutinyint_buff +
+ ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN);
+}
+
+static int32_t rand_utinyint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randuint[cursor % g_args.prepared_rand] % 255;
+}
+
+static char *rand_smallint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randsmallint_buff +
+ ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN);
+}
+
+static int32_t rand_smallint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randint[cursor % g_args.prepared_rand] % 32768;
+}
+
+static char *rand_usmallint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randusmallint_buff +
+ ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN);
+}
+
+static int32_t rand_usmallint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randuint[cursor % g_args.prepared_rand] % 65535;
+}
+
+static char *rand_int_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN);
+}
+
+static int32_t rand_int()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randint[cursor % g_args.prepared_rand];
+}
+
+static char *rand_uint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randuint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN);
+}
+
+static int32_t rand_uint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randuint[cursor % g_args.prepared_rand];
+}
+
+static char *rand_bigint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randbigint_buff +
+ ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN);
+}
+
+static int64_t rand_bigint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randbigint[cursor % g_args.prepared_rand];
+}
+
+static char *rand_ubigint_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randubigint_buff +
+ ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN);
+}
+
+static int64_t rand_ubigint()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randubigint[cursor % g_args.prepared_rand];
+}
+
+static char *rand_float_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randfloat_buff + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN);
+}
+
+
+static float rand_float()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randfloat[cursor % g_args.prepared_rand];
+}
+
+static char *demo_current_float_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_rand_current_buff +
+ ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN);
+}
+
+static float UNUSED_FUNC demo_current_float()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return (float)(9.8 + 0.04 * (g_randint[cursor % g_args.prepared_rand] % 10)
+ + g_randfloat[cursor % g_args.prepared_rand]/1000000000);
+}
+
+static char *demo_voltage_int_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_rand_voltage_buff +
+ ((cursor % g_args.prepared_rand) * INT_BUFF_LEN);
+}
+
+static int32_t UNUSED_FUNC demo_voltage_int()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return 215 + g_randint[cursor % g_args.prepared_rand] % 10;
+}
+
+static char *demo_phase_float_str() {
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_rand_phase_buff + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN);
+}
+
+static float UNUSED_FUNC demo_phase_float() {
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return (float)((115 + g_randint[cursor % g_args.prepared_rand] % 10
+ + g_randfloat[cursor % g_args.prepared_rand]/1000000000)/360);
+}
+
+#if 0
+static const char charNum[] = "0123456789";
+
+static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose
+static void nonrand_string(char *str, int size)
+{
+ str[0] = 0;
+ if (size > 0) {
+ int n;
+ for (n = 0; n < size; n++) {
+ str[n] = charNum[n % 10];
+ }
+ str[n] = 0;
+ }
+}
+#endif
+
+static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
+
+static void rand_string(char *str, int size) {
+ str[0] = 0;
+ if (size > 0) {
+ //--size;
+ int n;
+ for (n = 0; n < size; n++) {
+ int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1);
+ str[n] = charset[key];
+ }
+ str[n] = 0;
+ }
+}
+
+static char *rand_double_str()
+{
+ static int cursor;
+ cursor++;
+ if (cursor > (g_args.prepared_rand - 1)) cursor = 0;
+ return g_randdouble_buff + (cursor * DOUBLE_BUFF_LEN);
+}
+
+static double rand_double()
+{
+ static int cursor;
+ cursor++;
+ cursor = cursor % g_args.prepared_rand;
+ return g_randdouble[cursor];
+}
+
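+// Pre-generate g_args.prepared_rand random values, and their string
+// representations, for every supported data type.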
+static void init_rand_data() {
+
+ g_randint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randint_buff);
+ g_rand_voltage_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_rand_voltage_buff);
+ g_randbigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randbigint_buff);
+ g_randsmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randsmallint_buff);
+ g_randtinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randtinyint_buff);
+ g_randbool_buff = calloc(1, BOOL_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randbool_buff);
+ g_randfloat_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randfloat_buff);
+ g_rand_current_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_rand_current_buff);
+ g_rand_phase_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_rand_phase_buff);
+ g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randdouble_buff);
+ g_randuint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randuint_buff);
+ g_randutinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randutinyint_buff);
+ g_randusmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randusmallint_buff);
+ g_randubigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand);
+ assert(g_randubigint_buff);
+ g_randint = calloc(1, sizeof(int32_t) * g_args.prepared_rand);
+ assert(g_randint);
+ g_randuint = calloc(1, sizeof(uint32_t) * g_args.prepared_rand);
+ assert(g_randuint);
+ g_randbigint = calloc(1, sizeof(int64_t) * g_args.prepared_rand);
+ assert(g_randbigint);
+ g_randubigint = calloc(1, sizeof(uint64_t) * g_args.prepared_rand);
+ assert(g_randubigint);
+ g_randfloat = calloc(1, sizeof(float) * g_args.prepared_rand);
+ assert(g_randfloat);
+ g_randdouble = calloc(1, sizeof(double) * g_args.prepared_rand);
+ assert(g_randdouble);
+
+ for (int i = 0; i < g_args.prepared_rand; i++) {
+ g_randint[i] = (int)(taosRandom() % RAND_MAX - (RAND_MAX >> 1));
+ g_randuint[i] = (int)(taosRandom());
+ sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d",
+ g_randint[i]);
+ sprintf(g_rand_voltage_buff + i * INT_BUFF_LEN, "%d",
+ 215 + g_randint[i] % 10);
+
+ sprintf(g_randbool_buff + i * BOOL_BUFF_LEN, "%s",
+ ((g_randint[i] % 2) & 1)?"true":"false");
+ sprintf(g_randsmallint_buff + i * SMALLINT_BUFF_LEN, "%d",
+ g_randint[i] % 32768);
+ sprintf(g_randtinyint_buff + i * TINYINT_BUFF_LEN, "%d",
+ g_randint[i] % 128);
+ sprintf(g_randuint_buff + i * INT_BUFF_LEN, "%d",
+ g_randuint[i]);
+ sprintf(g_randusmallint_buff + i * SMALLINT_BUFF_LEN, "%d",
+ g_randuint[i] % 65535);
+ sprintf(g_randutinyint_buff + i * TINYINT_BUFF_LEN, "%d",
+ g_randuint[i] % 255);
+
+ g_randbigint[i] = (int64_t)(taosRandom() % RAND_MAX - (RAND_MAX >> 1));
+ g_randubigint[i] = (uint64_t)(taosRandom());
+ sprintf(g_randbigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"",
+ g_randbigint[i]);
+ sprintf(g_randubigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"",
+ g_randubigint[i]);
+
+ g_randfloat[i] = (float)(taosRandom() / 1000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1);
+ sprintf(g_randfloat_buff + i * FLOAT_BUFF_LEN, "%f",
+ g_randfloat[i]);
+ sprintf(g_rand_current_buff + i * FLOAT_BUFF_LEN, "%f",
+ (float)(9.8 + 0.04 * (g_randint[i] % 10)
+ + g_randfloat[i]/1000000000));
+ sprintf(g_rand_phase_buff + i * FLOAT_BUFF_LEN, "%f",
+ (float)((115 + g_randint[i] % 10
+ + g_randfloat[i]/1000000000)/360));
+
+ g_randdouble[i] = (double)(taosRandom() / 1000000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1);
+ sprintf(g_randdouble_buff + i * DOUBLE_BUFF_LEN, "%f",
+ g_randdouble[i]);
+ }
+}
+
+#define SHOW_PARSE_RESULT_START() \
+ do { if (g_args.metaFile) \
+ printf("\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \
+ g_args.metaFile); } while(0)
+
+#define SHOW_PARSE_RESULT_END() \
+ do { if (g_args.metaFile) \
+ printf("\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \
+ g_args.metaFile); } while(0)
+
+#define SHOW_PARSE_RESULT_START_TO_FILE(fp) \
+ do { if (g_args.metaFile) \
+ fprintf(fp, "\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \
+ g_args.metaFile); } while(0)
+
+#define SHOW_PARSE_RESULT_END_TO_FILE(fp) \
+ do { if (g_args.metaFile) \
+ fprintf(fp, "\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \
+ g_args.metaFile); } while(0)
+
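+// Print the insertion-related settings parsed from the command line or
+// JSON meta file; returns -1 if an invalid time precision is configured.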
+static int printfInsertMeta() {
+ SHOW_PARSE_RESULT_START();
+
+ if (g_args.demo_mode) {
+ printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n");
+ } else {
+ printf("\ntaosdemo is simulating random data as you request..\n\n");
+ }
+
+ if (g_args.iface != INTERFACE_BUT) {
+ // first time if no iface specified
+ printf("interface: \033[33m%s\033[0m\n",
+ (g_args.iface==TAOSC_IFACE)?"taosc":
+ (g_args.iface==REST_IFACE)?"rest":"stmt");
+ }
+
+ printf("host: \033[33m%s:%u\033[0m\n",
+ g_Dbs.host, g_Dbs.port);
+ printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
+ printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
+ printf("configDir: \033[33m%s\033[0m\n", configDir);
+ printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
+ printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
+ printf("thread num of create table: \033[33m%d\033[0m\n",
+ g_Dbs.threadCountForCreateTbl);
+ printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
+ g_args.insert_interval);
+ printf("number of records per req: \033[33m%u\033[0m\n",
+ g_args.reqPerReq);
+ printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
+ g_args.max_sql_len);
+
+ printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
+
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ printf("database[\033[33m%d\033[0m]:\n", i);
+ printf(" database[%d] name: \033[33m%s\033[0m\n",
+ i, g_Dbs.db[i].dbName);
+ if (0 == g_Dbs.db[i].drop) {
+ printf(" drop: \033[33m no\033[0m\n");
+ } else {
+ printf(" drop: \033[33m yes\033[0m\n");
+ }
+
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ printf(" blocks: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ printf(" cache: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ printf(" days: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ printf(" keep: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ printf(" replica: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ printf(" update: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.update);
+ }
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ printf(" minRows: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ printf(" maxRows: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ printf(" walLevel: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ printf(" fsync: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.fsync);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 0) {
+ printf(" quorum: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) {
+ printf(" precision: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].dbCfg.precision);
+ } else {
+ printf("\033[1m\033[40;31m precision error: %s\033[0m\n",
+ g_Dbs.db[i].dbCfg.precision);
+ return -1;
+ }
+ }
+
+
+ if (g_args.use_metric) {
+ printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTblCount);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
+
+ printf(" stbName: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].stbName);
+
+ if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
+ } else if (AUTO_CREATE_SUBTBL ==
+ g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes");
+ } else {
+ printf(" autoCreateTable: \033[33m%s\033[0m\n", "error");
+ }
+
+ if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "no");
+ } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "yes");
+ } else {
+ printf(" childTblExists: \033[33m%s\033[0m\n", "error");
+ }
+
+ printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblCount);
+ printf(" childTblPrefix: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblPrefix);
+ printf(" dataSource: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].dataSource);
+ printf(" iface: \033[33m%s\033[0m\n",
+ (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
+ (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
+ if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) {
+ printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblLimit);
+ }
+ if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
+ printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].childTblOffset);
+ }
+ printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].insertRows);
+ /*
+ if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
+ printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n");
+ }else {
+ printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n");
+ }
+ */
+ printf(" interlaceRows: \033[33m%u\033[0m\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
+
+ if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
+ printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].insertInterval);
+ }
+
+ printf(" disorderRange: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].superTbls[j].disorderRange);
+ printf(" disorderRatio: \033[33m%d\033[0m\n",
+ g_Dbs.db[i].superTbls[j].disorderRatio);
+ printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].maxSqlLen);
+ printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
+ g_Dbs.db[i].superTbls[j].timeStampStep);
+ printf(" startTimestamp: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].startTimestamp);
+ printf(" sampleFormat: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].sampleFormat);
+ printf(" sampleFile: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].sampleFile);
+ printf(" useSampleTs: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].useSampleTs ? "yes (warning: disorderRange/disorderRatio is disabled)" : "no");
+ printf(" tagsFile: \033[33m%s\033[0m\n",
+ g_Dbs.db[i].superTbls[j].tagsFile);
+ printf(" columnCount: \033[33m%d\033[0m\n ",
+ g_Dbs.db[i].superTbls[j].columnCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "binary", 6))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "nchar", 5))) {
+ printf("column[%d]:\033[33m%s(%d)\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ } else {
+ printf("column[%d]:\033[33m%s\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ }
+ }
+ printf("\n");
+
+ printf(" tagCount: \033[33m%d\033[0m\n ",
+ g_Dbs.db[i].superTbls[j].tagCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "nchar", strlen("nchar")))) {
+ printf("tag[%d]:\033[33m%s(%d)\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ } else {
+ printf("tag[%d]:\033[33m%s\033[0m ", k,
+ g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ }
+ }
+ printf("\n");
+ }
+ } else {
+ printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
+ g_args.ntables);
+ printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
+ g_args.insertRows);
+ }
+ printf("\n");
+ }
+
+ SHOW_PARSE_RESULT_END();
+
+ return 0;
+}
+
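+// Same as printfInsertMeta(), but written to the result file without ANSI
+// colors and without aborting on a bad precision value.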
+static void printfInsertMetaToFile(FILE* fp) {
+
+ SHOW_PARSE_RESULT_START_TO_FILE(fp);
+
+ fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port);
+ fprintf(fp, "user: %s\n", g_Dbs.user);
+ fprintf(fp, "configDir: %s\n", configDir);
+ fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
+ fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
+ fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl);
+ fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq);
+ fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
+ fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
+
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ fprintf(fp, "database[%d]:\n", i);
+ fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName);
+ if (0 == g_Dbs.db[i].drop) {
+ fprintf(fp, " drop: no\n");
+ } else {
+ fprintf(fp, " drop: yes\n");
+ }
+
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update);
+ }
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 0) {
+ fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
+ fprintf(fp, " precision: %s\n",
+ g_Dbs.db[i].dbCfg.precision);
+ } else {
+ fprintf(fp, " precision error: %s\n",
+ g_Dbs.db[i].dbCfg.precision);
+ }
+ }
+
+ fprintf(fp, " super table count: %"PRIu64"\n",
+ g_Dbs.db[i].superTblCount);
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ fprintf(fp, " super table[%d]:\n", j);
+
+ fprintf(fp, " stbName: %s\n",
+ g_Dbs.db[i].superTbls[j].stbName);
+
+ if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ fprintf(fp, " autoCreateTable: %s\n", "no");
+ } else if (AUTO_CREATE_SUBTBL
+ == g_Dbs.db[i].superTbls[j].autoCreateTable) {
+ fprintf(fp, " autoCreateTable: %s\n", "yes");
+ } else {
+ fprintf(fp, " autoCreateTable: %s\n", "error");
+ }
+
+ if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ fprintf(fp, " childTblExists: %s\n", "no");
+ } else if (TBL_ALREADY_EXISTS
+ == g_Dbs.db[i].superTbls[j].childTblExists) {
+ fprintf(fp, " childTblExists: %s\n", "yes");
+ } else {
+ fprintf(fp, " childTblExists: %s\n", "error");
+ }
+
+ fprintf(fp, " childTblCount: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].childTblCount);
+ fprintf(fp, " childTblPrefix: %s\n",
+ g_Dbs.db[i].superTbls[j].childTblPrefix);
+ fprintf(fp, " dataSource: %s\n",
+ g_Dbs.db[i].superTbls[j].dataSource);
+ fprintf(fp, " iface: %s\n",
+ (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
+ (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
+ fprintf(fp, " insertRows: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].insertRows);
+ fprintf(fp, " interlace rows: %u\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
+ if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
+ fprintf(fp, " stable insert interval: %"PRIu64"\n",
+ g_Dbs.db[i].superTbls[j].insertInterval);
+ }
+ /*
+ if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
+ fprintf(fp, " multiThreadWriteOneTbl: no\n");
+ }else {
+ fprintf(fp, " multiThreadWriteOneTbl: yes\n");
+ }
+ */
+ fprintf(fp, " interlaceRows: %u\n",
+ g_Dbs.db[i].superTbls[j].interlaceRows);
+ fprintf(fp, " disorderRange: %d\n",
+ g_Dbs.db[i].superTbls[j].disorderRange);
+ fprintf(fp, " disorderRatio: %d\n",
+ g_Dbs.db[i].superTbls[j].disorderRatio);
+ fprintf(fp, " maxSqlLen: %"PRIu64"\n",
+ g_Dbs.db[i].superTbls[j].maxSqlLen);
+
+ fprintf(fp, " timeStampStep: %"PRId64"\n",
+ g_Dbs.db[i].superTbls[j].timeStampStep);
+ fprintf(fp, " startTimestamp: %s\n",
+ g_Dbs.db[i].superTbls[j].startTimestamp);
+ fprintf(fp, " sampleFormat: %s\n",
+ g_Dbs.db[i].superTbls[j].sampleFormat);
+ fprintf(fp, " sampleFile: %s\n",
+ g_Dbs.db[i].superTbls[j].sampleFile);
+ fprintf(fp, " tagsFile: %s\n",
+ g_Dbs.db[i].superTbls[j].tagsFile);
+
+ fprintf(fp, " columnCount: %d\n ",
+ g_Dbs.db[i].superTbls[j].columnCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ if ((0 == strncasecmp(
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ "nchar", strlen("nchar")))) {
+ fprintf(fp, "column[%d]:%s(%d) ", k,
+ g_Dbs.db[i].superTbls[j].columns[k].dataType,
+ g_Dbs.db[i].superTbls[j].columns[k].dataLen);
+ } else {
+ fprintf(fp, "column[%d]:%s ",
+ k, g_Dbs.db[i].superTbls[j].columns[k].dataType);
+ }
+ }
+ fprintf(fp, "\n");
+
+ fprintf(fp, " tagCount: %d\n ",
+ g_Dbs.db[i].superTbls[j].tagCount);
+ for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) {
+ //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ "nchar", strlen("nchar")))) {
+ fprintf(fp, "tag[%d]:%s(%d) ",
+ k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
+ g_Dbs.db[i].superTbls[j].tags[k].dataLen);
+ } else {
+ fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
+ }
+ }
+ fprintf(fp, "\n");
+ }
+ fprintf(fp, "\n");
+ }
+
+ SHOW_PARSE_RESULT_END_TO_FILE(fp);
+}
+
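+// Print the parsed query/subscribe configuration: the specified-table query
+// section first, then the super-table query section.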
+static void printfQueryMeta() {
+
+ SHOW_PARSE_RESULT_START();
+
+ printf("host: \033[33m%s:%u\033[0m\n",
+ g_queryInfo.host, g_queryInfo.port);
+ printf("user: \033[33m%s\033[0m\n", g_queryInfo.user);
+ printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName);
+
+ printf("\n");
+
+ if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) {
+ printf("specified table query info: \n");
+ printf("sqlCount: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) {
+ printf("specified tbl query times:\n");
+ printf(" \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryTimes);
+ printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.queryInterval);
+ printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
+ printf("concurrent: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.specifiedQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
+
+ printf("super table query info:\n");
+ printf("sqlCount: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.sqlCount);
+
+ if (g_queryInfo.superQueryInfo.sqlCount > 0) {
+ printf("query interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryInterval);
+ printf("threadCnt: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.threadCnt);
+ printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
+ g_queryInfo.superQueryInfo.childTblCount);
+ printf("stable name: \033[33m%s\033[0m\n",
+ g_queryInfo.superQueryInfo.stbName);
+ printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.queryTimes);
+
+ printf("mod: \033[33m%s\033[0m\n",
+ (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync");
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ printf("restart: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeRestart);
+ printf("keepProgress: \033[33m%d\033[0m\n",
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ printf(" sql[%d]: \033[33m%s\033[0m\n",
+ i, g_queryInfo.superQueryInfo.sql[i]);
+ }
+ printf("\n");
+ }
+ }
+
+ SHOW_PARSE_RESULT_END();
+}
+
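+// Render an epoch value into "YYYY-MM-DD HH:MM:SS" plus a fractional part
+// whose width follows the database precision (ms/us/ns).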
+static char* formatTimestamp(char* buf, int64_t val, int precision) {
+ time_t tt;
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ tt = (time_t)(val / 1000000);
+ } else if (precision == TSDB_TIME_PRECISION_NANO) {
+ tt = (time_t)(val / 1000000000);
+ } else {
+ tt = (time_t)(val / 1000);
+ }
+
+ /* comment out as it make testcases like select_with_tags.sim fail.
+ but in windows, this may cause the call to localtime crash if tt < 0,
+ need to find a better solution.
+ if (tt < 0) {
+ tt = 0;
+ }
+ */
+
+#ifdef WINDOWS
+ if (tt < 0) tt = 0;
+#endif
+
+ struct tm* ptm = localtime(&tt);
+ size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
+
+ if (precision == TSDB_TIME_PRECISION_MICRO) {
+ sprintf(buf + pos, ".%06d", (int)(val % 1000000));
+ } else if (precision == TSDB_TIME_PRECISION_NANO) {
+ sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
+ } else {
+ sprintf(buf + pos, ".%03d", (int)(val % 1000));
+ }
+
+ return buf;
+}
+
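+// Write a single result-set field to fp using a format matching its TDengine
+// data type; strings are quoted and timestamps go through formatTimestamp().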
+static void xDumpFieldToFile(FILE* fp, const char* val,
+ TAOS_FIELD* field, int32_t length, int precision) {
+
+ if (val == NULL) {
+ fprintf(fp, "%s", TSDB_DATA_NULL_STR);
+ return;
+ }
+
+ char buf[TSDB_MAX_BYTES_PER_ROW];
+ switch (field->type) {
+ case TSDB_DATA_TYPE_BOOL:
+ fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 1 : 0));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ fprintf(fp, "%d", *((int8_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ fprintf(fp, "%d", *((uint8_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ fprintf(fp, "%d", *((int16_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ fprintf(fp, "%d", *((uint16_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ fprintf(fp, "%d", *((int32_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ fprintf(fp, "%d", *((uint32_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ fprintf(fp, "%"PRId64"", *((int64_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ fprintf(fp, "%"PRId64"", *((uint64_t *)val));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ memcpy(buf, val, length);
+ buf[length] = 0;
+ fprintf(fp, "\'%s\'", buf);
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ formatTimestamp(buf, *(int64_t*)val, precision);
+ fprintf(fp, "'%s'", buf);
+ break;
+
+ default:
+ break;
+ }
+}
+
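+// Dump an entire query result set to fname as CSV (header row plus one line
+// per data row); returns the number of rows written, or -1 if the file cannot
+// be opened.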
+static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
+ TAOS_ROW row = taos_fetch_row(tres);
+ if (row == NULL) {
+ return 0;
+ }
+
+ FILE* fp = fopen(fname, "at");
+ if (fp == NULL) {
+ errorPrint2("%s() LN%d, failed to open file: %s\n",
+ __func__, __LINE__, fname);
+ return -1;
+ }
+
+ int num_fields = taos_num_fields(tres);
+ TAOS_FIELD *fields = taos_fetch_fields(tres);
+ int precision = taos_result_precision(tres);
+
+ for (int col = 0; col < num_fields; col++) {
+ if (col > 0) {
+ fprintf(fp, ",");
+ }
+ fprintf(fp, "%s", fields[col].name);
+ }
+ fputc('\n', fp);
+
+ int numOfRows = 0;
+ do {
+ int32_t* length = taos_fetch_lengths(tres);
+ for (int i = 0; i < num_fields; i++) {
+ if (i > 0) {
+ fputc(',', fp);
+ }
+ xDumpFieldToFile(fp,
+ (const char*)row[i], fields +i, length[i], precision);
+ }
+ fputc('\n', fp);
+
+ numOfRows++;
+ row = taos_fetch_row(tres);
+ } while( row != NULL);
+
+ fclose(fp);
+
+ return numOfRows;
+}
+
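+// Run "show databases" and fill dbInfos[] with one SDbInfo per user database
+// (the system 'log' database is skipped); returns the number of entries filled.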
+static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
+ int count = 0;
+
+ res = taos_query(taos, "show databases;");
+ int32_t code = taos_errno(res);
+
+ if (code != 0) {
+ errorPrint2("failed to run , reason: %s\n",
+ taos_errstr(res));
+ return -1;
+ }
+
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ while((row = taos_fetch_row(res)) != NULL) {
+ // sys database name : 'log'
+ if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) {
+ continue;
+ }
+
+ dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (dbInfos[count] == NULL) {
+ errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count);
+ return -1;
+ }
+
+ tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
+ formatTimestamp(dbInfos[count]->create_time,
+ *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX],
+ TSDB_TIME_PRECISION_MILLI);
+ dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+
+ tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+ tstrncpy(dbInfos[count]->precision,
+ (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
+ tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX],
+ fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
+
+ count++;
+ if (count >= MAX_DATABASE_COUNT) {
+ errorPrint("%s() LN%d, the database count exceeds the limit of %d\n",
+ __func__, __LINE__, MAX_DATABASE_COUNT);
+ break;
+ }
+ }
+
+ return count;
+}
+
+static void printfDbInfoForQueryToFile(
+ char* filename, SDbInfo* dbInfos, int index) {
+
+ if (filename[0] == 0)
+ return;
+
+ FILE *fp = fopen(filename, "at");
+ if (fp == NULL) {
+ errorPrint( "failed to open file: %s\n", filename);
+ return;
+ }
+
+ fprintf(fp, "================ database[%d] ================\n", index);
+ fprintf(fp, "name: %s\n", dbInfos->name);
+ fprintf(fp, "created_time: %s\n", dbInfos->create_time);
+ fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables);
+ fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
+ fprintf(fp, "replica: %d\n", dbInfos->replica);
+ fprintf(fp, "quorum: %d\n", dbInfos->quorum);
+ fprintf(fp, "days: %d\n", dbInfos->days);
+ fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist);
+ fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
+ fprintf(fp, "blocks: %d\n", dbInfos->blocks);
+ fprintf(fp, "minrows: %d\n", dbInfos->minrows);
+ fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
+ fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
+ fprintf(fp, "fsync: %d\n", dbInfos->fsync);
+ fprintf(fp, "comp: %d\n", dbInfos->comp);
+ fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
+ fprintf(fp, "precision: %s\n", dbInfos->precision);
+ fprintf(fp, "update: %d\n", dbInfos->update);
+ fprintf(fp, "status: %s\n", dbInfos->status);
+ fprintf(fp, "\n");
+
+ fclose(fp);
+}
+
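+// Collect server-side information (variables, dnodes, databases, per-database
+// vgroups and stables) into a timestamped file for later inspection.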
+static void printfQuerySystemInfo(TAOS * taos) {
+ char filename[MAX_FILE_NAME_LEN] = {0};
+ char buffer[1024] = {0};
+ TAOS_RES* res;
+
+ time_t t;
+ struct tm* lt;
+ time(&t);
+ lt = localtime(&t);
+ snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d",
+ lt->tm_year+1900, lt->tm_mon+1, lt->tm_mday, lt->tm_hour, lt->tm_min,
+ lt->tm_sec);
+
+ // show variables
+ res = taos_query(taos, "show variables;");
+ //fetchResult(res, filename);
+ xDumpResultToFile(filename, res);
+
+ // show dnodes
+ res = taos_query(taos, "show dnodes;");
+ xDumpResultToFile(filename, res);
+ //fetchResult(res, filename);
+
+ // show databases
+ res = taos_query(taos, "show databases;");
+ SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
+ if (dbInfos == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
+ return;
+ }
+ int dbCount = getDbFromServer(taos, dbInfos);
+ if (dbCount <= 0) {
+ free(dbInfos);
+ return;
+ }
+
+ for (int i = 0; i < dbCount; i++) {
+ // printf database info
+ printfDbInfoForQueryToFile(filename, dbInfos[i], i);
+
+ // show db.vgroups
+ snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name);
+ res = taos_query(taos, buffer);
+ xDumpResultToFile(filename, res);
+
+ // show db.stables
+ snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name);
+ res = taos_query(taos, buffer);
+ xDumpResultToFile(filename, res);
+ free(dbInfos[i]);
+ }
+
+ free(dbInfos);
+}
+
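+// Execute one SQL statement through the REST interface: build a Basic-Auth
+// HTTP POST to /rest/sql, send it over the thread's socket, then read the
+// response; returns 0 only if "HTTP/1.1 200 OK" is found in the reply.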
+static int postProceSql(char *host, uint16_t port,
+ char* sqlstr, threadInfo *pThreadInfo)
+{
+ char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
+
+ char *url = "/rest/sql";
+
+ int bytes, sent, received, req_str_len, resp_len;
+ char *request_buf;
+ char response_buf[RESP_BUF_LEN];
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
+
+ int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN;
+
+ request_buf = malloc(req_buf_len);
+ if (NULL == request_buf) {
+ errorPrint("%s", "cannot allocate memory.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ char userpass_buf[INPUT_BUF_LEN];
+ int mod_table[] = {0, 2, 1};
+
+ static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
+ 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+ 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
+ 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
+ 'w', 'x', 'y', 'z', '0', '1', '2', '3',
+ '4', '5', '6', '7', '8', '9', '+', '/'};
+
+ if (g_args.test_mode == INSERT_TEST) {
+ snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
+ g_Dbs.user, g_Dbs.password);
+ } else {
+ snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
+ g_queryInfo.user, g_queryInfo.password);
+ }
+
+ size_t userpass_buf_len = strlen(userpass_buf);
+ size_t encoded_len = 4 * ((userpass_buf_len +2) / 3);
+
+ char base64_buf[INPUT_BUF_LEN];
+
+ memset(base64_buf, 0, INPUT_BUF_LEN);
+
+ for (int n = 0, m = 0; n < userpass_buf_len;) {
+ uint32_t oct_a = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t oct_b = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t oct_c = n < userpass_buf_len ?
+ (unsigned char) userpass_buf[n++]:0;
+ uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c;
+
+ base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f];
+ base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f];
+ }
+
+ for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++)
+ base64_buf[encoded_len - 1 - l] = '=';
+
+ debugPrint("%s() LN%d: auth string base64 encoded: %s\n",
+ __func__, __LINE__, base64_buf);
+ char *auth = base64_buf;
+
+ int r = snprintf(request_buf,
+ req_buf_len,
+ req_fmt, url, host, rest_port,
+ auth, strlen(sqlstr), sqlstr);
+ if (r >= req_buf_len) {
+ free(request_buf);
+ ERROR_EXIT("too long request");
+ }
+ verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf);
+
+ req_str_len = strlen(request_buf);
+ sent = 0;
+ do {
+#ifdef WINDOWS
+ bytes = send(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent, 0);
+#else
+ bytes = write(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent);
+#endif
+ if (bytes < 0)
+ ERROR_EXIT("writing message to socket");
+ if (bytes == 0)
+ break;
+ sent+=bytes;
+ } while(sent < req_str_len);
+
+ memset(response_buf, 0, RESP_BUF_LEN);
+ resp_len = sizeof(response_buf) - 1;
+ received = 0;
+
+ char resEncodingChunk[] = "Encoding: chunked";
+ char resHttp[] = "HTTP/1.1 ";
+ char resHttpOk[] = "HTTP/1.1 200 OK";
+
+ do {
+#ifdef WINDOWS
+ bytes = recv(pThreadInfo->sockfd, response_buf + received, resp_len - received, 0);
+#else
+ bytes = read(pThreadInfo->sockfd, response_buf + received, resp_len - received);
+#endif
+ verbosePrint("%s() LN%d: bytes:%d\n", __func__, __LINE__, bytes);
+ if (bytes < 0) {
+ free(request_buf);
+ ERROR_EXIT("reading response from socket");
+ }
+ if (bytes == 0)
+ break;
+ received += bytes;
+
+ verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
+ __func__, __LINE__, received, resp_len, response_buf);
+
+ response_buf[RESP_BUF_LEN - 1] = '\0';
+ if (strlen(response_buf)) {
+ if (((NULL == strstr(response_buf, resEncodingChunk))
+ && (NULL != strstr(response_buf, resHttp)))
+ || ((NULL != strstr(response_buf, resHttpOk))
+ && (NULL != strstr(response_buf, "\"status\":")))) {
+ debugPrint(
+ "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
+ __func__, __LINE__, received, resp_len, response_buf);
+ break;
+ }
+ }
+ } while(received < resp_len);
+
+ if (received == resp_len) {
+ free(request_buf);
+ ERROR_EXIT("storing complete response from socket");
+ }
+
+ if (strlen(pThreadInfo->filePath) > 0) {
+ appendResultBufToFile(response_buf, pThreadInfo);
+ }
+
+ free(request_buf);
+
+ response_buf[RESP_BUF_LEN - 1] = '\0';
+ if (NULL == strstr(response_buf, resHttpOk)) {
+ errorPrint("%s() LN%d, Response:\n%s\n",
+ __func__, __LINE__, response_buf);
+ return -1;
+ }
+ return 0;
+}
+
+static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
+ char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
+ if (NULL == dataBuf) {
+ errorPrint2("%s() LN%d, calloc failed! size:%d\n",
+ __func__, __LINE__, TSDB_MAX_SQL_LEN+1);
+ return NULL;
+ }
+
+ int dataLen = 0;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos);
+
+ return dataBuf;
+}
+
+static char *generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len)
+{
+ char* buf = (char*)calloc(len, 1);
+ if (NULL == buf) {
+ printf("calloc failed! size:%d\n", len);
+ return NULL;
+ }
+
+ if (tableSeq % 2) {
+ tstrncpy(buf, "beijing", len);
+ } else {
+ tstrncpy(buf, "shanghai", len);
+ }
+ //rand_string(buf, stbInfo->tags[i].dataLen);
+
+ return buf;
+}
+
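+// Build the "(v1,v2,...)" tag value list for one child table of a super table,
+// generating one value per tag according to its declared data type.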
+static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
+ char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
+ if (NULL == dataBuf) {
+ printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1);
+ return NULL;
+ }
+
+ int dataLen = 0;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "(");
+ for (int i = 0; i < stbInfo->tagCount; i++) {
+ if ((0 == strncasecmp(stbInfo->tags[i].dataType,
+ "binary", strlen("binary")))
+ || (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "nchar", strlen("nchar")))) {
+ if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) {
+ printf("binary or nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ tmfree(dataBuf);
+ return NULL;
+ }
+
+ int32_t tagBufLen = stbInfo->tags[i].dataLen + 1;
+ char *buf = generateBinaryNCharTagValues(tableSeq, tagBufLen);
+ if (NULL == buf) {
+ tmfree(dataBuf);
+ return NULL;
+ }
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "\'%s\',", buf);
+ tmfree(buf);
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "int", strlen("int"))) {
+ if ((g_args.demo_mode) && (i == 0)) {
+ dataLen += snprintf(dataBuf + dataLen,
+ TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64",", (tableSeq % 10) + 1);
+ } else {
+ dataLen += snprintf(dataBuf + dataLen,
+ TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64",", tableSeq);
+ }
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "bigint", strlen("bigint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64",", rand_bigint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "float", strlen("float"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%f,", rand_float());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "double", strlen("double"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%f,", rand_double());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "smallint", strlen("smallint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d,", rand_smallint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "tinyint", strlen("tinyint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d,", rand_tinyint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "bool", strlen("bool"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d,", rand_bool());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "timestamp", strlen("timestamp"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64",", rand_ubigint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "utinyint", strlen("utinyint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d,", rand_utinyint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "usmallint", strlen("usmallint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d,", rand_usmallint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "uint", strlen("uint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%d,", rand_uint());
+ } else if (0 == strncasecmp(stbInfo->tags[i].dataType,
+ "ubigint", strlen("ubigint"))) {
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
+ "%"PRId64",", rand_ubigint());
+ } else {
+ errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType);
+ tmfree(dataBuf);
+ return NULL;
+ }
+ }
+
+ dataLen -= 1;
+ dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")");
+ return dataBuf;
+}
+
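+// Estimate the textual length of one data row and one tag row for a super
+// table from per-type buffer lengths; the results are used to size SQL buffers.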
+static int calcRowLen(SSuperTable* superTbls) {
+ int colIndex;
+ int lenOfOneRow = 0;
+
+ for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
+ char* dataType = superTbls->columns[colIndex].dataType;
+
+ switch(superTbls->columns[colIndex].data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ errorPrint2("get error data type : %s\n", dataType);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp
+
+ int tagIndex;
+ int lenOfTagOfOneRow = 0;
+ for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) {
+ char * dataType = superTbls->tags[tagIndex].dataType;
+ switch (superTbls->tags[tagIndex].data_type)
+ {
+ case TSDB_DATA_TYPE_BINARY:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
+ break;
+ case TSDB_DATA_TYPE_NCHAR:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + INT_BUFF_LEN;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BIGINT_BUFF_LEN;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN;
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + TINYINT_BUFF_LEN;
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BOOL_BUFF_LEN;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + FLOAT_BUFF_LEN;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
+ break;
+ default:
+ errorPrint2("get error tag type : %s\n", dataType);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow;
+
+ return 0;
+}
+
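+// Fetch child table names of a super table with
+// "select tbname from <db>.<stb> limit ... offset ...", growing the name
+// buffer as needed; names and count are returned through the out parameters.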
+static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
+ char* dbName, char* stbName, char** childTblNameOfSuperTbl,
+ int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
+
+ char command[1024] = "\0";
+ char limitBuf[100] = "\0";
+
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
+
+ char* childTblName = *childTblNameOfSuperTbl;
+
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
+ limit, offset);
+
+ // get all child table names using: select tbname from superTblName;
+ snprintf(command, 1024, "select tbname from %s.%s %s", dbName, stbName, limitBuf);
+
+ res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ taos_free_result(res);
+ taos_close(taos);
+ errorPrint2("%s() LN%d, failed to run command %s\n",
+ __func__, __LINE__, command);
+ exit(EXIT_FAILURE);
+ }
+
+ int64_t childTblCount = (limit < 0)?10000:limit;
+ int64_t count = 0;
+ if (childTblName == NULL) {
+ childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
+ if (NULL == childTblName) {
+ taos_free_result(res);
+ taos_close(taos);
+ errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ char* pTblName = childTblName;
+ while((row = taos_fetch_row(res)) != NULL) {
+ int32_t* len = taos_fetch_lengths(res);
+
+ if (0 == strlen((char *)row[0])) {
+ errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n",
+ __func__, __LINE__, count);
+ exit(EXIT_FAILURE);
+ }
+
+ tstrncpy(pTblName, (char *)row[0], len[0]+1);
+ //printf("==== sub table name: %s\n", pTblName);
+ count++;
+ if (count >= childTblCount - 1) {
+ char *tmp = realloc(childTblName,
+ (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
+ if (tmp != NULL) {
+ childTblName = tmp;
+ childTblCount = (int)(childTblCount*1.5);
+ memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0,
+ (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
+ } else {
+ // exit, if allocate more memory failed
+ tmfree(childTblName);
+ taos_free_result(res);
+ taos_close(taos);
+ errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n",
+ __func__, __LINE__, dbName, stbName);
+ exit(EXIT_FAILURE);
+ }
+ }
+ pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
+ }
+
+ *childTblCountOfSuperTbl = count;
+ *childTblNameOfSuperTbl = childTblName;
+
+ taos_free_result(res);
+ return 0;
+}
+
+static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
+ char* stbName, char** childTblNameOfSuperTbl,
+ int64_t* childTblCountOfSuperTbl) {
+
+ return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName,
+ childTblNameOfSuperTbl, childTblCountOfSuperTbl,
+ -1, 0);
+}
+
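+// Read the schema of an existing super table with "describe db.stb" and fill
+// in the tag and column descriptors (type, length, note), then recompute the
+// row lengths.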
+static int getSuperTableFromServer(TAOS * taos, char* dbName,
+ SSuperTable* superTbls) {
+
+ char command[1024] = "\0";
+ TAOS_RES * res;
+ TAOS_ROW row = NULL;
+ int count = 0;
+
+ // get the schema using: describe superTblName;
+ snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName);
+ res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ printf("failed to run command %s\n", command);
+ taos_free_result(res);
+ return -1;
+ }
+
+ int tagIndex = 0;
+ int columnIndex = 0;
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+ while((row = taos_fetch_row(res)) != NULL) {
+ if (0 == count) {
+ count++;
+ continue;
+ }
+
+ if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) {
+ tstrncpy(superTbls->tags[tagIndex].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+ if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "INT", strlen("INT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BINARY", strlen("BINARY"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BOOL", strlen("BOOL"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "TINYINT UNSIGNED", strlen("TINYINT UNSIGNED"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UTINYINT;
+ tstrncpy(superTbls->tags[tagIndex].dataType,"UTINYINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "SMALLINT UNSIGNED", strlen("SMALLINT UNSIGNED"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_USMALLINT;
+ tstrncpy(superTbls->tags[tagIndex].dataType,"USMALLINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "INT UNSIGNED", strlen("INT UNSIGNED"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UINT;
+ tstrncpy(superTbls->tags[tagIndex].dataType,"UINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UBIGINT;
+ tstrncpy(superTbls->tags[tagIndex].dataType,"UBIGINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ superTbls->tags[tagIndex].dataLen =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ tstrncpy(superTbls->tags[tagIndex].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ min(NOTE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
+ if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) {
+ tstrncpy(superTbls->tags[tagIndex].dataType,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ }
+ tagIndex++;
+ } else {
+ tstrncpy(superTbls->columns[columnIndex].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+
+
+ if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "INT", strlen("INT")) &&
+ strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "TINYINT", strlen("TINYINT")) &&
+ strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "SMALLINT", strlen("SMALLINT")) &&
+ strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BIGINT", strlen("BIGINT")) &&
+ strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BINARY", strlen("BINARY"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BOOL", strlen("BOOL"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "TINYINT UNSIGNED", strlen("TINYINT UNSIGNED"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UTINYINT;
+ tstrncpy(superTbls->columns[columnIndex].dataType,"UTINYINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "SMALLINT UNSIGNED", strlen("SMALLINT UNSIGNED"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_USMALLINT;
+ tstrncpy(superTbls->columns[columnIndex].dataType,"USMALLINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "INT UNSIGNED", strlen("INT UNSIGNED"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UINT;
+ tstrncpy(superTbls->columns[columnIndex].dataType,"UINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UBIGINT;
+ tstrncpy(superTbls->columns[columnIndex].dataType,"UBIGINT",
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ } else {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ superTbls->columns[columnIndex].dataLen =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ tstrncpy(superTbls->columns[columnIndex].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ min(NOTE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
+
+ if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) {
+ tstrncpy(superTbls->columns[columnIndex].dataType,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(DATATYPE_BUFF_LEN,
+ fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ }
+
+ columnIndex++;
+ }
+ count++;
+ }
+
+ superTbls->columnCount = columnIndex;
+ superTbls->tagCount = tagIndex;
+ taos_free_result(res);
+
+ calcRowLen(superTbls);
+
+ /*
+ if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
+ //get all child table name use cmd: select tbname from superTblName;
+ int childTblCount = 10000;
+ superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
+ if (superTbls->childTblName == NULL) {
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ return -1;
+ }
+ getAllChildNameOfSuperTable(taos, dbName,
+ superTbls->stbName,
+ &superTbls->childTblName,
+ &superTbls->childTblCount);
+ }
+ */
+ return 0;
+}
+
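+// Compose and execute the CREATE TABLE statement for a super table from its
+// column and tag descriptors; in demo mode the well-known CURRENT/VOLTAGE/PHASE
+// columns and groupId/location tags are used.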
+static int createSuperTable(
+ TAOS * taos, char* dbName,
+ SSuperTable* superTbl) {
+
+ char *command = calloc(1, BUFFER_SIZE);
+ assert(command);
+
+ char cols[COL_BUFFER_LEN] = "\0";
+ int len = 0;
+
+ int lenOfOneRow = 0;
+
+ if (superTbl->columnCount == 0) {
+ errorPrint2("%s() LN%d, super table column count is %d\n",
+ __func__, __LINE__, superTbl->columnCount);
+ free(command);
+ return -1;
+ }
+
+ for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
+
+ switch(superTbl->columns[colIndex].data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ",C%d %s(%d)", colIndex, "BINARY",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ",C%d %s(%d)", colIndex, "NCHAR",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (colIndex == 1)) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ", VOLTAGE INT");
+ } else {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
+ }
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "BIGINT");
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "SMALLINT");
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT");
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL");
+ lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (colIndex == 0) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT");
+ } else if (colIndex == 2) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT");
+ }
+ } else {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT");
+ }
+
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "DOUBLE");
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "TIMESTAMP");
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "TINYINT UNSIGNED");
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "SMALLINT UNSIGNED");
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "INT UNSIGNED");
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "BIGINT UNSIGNED");
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ default:
+ taos_close(taos);
+ free(command);
+ errorPrint2("%s() LN%d, config error data type : %s\n",
+ __func__, __LINE__, superTbl->columns[colIndex].dataType);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp
+
+ // save for creating child table
+ superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
+ if (NULL == superTbl->colsOfCreateChildTable) {
+ taos_close(taos);
+ free(command);
+ errorPrint2("%s() LN%d, Failed when calloc, size:%d",
+ __func__, __LINE__, len+20);
+ exit(EXIT_FAILURE);
+ }
+
+ snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
+ verbosePrint("%s() LN%d: %s\n",
+ __func__, __LINE__, superTbl->colsOfCreateChildTable);
+
+ if (superTbl->tagCount == 0) {
+ errorPrint2("%s() LN%d, super table tag count is %d\n",
+ __func__, __LINE__, superTbl->tagCount);
+ free(command);
+ return -1;
+ }
+
+ char tags[TSDB_MAX_TAGS_LEN] = "\0";
+ int tagIndex;
+ len = 0;
+
+ int lenOfTagOfOneRow = 0;
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "(");
+ for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) {
+ char* dataType = superTbl->tags[tagIndex].dataType;
+
+ if (strcasecmp(dataType, "BINARY") == 0) {
+ if ((g_args.demo_mode) && (tagIndex == 1)) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "location BINARY(%d),",
+ superTbl->tags[tagIndex].dataLen);
+ } else {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s(%d),", tagIndex, "BINARY",
+ superTbl->tags[tagIndex].dataLen);
+ }
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "NCHAR") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s(%d),", tagIndex,
+ "NCHAR", superTbl->tags[tagIndex].dataLen);
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
+ } else if (strcasecmp(dataType, "INT") == 0) {
+ if ((g_args.demo_mode) && (tagIndex == 0)) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "groupId INT, ");
+ } else {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "INT");
+ }
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "BIGINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "BIGINT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "SMALLINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "SMALLINT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "TINYINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "TINYINT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "BOOL") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "BOOL");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BOOL_BUFF_LEN;
+ } else if (strcasecmp(dataType, "FLOAT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "FLOAT");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + FLOAT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "DOUBLE") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "DOUBLE");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
+ } else if (strcasecmp(dataType, "UTINYINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "TINYINT UNSIGNED");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "USMALLINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "SMALLINT UNSIGNED");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "UINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "INT UNSIGNED");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "UBIGINT") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "BIGINT UNSIGNED");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN;
+ } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
+ "T%d %s,", tagIndex, "TIMESTAMP");
+ lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TIMESTAMP_BUFF_LEN;
+ } else {
+ taos_close(taos);
+ free(command);
+            errorPrint2("%s() LN%d, config error, unsupported tag type: %s\n",
+ __func__, __LINE__, dataType);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ len -= 1;
+ len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")");
+
+ superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
+
+
+ snprintf(command, BUFFER_SIZE,
+ "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s",
+ dbName, superTbl->stbName, cols, tags);
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
+ errorPrint2("create supertable %s failed!\n\n",
+ superTbl->stbName);
+ free(command);
+ return -1;
+ }
+
+ debugPrint("create supertable %s success!\n\n", superTbl->stbName);
+ free(command);
+ return 0;
+}
+
+int createDatabasesAndStables(char *command) {
+ TAOS * taos = NULL;
+ int ret = 0;
+ taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
+ if (taos == NULL) {
+ errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ return -1;
+ }
+
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ if (g_Dbs.db[i].drop) {
+ sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName);
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
+ taos_close(taos);
+ return -1;
+ }
+
+ int dataLen = 0;
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s",
+ g_Dbs.db[i].dbName);
+
+ if (g_Dbs.db[i].dbCfg.blocks > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " BLOCKS %d",
+ g_Dbs.db[i].dbCfg.blocks);
+ }
+ if (g_Dbs.db[i].dbCfg.cache > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " CACHE %d",
+ g_Dbs.db[i].dbCfg.cache);
+ }
+ if (g_Dbs.db[i].dbCfg.days > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " DAYS %d",
+ g_Dbs.db[i].dbCfg.days);
+ }
+ if (g_Dbs.db[i].dbCfg.keep > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " KEEP %d",
+ g_Dbs.db[i].dbCfg.keep);
+ }
+ if (g_Dbs.db[i].dbCfg.quorum > 1) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " QUORUM %d",
+ g_Dbs.db[i].dbCfg.quorum);
+ }
+ if (g_Dbs.db[i].dbCfg.replica > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " REPLICA %d",
+ g_Dbs.db[i].dbCfg.replica);
+ }
+ if (g_Dbs.db[i].dbCfg.update > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " UPDATE %d",
+ g_Dbs.db[i].dbCfg.update);
+ }
+ //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
+ // dataLen += snprintf(command + dataLen,
+ // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode);
+ //}
+ if (g_Dbs.db[i].dbCfg.minRows > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " MINROWS %d",
+ g_Dbs.db[i].dbCfg.minRows);
+ }
+ if (g_Dbs.db[i].dbCfg.maxRows > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " MAXROWS %d",
+ g_Dbs.db[i].dbCfg.maxRows);
+ }
+ if (g_Dbs.db[i].dbCfg.comp > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " COMP %d",
+ g_Dbs.db[i].dbCfg.comp);
+ }
+ if (g_Dbs.db[i].dbCfg.walLevel > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " wal %d",
+ g_Dbs.db[i].dbCfg.walLevel);
+ }
+ if (g_Dbs.db[i].dbCfg.cacheLast > 0) {
+ dataLen += snprintf(command + dataLen,
+ BUFFER_SIZE - dataLen, " CACHELAST %d",
+ g_Dbs.db[i].dbCfg.cacheLast);
+ }
+ if (g_Dbs.db[i].dbCfg.fsync > 0) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
+ " FSYNC %d", g_Dbs.db[i].dbCfg.fsync);
+ }
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
+ "ns", 2))
+ || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
+ "us", 2))) {
+ dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
+ " precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
+ }
+
+ if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
+ taos_close(taos);
+ errorPrint("\ncreate database %s failed!\n\n",
+ g_Dbs.db[i].dbName);
+ return -1;
+ }
+ printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
+ }
+
+ debugPrint("%s() LN%d supertbl count:%"PRIu64"\n",
+ __func__, __LINE__, g_Dbs.db[i].superTblCount);
+
+ int validStbCount = 0;
+
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
+ g_Dbs.db[i].superTbls[j].stbName);
+ ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);
+
+ if ((ret != 0) || (g_Dbs.db[i].drop)) {
+ ret = createSuperTable(taos, g_Dbs.db[i].dbName,
+ &g_Dbs.db[i].superTbls[j]);
+
+ if (0 != ret) {
+ errorPrint("create super table %"PRIu64" failed!\n\n", j);
+ continue;
+ }
+ } else {
+ ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
+ &g_Dbs.db[i].superTbls[j]);
+ if (0 != ret) {
+ errorPrint2("\nget super table %s.%s info failed!\n\n",
+ g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName);
+ continue;
+ }
+ }
+ validStbCount ++;
+ }
+ g_Dbs.db[i].superTblCount = validStbCount;
+ }
+
+ taos_close(taos);
+ return 0;
+}
+
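+/*
+  Thread worker: creates the child tables in [start_table_from, end_table_to].
+  When a super table is used, multiple "if not exists ... using ... tags"
+  clauses are batched into one CREATE TABLE statement.
+ */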
+static void* createTable(void *sarg)
+{
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ setThreadName("createTable");
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+
+ int buff_len = BUFFER_SIZE;
+
+ pThreadInfo->buffer = calloc(buff_len, 1);
+ if (pThreadInfo->buffer == NULL) {
+ errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
+ }
+
+ int len = 0;
+ int batchNum = 0;
+
+ verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
+ __func__, __LINE__,
+ pThreadInfo->start_table_from, pThreadInfo->end_table_to);
+
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ if (0 == g_Dbs.use_metric) {
+ snprintf(pThreadInfo->buffer, buff_len,
+ "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;",
+ pThreadInfo->db_name,
+ g_args.tb_prefix, i,
+ pThreadInfo->cols);
+ batchNum ++;
+ } else {
+ if (stbInfo == NULL) {
+ free(pThreadInfo->buffer);
+ errorPrint2("%s() LN%d, use metric, but super table info is NULL\n",
+ __func__, __LINE__);
+ exit(EXIT_FAILURE);
+ } else {
+ if (0 == len) {
+ batchNum = 0;
+ memset(pThreadInfo->buffer, 0, buff_len);
+ len += snprintf(pThreadInfo->buffer + len,
+ buff_len - len, "CREATE TABLE ");
+ }
+
+ char* tagsValBuf = NULL;
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, i);
+ } else {
+ if (0 == stbInfo->tagSampleCount) {
+ free(pThreadInfo->buffer);
+ ERROR_EXIT("use sample file for tag, but has no content!\n");
+ }
+ tagsValBuf = getTagValueFromTagSample(
+ stbInfo,
+ i % stbInfo->tagSampleCount);
+ }
+
+ if (NULL == tagsValBuf) {
+ free(pThreadInfo->buffer);
+ ERROR_EXIT("use metric, but tag buffer is NULL\n");
+ }
+ len += snprintf(pThreadInfo->buffer + len,
+ buff_len - len,
+ "if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
+ pThreadInfo->db_name, stbInfo->childTblPrefix,
+ i, pThreadInfo->db_name,
+ stbInfo->stbName, tagsValBuf);
+ free(tagsValBuf);
+ batchNum++;
+ if ((batchNum < stbInfo->batchCreateTableNum)
+ && ((buff_len - len)
+ >= (stbInfo->lenOfTagOfOneRow + 256))) {
+ continue;
+ }
+ }
+ }
+
+ len = 0;
+
+ if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
+ NO_INSERT_TYPE, false)) {
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ free(pThreadInfo->buffer);
+ return NULL;
+ }
+ pThreadInfo->tables_created += batchNum;
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
+ pThreadInfo->threadID, pThreadInfo->start_table_from, i);
+ lastPrintTime = currentPrintTime;
+ }
+ }
+
+ if (0 != len) {
+ if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
+ NO_INSERT_TYPE, false)) {
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ }
+ pThreadInfo->tables_created += batchNum;
+ }
+ free(pThreadInfo->buffer);
+ return NULL;
+}
+
+static int startMultiThreadCreateChildTable(
+ char* cols, int threads, uint64_t tableFrom, int64_t ntables,
+ char* db_name, SSuperTable* stbInfo) {
+
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+
+ if ((NULL == pids) || (NULL == infos)) {
+ ERROR_EXIT("createChildTable malloc failed\n");
+ }
+
+ if (threads < 1) {
+ threads = 1;
+ }
+
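+    // Split ntables across the threads: each thread handles 'a' tables and
+    // the first 'b' (= ntables % threads) threads handle one extra table.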
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ b = ntables % threads;
+
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ pThreadInfo->threadID = i;
+ tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
+ pThreadInfo->stbInfo = stbInfo;
+ verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
+ pThreadInfo->taos = taos_connect(
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ db_name,
+ g_Dbs.port);
+ if (pThreadInfo->taos == NULL) {
+ errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n",
+ __func__, __LINE__, taos_errstr(NULL));
+ free(pids);
+ free(infos);
+ return -1;
+ }
+
+ pThreadInfo->start_table_from = tableFrom;
+        pThreadInfo->ntables = i < b ? a + 1 : a;
+        pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
+ tableFrom = pThreadInfo->end_table_to + 1;
+ pThreadInfo->use_metric = true;
+ pThreadInfo->cols = cols;
+ pThreadInfo->minDelay = UINT64_MAX;
+ pThreadInfo->tables_created = 0;
+ pthread_create(pids + i, NULL, createTable, pThreadInfo);
+ }
+
+ for (int i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
+ }
+
+ for (int i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ taos_close(pThreadInfo->taos);
+
+ g_actualChildTables += pThreadInfo->tables_created;
+ }
+
+ free(pids);
+ free(infos);
+
+ return 0;
+}
+
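+/*
+  Pre-creates child tables for every database. Super tables that auto-create
+  their children or whose children already exist are skipped; without super
+  tables, plain tables are built from the global column definitions.
+ */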
+static void createChildTables() {
+ char tblColsBuf[TSDB_MAX_BYTES_PER_ROW];
+ int len;
+
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ if (g_Dbs.use_metric) {
+ if (g_Dbs.db[i].superTblCount > 0) {
+ // with super table
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ if ((AUTO_CREATE_SUBTBL
+ == g_Dbs.db[i].superTbls[j].autoCreateTable)
+ || (TBL_ALREADY_EXISTS
+ == g_Dbs.db[i].superTbls[j].childTblExists)) {
+ continue;
+ }
+ verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
+ g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
+ uint64_t startFrom = 0;
+
+ verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n",
+ __func__, __LINE__, g_totalChildTables, startFrom);
+
+ startMultiThreadCreateChildTable(
+ g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
+ g_Dbs.threadCountForCreateTbl,
+ startFrom,
+ g_Dbs.db[i].superTbls[j].childTblCount,
+ g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
+ }
+ }
+ } else {
+ // normal table
+ len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP");
+ for (int j = 0; j < g_args.columnCount; j++) {
+ if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0)
+ || (strncasecmp(g_args.dataType[j],
+ "NCHAR", strlen("NCHAR")) == 0)) {
+ snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
+ ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth);
+ } else {
+ snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
+ ",C%d %s", j, g_args.dataType[j]);
+ }
+ len = strlen(tblColsBuf);
+ }
+
+ snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")");
+
+ verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
+ __func__, __LINE__,
+ g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf);
+ startMultiThreadCreateChildTable(
+ tblColsBuf,
+ g_Dbs.threadCountForCreateTbl,
+ 0,
+ g_args.ntables,
+ g_Dbs.db[i].dbName,
+ NULL);
+ }
+ }
+}
+
+/*
+  Read tag values from the CSV file into memory. The buffer starts with room
+  for 10000 lines and grows by 1.5x whenever it is nearly full.
+ */
+static int readTagFromCsvFileToMem(SSuperTable * stbInfo) {
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
+
+ FILE *fp = fopen(stbInfo->tagsFile, "r");
+ if (fp == NULL) {
+ printf("Failed to open tags file: %s, reason:%s\n",
+ stbInfo->tagsFile, strerror(errno));
+ return -1;
+ }
+
+ if (stbInfo->tagDataBuf) {
+ free(stbInfo->tagDataBuf);
+ stbInfo->tagDataBuf = NULL;
+ }
+
+ int tagCount = 10000;
+ int count = 0;
+ char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount);
+ if (tagDataBuf == NULL) {
+ printf("Failed to calloc, reason:%s\n", strerror(errno));
+ fclose(fp);
+ return -1;
+ }
+
+ while((readLen = tgetline(&line, &n, fp)) != -1) {
+ if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
+ line[--readLen] = 0;
+ }
+
+ if (readLen == 0) {
+ continue;
+ }
+
+ memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen);
+ count++;
+
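+        // Grow the tag buffer by 1.5x when it is nearly full.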
+ if (count >= tagCount - 1) {
+ char *tmp = realloc(tagDataBuf,
+ (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow);
+ if (tmp != NULL) {
+ tagDataBuf = tmp;
+ tagCount = (int)(tagCount*1.5);
+ memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow,
+ 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow));
+ } else {
+                // exit if allocating more memory failed
+                printf("realloc failed while saving tag values from %s\n", stbInfo->tagsFile);
+ tmfree(tagDataBuf);
+ free(line);
+ fclose(fp);
+ return -1;
+ }
+ }
+ }
+
+ stbInfo->tagDataBuf = tagDataBuf;
+ stbInfo->tagSampleCount = count;
+
+ free(line);
+ fclose(fp);
+ return 0;
+}
+
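+/*
+  Counts the data lines in the sample CSV file and uses that count as the
+  number of rows to insert for the super table.
+ */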
+static void getAndSetRowsFromCsvFile(SSuperTable *stbInfo) {
+ FILE *fp = fopen(stbInfo->sampleFile, "r");
+ int line_count = 0;
+ if (fp == NULL) {
+ errorPrint("Failed to open sample file: %s, reason:%s\n",
+ stbInfo->sampleFile, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ char *buf = calloc(1, stbInfo->maxSqlLen);
+ while (fgets(buf, stbInfo->maxSqlLen, fp)) {
+ line_count++;
+ }
+ fclose(fp);
+ tmfree(buf);
+ stbInfo->insertRows = line_count;
+}
+
+/*
+  Read up to MAX_SAMPLES sample rows from the CSV file. If the file ends before
+  enough rows are read, rewind and keep reading until MAX_SAMPLES rows are collected.
+ */
+static int generateSampleFromCsvForStb(
+ SSuperTable* stbInfo) {
+ size_t n = 0;
+ ssize_t readLen = 0;
+ char * line = NULL;
+ int getRows = 0;
+
+ FILE* fp = fopen(stbInfo->sampleFile, "r");
+ if (fp == NULL) {
+ errorPrint("Failed to open sample file: %s, reason:%s\n",
+ stbInfo->sampleFile, strerror(errno));
+ return -1;
+ }
+
+ assert(stbInfo->sampleDataBuf);
+ memset(stbInfo->sampleDataBuf, 0,
+ MAX_SAMPLES * stbInfo->lenOfOneRow);
+ while(1) {
+ readLen = tgetline(&line, &n, fp);
+ if (-1 == readLen) {
+ if(0 != fseek(fp, 0, SEEK_SET)) {
+ errorPrint("Failed to fseek file: %s, reason:%s\n",
+ stbInfo->sampleFile, strerror(errno));
+ fclose(fp);
+ return -1;
+ }
+ continue;
+ }
+
+ if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
+ line[--readLen] = 0;
+ }
+
+ if (readLen == 0) {
+ continue;
+ }
+
+ if (readLen > stbInfo->lenOfOneRow) {
+ printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
+ (int32_t)readLen, stbInfo->lenOfOneRow);
+ continue;
+ }
+
+ memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow,
+ line, readLen);
+ getRows++;
+
+ if (getRows == MAX_SAMPLES) {
+ break;
+ }
+ }
+
+ fclose(fp);
+ tmfree(line);
+ return 0;
+}
+
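+/*
+  Parses the "columns" and "tags" arrays of a super table definition. Each
+  entry may carry a "count" field, which expands into that many columns or
+  tags of the same type and length.
+ */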
+static bool getColumnAndTagTypeFromInsertJsonFile(
+ cJSON* stbInfo, SSuperTable* superTbls) {
+ bool ret = false;
+
+ // columns
+ cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
+ if (columns && columns->type != cJSON_Array) {
+ errorPrint("%s", "failed to read json, columns not found\n");
+ goto PARSE_OVER;
+ } else if (NULL == columns) {
+ superTbls->columnCount = 0;
+ superTbls->tagCount = 0;
+ return true;
+ }
+
+ int columnSize = cJSON_GetArraySize(columns);
+ if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
+ errorPrint("failed to read json, column size overflow, max column size is %d\n",
+ TSDB_MAX_COLUMNS);
+ goto PARSE_OVER;
+ }
+
+ int count = 1;
+ int index = 0;
+ StrColumn columnCase;
+
+ //superTbls->columnCount = columnSize;
+ for (int k = 0; k < columnSize; ++k) {
+ cJSON* column = cJSON_GetArrayItem(columns, k);
+ if (column == NULL) continue;
+
+ count = 1;
+ cJSON* countObj = cJSON_GetObjectItem(column, "count");
+ if (countObj && countObj->type == cJSON_Number) {
+ count = countObj->valueint;
+ } else if (countObj && countObj->type != cJSON_Number) {
+ errorPrint("%s", "failed to read json, column count not found\n");
+ goto PARSE_OVER;
+ } else {
+ count = 1;
+ }
+
+ // column info
+ memset(&columnCase, 0, sizeof(StrColumn));
+ cJSON *dataType = cJSON_GetObjectItem(column, "type");
+ if (!dataType || dataType->type != cJSON_String
+ || dataType->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, column type not found\n");
+ goto PARSE_OVER;
+ }
+ //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN);
+ tstrncpy(columnCase.dataType, dataType->valuestring,
+ min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1));
+
+ cJSON* dataLen = cJSON_GetObjectItem(column, "len");
+ if (dataLen && dataLen->type == cJSON_Number) {
+ columnCase.dataLen = dataLen->valueint;
+ } else if (dataLen && dataLen->type != cJSON_Number) {
+ debugPrint("%s() LN%d: failed to read json, column len not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else {
+ columnCase.dataLen = SMALL_BUFF_LEN;
+ }
+
+ for (int n = 0; n < count; ++n) {
+ tstrncpy(superTbls->columns[index].dataType,
+ columnCase.dataType,
+ min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
+
+ superTbls->columns[index].dataLen = columnCase.dataLen;
+ index++;
+ }
+ }
+
+ if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
+ errorPrint("failed to read json, column size overflow, allowed max column size is %d\n",
+ MAX_NUM_COLUMNS);
+ goto PARSE_OVER;
+ }
+
+ superTbls->columnCount = index;
+
+ for (int c = 0; c < superTbls->columnCount; c++) {
+ if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "UTINYINT", strlen("UTINYINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_UTINYINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "USMALLINT", strlen("USMALLINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_USMALLINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "UINT", strlen("UINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_UINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "UBIGINT", strlen("UBIGINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_UBIGINT;
+ } else {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ }
+
+ count = 1;
+ index = 0;
+ // tags
+ cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
+ if (!tags || tags->type != cJSON_Array) {
+ errorPrint("%s", "failed to read json, tags not found\n");
+ goto PARSE_OVER;
+ }
+
+ int tagSize = cJSON_GetArraySize(tags);
+ if (tagSize > TSDB_MAX_TAGS) {
+ errorPrint("failed to read json, tags size overflow, max tag size is %d\n",
+ TSDB_MAX_TAGS);
+ goto PARSE_OVER;
+ }
+
+ //superTbls->tagCount = tagSize;
+ for (int k = 0; k < tagSize; ++k) {
+ cJSON* tag = cJSON_GetArrayItem(tags, k);
+ if (tag == NULL) continue;
+
+ count = 1;
+ cJSON* countObj = cJSON_GetObjectItem(tag, "count");
+ if (countObj && countObj->type == cJSON_Number) {
+ count = countObj->valueint;
+ } else if (countObj && countObj->type != cJSON_Number) {
+ errorPrint("%s", "failed to read json, column count not found\n");
+ goto PARSE_OVER;
+ } else {
+ count = 1;
+ }
+
+ // column info
+ memset(&columnCase, 0, sizeof(StrColumn));
+ cJSON *dataType = cJSON_GetObjectItem(tag, "type");
+ if (!dataType || dataType->type != cJSON_String
+ || dataType->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, tag type not found\n");
+ goto PARSE_OVER;
+ }
+ tstrncpy(columnCase.dataType, dataType->valuestring,
+ min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1));
+
+ cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
+ if (dataLen && dataLen->type == cJSON_Number) {
+ columnCase.dataLen = dataLen->valueint;
+ } else if (dataLen && dataLen->type != cJSON_Number) {
+ errorPrint("%s", "failed to read json, column len not found\n");
+ goto PARSE_OVER;
+ } else {
+ columnCase.dataLen = 0;
+ }
+
+ for (int n = 0; n < count; ++n) {
+ tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
+ min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
+ superTbls->tags[index].dataLen = columnCase.dataLen;
+ index++;
+ }
+ }
+
+ if (index > TSDB_MAX_TAGS) {
+ errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n",
+ TSDB_MAX_TAGS);
+ goto PARSE_OVER;
+ }
+
+ superTbls->tagCount = index;
+
+ for (int t = 0; t < superTbls->tagCount; t++) {
+ if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "UTINYINT", strlen("UTINYINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_UTINYINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "USMALLINT", strlen("USMALLINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_USMALLINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "UINT", strlen("UINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_UINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "UBIGINT", strlen("UBIGINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_UBIGINT;
+ } else {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ }
+
+ if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
+ errorPrint("columns + tags is more than allowed max columns count: %d\n",
+ TSDB_MAX_COLUMNS);
+ goto PARSE_OVER;
+ }
+ ret = true;
+
+PARSE_OVER:
+ return ret;
+}
+
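+/*
+  Parses the insert JSON file: connection settings, global insert options,
+  and the per-database / per-super-table configuration.
+ */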
+static bool getMetaFromInsertJsonFile(cJSON* root) {
+ bool ret = false;
+
+ cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
+ if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
+ tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
+ }
+
+ cJSON* host = cJSON_GetObjectItem(root, "host");
+ if (host && host->type == cJSON_String && host->valuestring != NULL) {
+ tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE);
+ } else if (!host) {
+ tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+ } else {
+ errorPrint("%s", "failed to read json, host not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* port = cJSON_GetObjectItem(root, "port");
+ if (port && port->type == cJSON_Number) {
+ g_Dbs.port = port->valueint;
+ } else if (!port) {
+ g_Dbs.port = 6030;
+ }
+
+ cJSON* user = cJSON_GetObjectItem(root, "user");
+ if (user && user->type == cJSON_String && user->valuestring != NULL) {
+ tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE);
+ } else if (!user) {
+ tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE);
+ }
+
+ cJSON* password = cJSON_GetObjectItem(root, "password");
+ if (password && password->type == cJSON_String && password->valuestring != NULL) {
+ tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
+ } else if (!password) {
+ tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN);
+ }
+
+ cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
+ if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) {
+ tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN);
+ } else if (!resultfile) {
+ tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN);
+ }
+
+ cJSON* threads = cJSON_GetObjectItem(root, "thread_count");
+ if (threads && threads->type == cJSON_Number) {
+ g_Dbs.threadCount = threads->valueint;
+ } else if (!threads) {
+ g_Dbs.threadCount = 1;
+ } else {
+ errorPrint("%s", "failed to read json, threads not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl");
+ if (threads2 && threads2->type == cJSON_Number) {
+ g_Dbs.threadCountForCreateTbl = threads2->valueint;
+ } else if (!threads2) {
+ g_Dbs.threadCountForCreateTbl = 1;
+ } else {
+ errorPrint("%s", "failed to read json, threads2 not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
+ if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
+ if (gInsertInterval->valueint <0) {
+ errorPrint("%s", "failed to read json, insert interval input mistake\n");
+ goto PARSE_OVER;
+ }
+ g_args.insert_interval = gInsertInterval->valueint;
+ } else if (!gInsertInterval) {
+ g_args.insert_interval = 0;
+ } else {
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
+ if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
+ goto PARSE_OVER;
+
+ }
+ g_args.interlaceRows = interlaceRows->valueint;
+ } else if (!interlaceRows) {
+        g_args.interlaceRows = 0; // 0 means progressive mode, > 0 means interlace mode; the max value must be less than or equal to num_of_records_per_req
+ } else {
+ errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len");
+ if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
+ if (maxSqlLen->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_args.max_sql_len = maxSqlLen->valueint;
+ } else if (!maxSqlLen) {
+ g_args.max_sql_len = (1024*1024);
+ } else {
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+
+ cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req");
+ if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
+ if (numRecPerReq->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
+ printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n",
+ numRecPerReq->valueint, MAX_RECORDS_PER_REQ);
+ printf(" number of records per request value will be set to %d\n\n",
+ MAX_RECORDS_PER_REQ);
+ prompt();
+ numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
+ }
+ g_args.reqPerReq = numRecPerReq->valueint;
+ } else if (!numRecPerReq) {
+ g_args.reqPerReq = MAX_RECORDS_PER_REQ;
+ } else {
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+
+ cJSON* prepareRand = cJSON_GetObjectItem(root, "prepared_rand");
+ if (prepareRand && prepareRand->type == cJSON_Number) {
+ if (prepareRand->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, prepared_rand input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_args.prepared_rand = prepareRand->valueint;
+ } else if (!prepareRand) {
+ g_args.prepared_rand = 10000;
+ } else {
+ errorPrint("%s() LN%d, failed to read json, prepared_rand not found\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+
+ cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+ if (answerPrompt
+ && answerPrompt->type == cJSON_String
+ && answerPrompt->valuestring != NULL) {
+ if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
+ g_args.answer_yes = false;
+ } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
+ g_args.answer_yes = true;
+ } else {
+ g_args.answer_yes = false;
+ }
+ } else if (!answerPrompt) {
+        g_args.answer_yes = true;   // default is "no", which means answer_yes is true.
+ } else {
+ errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n");
+ goto PARSE_OVER;
+ }
+
+    // rows per table must be less than the insert batch size
+ if (g_args.interlaceRows > g_args.reqPerReq) {
+ printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n",
+ g_args.interlaceRows, g_args.reqPerReq);
+ printf(" interlace rows value will be set to num_of_records_per_req %u\n\n",
+ g_args.reqPerReq);
+ prompt();
+ g_args.interlaceRows = g_args.reqPerReq;
+ }
+
+ cJSON* dbs = cJSON_GetObjectItem(root, "databases");
+ if (!dbs || dbs->type != cJSON_Array) {
+ errorPrint("%s", "failed to read json, databases not found\n");
+ goto PARSE_OVER;
+ }
+
+ int dbSize = cJSON_GetArraySize(dbs);
+ if (dbSize > MAX_DB_COUNT) {
+ errorPrint(
+ "failed to read json, databases size overflow, max database is %d\n",
+ MAX_DB_COUNT);
+ goto PARSE_OVER;
+ }
+ g_Dbs.db = calloc(1, sizeof(SDataBase)*dbSize);
+ assert(g_Dbs.db);
+ g_Dbs.dbCount = dbSize;
+ for (int i = 0; i < dbSize; ++i) {
+ cJSON* dbinfos = cJSON_GetArrayItem(dbs, i);
+ if (dbinfos == NULL) continue;
+
+ // dbinfo
+ cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
+ if (!dbinfo || dbinfo->type != cJSON_Object) {
+ errorPrint("%s", "failed to read json, dbinfo not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
+ if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, db name not found\n");
+ goto PARSE_OVER;
+ }
+ tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
+
+ cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
+ if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
+ if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) {
+ g_Dbs.db[i].drop = true;
+ } else {
+ g_Dbs.db[i].drop = false;
+ }
+ } else if (!drop) {
+ g_Dbs.db[i].drop = g_args.drop_database;
+ } else {
+ errorPrint("%s", "failed to read json, drop input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
+ if (precision && precision->type == cJSON_String
+ && precision->valuestring != NULL) {
+ tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
+ SMALL_BUFF_LEN);
+ } else if (!precision) {
+ memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN);
+ } else {
+ errorPrint("%s", "failed to read json, precision not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* update = cJSON_GetObjectItem(dbinfo, "update");
+ if (update && update->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.update = update->valueint;
+ } else if (!update) {
+ g_Dbs.db[i].dbCfg.update = -1;
+ } else {
+ errorPrint("%s", "failed to read json, update not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica");
+ if (replica && replica->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.replica = replica->valueint;
+ } else if (!replica) {
+ g_Dbs.db[i].dbCfg.replica = -1;
+ } else {
+ errorPrint("%s", "failed to read json, replica not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep");
+ if (keep && keep->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.keep = keep->valueint;
+ } else if (!keep) {
+ g_Dbs.db[i].dbCfg.keep = -1;
+ } else {
+ errorPrint("%s", "failed to read json, keep not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* days = cJSON_GetObjectItem(dbinfo, "days");
+ if (days && days->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.days = days->valueint;
+ } else if (!days) {
+ g_Dbs.db[i].dbCfg.days = -1;
+ } else {
+ errorPrint("%s", "failed to read json, days not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache");
+ if (cache && cache->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.cache = cache->valueint;
+ } else if (!cache) {
+ g_Dbs.db[i].dbCfg.cache = -1;
+ } else {
+ errorPrint("%s", "failed to read json, cache not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks");
+ if (blocks && blocks->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.blocks = blocks->valueint;
+ } else if (!blocks) {
+ g_Dbs.db[i].dbCfg.blocks = -1;
+ } else {
+ errorPrint("%s", "failed to read json, block not found\n");
+ goto PARSE_OVER;
+ }
+
+ //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode");
+ //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) {
+ // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint;
+ //} else if (!maxtablesPerVnode) {
+ // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES;
+ //} else {
+ // printf("failed to read json, maxtablesPerVnode not found");
+ // goto PARSE_OVER;
+ //}
+
+ cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows");
+ if (minRows && minRows->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
+ } else if (!minRows) {
+ g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
+ } else {
+ errorPrint("%s", "failed to read json, minRows not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows");
+ if (maxRows && maxRows->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
+ } else if (!maxRows) {
+ g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
+ } else {
+ errorPrint("%s", "failed to read json, maxRows not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp");
+ if (comp && comp->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.comp = comp->valueint;
+ } else if (!comp) {
+ g_Dbs.db[i].dbCfg.comp = -1;
+ } else {
+ errorPrint("%s", "failed to read json, comp not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
+ if (walLevel && walLevel->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint;
+ } else if (!walLevel) {
+ g_Dbs.db[i].dbCfg.walLevel = -1;
+ } else {
+ errorPrint("%s", "failed to read json, walLevel not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast");
+ if (cacheLast && cacheLast->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.cacheLast = cacheLast->valueint;
+ } else if (!cacheLast) {
+ g_Dbs.db[i].dbCfg.cacheLast = -1;
+ } else {
+ errorPrint("%s", "failed to read json, cacheLast not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
+ if (quorum && quorum->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.quorum = quorum->valueint;
+ } else if (!quorum) {
+ g_Dbs.db[i].dbCfg.quorum = 1;
+ } else {
+ printf("failed to read json, quorum input mistake");
+ goto PARSE_OVER;
+ }
+
+ cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
+ if (fsync && fsync->type == cJSON_Number) {
+ g_Dbs.db[i].dbCfg.fsync = fsync->valueint;
+ } else if (!fsync) {
+ g_Dbs.db[i].dbCfg.fsync = -1;
+ } else {
+ errorPrint("%s", "failed to read json, fsync input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ // super_tables
+ cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
+ if (!stables || stables->type != cJSON_Array) {
+ errorPrint("%s", "failed to read json, super_tables not found\n");
+ goto PARSE_OVER;
+ }
+
+ int stbSize = cJSON_GetArraySize(stables);
+ if (stbSize > MAX_SUPER_TABLE_COUNT) {
+ errorPrint(
+ "failed to read json, supertable size overflow, max supertable is %d\n",
+ MAX_SUPER_TABLE_COUNT);
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls = calloc(1, stbSize * sizeof(SSuperTable));
+ assert(g_Dbs.db[i].superTbls);
+ g_Dbs.db[i].superTblCount = stbSize;
+ for (int j = 0; j < stbSize; ++j) {
+ cJSON* stbInfo = cJSON_GetArrayItem(stables, j);
+ if (stbInfo == NULL) continue;
+
+ // dbinfo
+ cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
+ if (!stbName || stbName->type != cJSON_String
+ || stbName->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, stb name not found\n");
+ goto PARSE_OVER;
+ }
+ tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring,
+ TSDB_TABLE_NAME_LEN);
+
+ cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
+ if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, childtable_prefix not found\n");
+ goto PARSE_OVER;
+ }
+ tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
+ TBNAME_PREFIX_LEN);
+
+ cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
+ if (autoCreateTbl
+ && autoCreateTbl->type == cJSON_String
+ && autoCreateTbl->valuestring != NULL) {
+ if ((0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3))
+ && (TBL_ALREADY_EXISTS != g_Dbs.db[i].superTbls[j].childTblExists)) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL;
+ } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ } else {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ }
+ } else if (!autoCreateTbl) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ } else {
+ errorPrint("%s", "failed to read json, auto_create_table not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
+ if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
+ } else if (!batchCreateTbl) {
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = 10;
+ } else {
+ errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
+ if (childTblExists
+ && childTblExists->type == cJSON_String
+ && childTblExists->valuestring != NULL) {
+ if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3))
+ && (g_Dbs.db[i].drop == false)) {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS;
+ } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2)
+ || (g_Dbs.db[i].drop == true))) {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
+ } else {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
+ }
+ } else if (!childTblExists) {
+ g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
+ } else {
+ errorPrint("%s",
+ "failed to read json, child_table_exists not found\n");
+ goto PARSE_OVER;
+ }
+
+ if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
+ g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
+ }
+
+ cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
+ if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
+ errorPrint("%s",
+ "failed to read json, childtable_count input mistake\n");
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
+ g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
+
+ cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
+ if (dataSource && dataSource->type == cJSON_String
+ && dataSource->valuestring != NULL) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
+ dataSource->valuestring,
+ min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1));
+ } else if (!dataSource) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand",
+ min(SMALL_BUFF_LEN, strlen("rand") + 1));
+ } else {
+ errorPrint("%s", "failed to read json, data_source not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt
+ if (stbIface && stbIface->type == cJSON_String
+ && stbIface->valuestring != NULL) {
+ if (0 == strcasecmp(stbIface->valuestring, "taosc")) {
+ g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE;
+ } else if (0 == strcasecmp(stbIface->valuestring, "rest")) {
+ g_Dbs.db[i].superTbls[j].iface= REST_IFACE;
+ } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) {
+ g_Dbs.db[i].superTbls[j].iface= STMT_IFACE;
+ } else {
+ errorPrint("failed to read json, insert_mode %s not recognized\n",
+ stbIface->valuestring);
+ goto PARSE_OVER;
+ }
+ } else if (!stbIface) {
+ g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE;
+ } else {
+ errorPrint("%s", "failed to read json, insert_mode not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit");
+ if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
+ && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
+ if (childTbl_limit->type != cJSON_Number) {
+ errorPrint("%s", "failed to read json, childtable_limit\n");
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
+ } else {
+                g_Dbs.db[i].superTbls[j].childTblLimit = -1;    // "limit -1" selects all query results; when drop is "yes" all tables are recreated, so the limit value is ignored.
+ }
+
+ cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset");
+ if ((childTbl_offset) && (g_Dbs.db[i].drop != true)
+ && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
+ if ((childTbl_offset->type != cJSON_Number)
+ || (0 > childTbl_offset->valueint)) {
+ errorPrint("%s", "failed to read json, childtable_offset\n");
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint;
+ } else {
+ g_Dbs.db[i].superTbls[j].childTblOffset = 0;
+ }
+
+ cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
+ if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
+ ts->valuestring, TSDB_DB_NAME_LEN);
+ } else if (!ts) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
+ "now", TSDB_DB_NAME_LEN);
+ } else {
+ errorPrint("%s", "failed to read json, start_timestamp not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step");
+ if (timestampStep && timestampStep->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint;
+ } else if (!timestampStep) {
+ g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step;
+ } else {
+ errorPrint("%s", "failed to read json, timestamp_step not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format");
+ if (sampleFormat && sampleFormat->type
+ == cJSON_String && sampleFormat->valuestring != NULL) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
+ sampleFormat->valuestring,
+ min(SMALL_BUFF_LEN,
+ strlen(sampleFormat->valuestring) + 1));
+ } else if (!sampleFormat) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv",
+ SMALL_BUFF_LEN);
+ } else {
+ errorPrint("%s", "failed to read json, sample_format not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file");
+ if (sampleFile && sampleFile->type == cJSON_String
+ && sampleFile->valuestring != NULL) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile,
+ sampleFile->valuestring,
+ min(MAX_FILE_NAME_LEN,
+ strlen(sampleFile->valuestring) + 1));
+ } else if (!sampleFile) {
+ memset(g_Dbs.db[i].superTbls[j].sampleFile, 0,
+ MAX_FILE_NAME_LEN);
+ } else {
+ errorPrint("%s", "failed to read json, sample_file not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *useSampleTs = cJSON_GetObjectItem(stbInfo, "use_sample_ts");
+ if (useSampleTs && useSampleTs->type == cJSON_String
+ && useSampleTs->valuestring != NULL) {
+ if (0 == strncasecmp(useSampleTs->valuestring, "yes", 3)) {
+ g_Dbs.db[i].superTbls[j].useSampleTs = true;
+ } else if (0 == strncasecmp(useSampleTs->valuestring, "no", 2)){
+ g_Dbs.db[i].superTbls[j].useSampleTs = false;
+ } else {
+ g_Dbs.db[i].superTbls[j].useSampleTs = false;
+ }
+ } else if (!useSampleTs) {
+ g_Dbs.db[i].superTbls[j].useSampleTs = false;
+ } else {
+ errorPrint("%s", "failed to read json, use_sample_ts not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file");
+ if ((tagsFile && tagsFile->type == cJSON_String)
+ && (tagsFile->valuestring != NULL)) {
+ tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile,
+ tagsFile->valuestring, MAX_FILE_NAME_LEN);
+ if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) {
+ g_Dbs.db[i].superTbls[j].tagSource = 0;
+ } else {
+ g_Dbs.db[i].superTbls[j].tagSource = 1;
+ }
+ } else if (!tagsFile) {
+ memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
+ g_Dbs.db[i].superTbls[j].tagSource = 0;
+ } else {
+ errorPrint("%s", "failed to read json, tags_file not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
+ if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) {
+ int32_t len = stbMaxSqlLen->valueint;
+ if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
+ len = TSDB_MAX_ALLOWED_SQL_LEN;
+ } else if (len < 5) {
+ len = 5;
+ }
+ g_Dbs.db[i].superTbls[j].maxSqlLen = len;
+            } else if (!stbMaxSqlLen) {
+ g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
+ } else {
+ errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n");
+ goto PARSE_OVER;
+ }
+ /*
+ cJSON *multiThreadWriteOneTbl =
+ cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
+ if (multiThreadWriteOneTbl
+ && multiThreadWriteOneTbl->type == cJSON_String
+ && multiThreadWriteOneTbl->valuestring != NULL) {
+ if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) {
+ g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1;
+ } else {
+ g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+ }
+ } else if (!multiThreadWriteOneTbl) {
+ g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+ } else {
+ errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n");
+ goto PARSE_OVER;
+ }
+ */
+ cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
+ if (insertRows && insertRows->type == cJSON_Number) {
+ if (insertRows->valueint < 0) {
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
+ } else if (!insertRows) {
+ g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
+ } else {
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
+ if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
+ if (stbInterlaceRows->valueint < 0) {
+ errorPrint("%s", "failed to read json, interlace rows input mistake\n");
+ goto PARSE_OVER;
+ }
+ g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
+
+ if (g_Dbs.db[i].superTbls[j].interlaceRows > g_Dbs.db[i].superTbls[j].insertRows) {
+ printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %u > insert_rows %"PRId64"\n\n",
+ i, j, g_Dbs.db[i].superTbls[j].interlaceRows,
+ g_Dbs.db[i].superTbls[j].insertRows);
+ printf(" interlace rows value will be set to insert_rows %"PRId64"\n\n",
+ g_Dbs.db[i].superTbls[j].insertRows);
+ prompt();
+ g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows;
+ }
+ } else if (!stbInterlaceRows) {
+                g_Dbs.db[i].superTbls[j].interlaceRows = g_args.interlaceRows; // 0 means progressive mode, > 0 means interlace mode; the max value must be less than or equal to num_of_records_per_req
+ } else {
+ errorPrint(
+ "%s", "failed to read json, interlace rows input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio");
+ if (disorderRatio && disorderRatio->type == cJSON_Number) {
+ if (disorderRatio->valueint > 50)
+ disorderRatio->valueint = 50;
+
+ if (disorderRatio->valueint < 0)
+ disorderRatio->valueint = 0;
+
+ g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint;
+ } else if (!disorderRatio) {
+ g_Dbs.db[i].superTbls[j].disorderRatio = 0;
+ } else {
+ errorPrint("%s", "failed to read json, disorderRatio not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range");
+ if (disorderRange && disorderRange->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint;
+ } else if (!disorderRange) {
+ g_Dbs.db[i].superTbls[j].disorderRange = 1000;
+ } else {
+ errorPrint("%s", "failed to read json, disorderRange not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
+ if (insertInterval && insertInterval->type == cJSON_Number) {
+ g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
+ if (insertInterval->valueint < 0) {
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
+ goto PARSE_OVER;
+ }
+ } else if (!insertInterval) {
+ verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n",
+ __func__, __LINE__, g_args.insert_interval);
+ g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
+ } else {
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ int retVal = getColumnAndTagTypeFromInsertJsonFile(
+ stbInfo, &g_Dbs.db[i].superTbls[j]);
+ if (false == retVal) {
+ goto PARSE_OVER;
+ }
+ }
+ }
+
+ ret = true;
+
+PARSE_OVER:
+ return ret;
+}
+
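+/*
+  Parses the query JSON file: connection settings, query mode, and the
+  query definitions.
+ */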
+static bool getMetaFromQueryJsonFile(cJSON* root) {
+ bool ret = false;
+
+ cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir");
+ if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) {
+ tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN);
+ }
+
+ cJSON* host = cJSON_GetObjectItem(root, "host");
+ if (host && host->type == cJSON_String && host->valuestring != NULL) {
+ tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE);
+ } else if (!host) {
+ tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+ } else {
+ errorPrint("%s", "failed to read json, host not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* port = cJSON_GetObjectItem(root, "port");
+ if (port && port->type == cJSON_Number) {
+ g_queryInfo.port = port->valueint;
+ } else if (!port) {
+ g_queryInfo.port = 6030;
+ }
+
+ cJSON* user = cJSON_GetObjectItem(root, "user");
+ if (user && user->type == cJSON_String && user->valuestring != NULL) {
+ tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE);
+ } else if (!user) {
+        tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE);
+ }
+
+ cJSON* password = cJSON_GetObjectItem(root, "password");
+ if (password && password->type == cJSON_String && password->valuestring != NULL) {
+ tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
+ } else if (!password) {
+ tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);;
+ }
+
+ cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
+ if (answerPrompt && answerPrompt->type == cJSON_String
+ && answerPrompt->valuestring != NULL) {
+ if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) {
+ g_args.answer_yes = false;
+ } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) {
+ g_args.answer_yes = true;
+ } else {
+ g_args.answer_yes = false;
+ }
+ } else if (!answerPrompt) {
+ g_args.answer_yes = false;
+ } else {
+ errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
+ if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
+ if (gQueryTimes->valueint <= 0) {
+ errorPrint("%s()", "failed to read json, query_times input mistake\n");
+ goto PARSE_OVER;
+ }
+ g_args.query_times = gQueryTimes->valueint;
+ } else if (!gQueryTimes) {
+ g_args.query_times = 1;
+ } else {
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* dbs = cJSON_GetObjectItem(root, "databases");
+ if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
+ tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
+ } else if (!dbs) {
+ errorPrint("%s", "failed to read json, databases not found\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode");
+ if (queryMode
+ && queryMode->type == cJSON_String
+ && queryMode->valuestring != NULL) {
+ tstrncpy(g_queryInfo.queryMode, queryMode->valuestring,
+ min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1));
+ } else if (!queryMode) {
+ tstrncpy(g_queryInfo.queryMode, "taosc",
+ min(SMALL_BUFF_LEN, strlen("taosc") + 1));
+ } else {
+ errorPrint("%s", "failed to read json, query_mode not found\n");
+ goto PARSE_OVER;
+ }
+
+ // specified_table_query
+ cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query");
+ if (!specifiedQuery) {
+ g_queryInfo.specifiedQueryInfo.concurrent = 1;
+ g_queryInfo.specifiedQueryInfo.sqlCount = 0;
+ } else if (specifiedQuery->type != cJSON_Object) {
+ errorPrint("%s", "failed to read json, super_table_query not found\n");
+ goto PARSE_OVER;
+ } else {
+ cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval");
+ if (queryInterval && queryInterval->type == cJSON_Number) {
+ g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint;
+ } else if (!queryInterval) {
+ g_queryInfo.specifiedQueryInfo.queryInterval = 0;
+ }
+
+ cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
+ "query_times");
+ if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
+ if (specifiedQueryTimes->valueint <= 0) {
+ errorPrint(
+ "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ specifiedQueryTimes->valueint);
+ goto PARSE_OVER;
+
+ }
+ g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
+ } else if (!specifiedQueryTimes) {
+ g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
+ } else {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+
+ cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
+ if (concurrent && concurrent->type == cJSON_Number) {
+ if (concurrent->valueint <= 0) {
+ errorPrint(
+ "query sqlCount %d or concurrent %d is not correct.\n",
+ g_queryInfo.specifiedQueryInfo.sqlCount,
+ g_queryInfo.specifiedQueryInfo.concurrent);
+ goto PARSE_OVER;
+ }
+ g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
+ } else if (!concurrent) {
+ g_queryInfo.specifiedQueryInfo.concurrent = 1;
+ }
+
+ cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode");
+ if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String
+ && specifiedAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
+ } else {
+ errorPrint("%s", "failed to read json, async mode input error\n");
+ goto PARSE_OVER;
+ }
+ } else {
+ g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE;
+ }
+
+ cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval");
+ if (interval && interval->type == cJSON_Number) {
+ g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint;
+ } else if (!interval) {
+ //printf("failed to read json, subscribe interval no found\n");
+ //goto PARSE_OVER;
+ g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000;
+ }
+
+ cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart");
+ if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
+ if (0 == strcmp("yes", restart->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
+ } else if (0 == strcmp("no", restart->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
+ } else {
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
+ goto PARSE_OVER;
+ }
+ } else {
+ g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
+ }
+
+ cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress");
+ if (keepProgress
+ && keepProgress->type == cJSON_String
+ && keepProgress->valuestring != NULL) {
+ if (0 == strcmp("yes", keepProgress->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1;
+ } else if (0 == strcmp("no", keepProgress->valuestring)) {
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0;
+ } else {
+ errorPrint("%s", "failed to read json, subscribe keepProgress error\n");
+ goto PARSE_OVER;
+ }
+ } else {
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0;
+ }
+
+ // sqls
+ cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
+ if (!specifiedSqls) {
+ g_queryInfo.specifiedQueryInfo.sqlCount = 0;
+ } else if (specifiedSqls->type != cJSON_Array) {
+ errorPrint("%s", "failed to read json, super sqls not found\n");
+ goto PARSE_OVER;
+ } else {
+ int superSqlSize = cJSON_GetArraySize(specifiedSqls);
+ if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
+ > MAX_QUERY_SQL_COUNT) {
+ errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
+ superSqlSize,
+ g_queryInfo.specifiedQueryInfo.concurrent,
+ MAX_QUERY_SQL_COUNT);
+ goto PARSE_OVER;
+ }
+
+ g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize;
+ for (int j = 0; j < superSqlSize; ++j) {
+ cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j);
+ if (sql == NULL) continue;
+
+ cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
+ if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, sql not found\n");
+ goto PARSE_OVER;
+ }
+ tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
+ sqlStr->valuestring, BUFFER_SIZE);
+
+ // default value is -1, which means never stop consuming
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
+ cJSON* endAfterConsume =
+ cJSON_GetObjectItem(specifiedQuery, "endAfterConsume");
+ if (endAfterConsume
+ && endAfterConsume->type == cJSON_Number) {
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j]
+ = endAfterConsume->valueint;
+ }
+ if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1)
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1;
+
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
+ cJSON* resubAfterConsume =
+ cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
+ if ((resubAfterConsume)
+ && (resubAfterConsume->type == cJSON_Number)
+ && (resubAfterConsume->valueint >= 0)) {
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j]
+ = resubAfterConsume->valueint;
+ }
+
+ if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1)
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1;
+
+ cJSON *result = cJSON_GetObjectItem(sql, "result");
+ if ((NULL != result) && (result->type == cJSON_String)
+ && (result->valuestring != NULL)) {
+ tstrncpy(g_queryInfo.specifiedQueryInfo.result[j],
+ result->valuestring, MAX_FILE_NAME_LEN);
+ } else if (NULL == result) {
+ memset(g_queryInfo.specifiedQueryInfo.result[j],
+ 0, MAX_FILE_NAME_LEN);
+ } else {
+ errorPrint("%s",
+ "failed to read json, super query result file not found\n");
+ goto PARSE_OVER;
+ }
+ }
+ }
+ }
+
+ // super_table_query
+ cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
+ if (!superQuery) {
+ g_queryInfo.superQueryInfo.threadCnt = 1;
+ g_queryInfo.superQueryInfo.sqlCount = 0;
+ } else if (superQuery->type != cJSON_Object) {
+ errorPrint("%s", "failed to read json, sub_table_query not found\n");
+ ret = true;
+ goto PARSE_OVER;
+ } else {
+ cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval");
+ if (subrate && subrate->type == cJSON_Number) {
+ g_queryInfo.superQueryInfo.queryInterval = subrate->valueint;
+ } else if (!subrate) {
+ g_queryInfo.superQueryInfo.queryInterval = 0;
+ }
+
+ cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
+ if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
+ if (superQueryTimes->valueint <= 0) {
+ errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ superQueryTimes->valueint);
+ goto PARSE_OVER;
+ }
+ g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
+ } else if (!superQueryTimes) {
+ g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
+ } else {
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
+ if (threads && threads->type == cJSON_Number) {
+ if (threads->valueint <= 0) {
+ errorPrint("%s", "failed to read json, threads input mistake\n");
+ goto PARSE_OVER;
+
+ }
+ g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
+ } else if (!threads) {
+ g_queryInfo.superQueryInfo.threadCnt = 1;
+ }
+
+ //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count");
+ //if (subTblCnt && subTblCnt->type == cJSON_Number) {
+ // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint;
+ //} else if (!subTblCnt) {
+ // g_queryInfo.superQueryInfo.childTblCount = 0;
+ //}
+
+ cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname");
+ if (stblname && stblname->type == cJSON_String
+ && stblname->valuestring != NULL) {
+ tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring,
+ TSDB_TABLE_NAME_LEN);
+ } else {
+ errorPrint("%s", "failed to read json, super table name input error\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode");
+ if (superAsyncMode && superAsyncMode->type == cJSON_String
+ && superAsyncMode->valuestring != NULL) {
+ if (0 == strcmp("sync", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
+ } else if (0 == strcmp("async", superAsyncMode->valuestring)) {
+ g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
+ } else {
+ errorPrint("%s", "failed to read json, async mode input error\n");
+ goto PARSE_OVER;
+ }
+ } else {
+ g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE;
+ }
+
+ cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
+ if (superInterval && superInterval->type == cJSON_Number) {
+ if (superInterval->valueint < 0) {
+ errorPrint("%s", "failed to read json, interval input mistake\n");
+ goto PARSE_OVER;
+ }
+ g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
+ } else if (!superInterval) {
+ //printf("failed to read json, subscribe interval no found\n");
+ //goto PARSE_OVER;
+ g_queryInfo.superQueryInfo.subscribeInterval = 10000;
+ }
+
+ cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart");
+ if (subrestart && subrestart->type == cJSON_String
+ && subrestart->valuestring != NULL) {
+ if (0 == strcmp("yes", subrestart->valuestring)) {
+ g_queryInfo.superQueryInfo.subscribeRestart = true;
+ } else if (0 == strcmp("no", subrestart->valuestring)) {
+ g_queryInfo.superQueryInfo.subscribeRestart = false;
+ } else {
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
+ goto PARSE_OVER;
+ }
+ } else {
+ g_queryInfo.superQueryInfo.subscribeRestart = true;
+ }
+
+ cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
+ if (superkeepProgress &&
+ superkeepProgress->type == cJSON_String
+ && superkeepProgress->valuestring != NULL) {
+ if (0 == strcmp("yes", superkeepProgress->valuestring)) {
+ g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
+ } else if (0 == strcmp("no", superkeepProgress->valuestring)) {
+ g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
+ } else {
+ errorPrint("%s",
+ "failed to read json, subscribe super table keepProgress error\n");
+ goto PARSE_OVER;
+ }
+ } else {
+ g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
+ }
+
+ // default value is -1, which means never stop consuming
+ g_queryInfo.superQueryInfo.endAfterConsume = -1;
+ cJSON* superEndAfterConsume =
+ cJSON_GetObjectItem(superQuery, "endAfterConsume");
+ if (superEndAfterConsume
+ && superEndAfterConsume->type == cJSON_Number) {
+ g_queryInfo.superQueryInfo.endAfterConsume =
+ superEndAfterConsume->valueint;
+ }
+ if (g_queryInfo.superQueryInfo.endAfterConsume < -1)
+ g_queryInfo.superQueryInfo.endAfterConsume = -1;
+
+ // default value is -1, which means do not resubscribe
+ g_queryInfo.superQueryInfo.resubAfterConsume = -1;
+ cJSON* superResubAfterConsume =
+ cJSON_GetObjectItem(superQuery, "resubAfterConsume");
+ if ((superResubAfterConsume)
+ && (superResubAfterConsume->type == cJSON_Number)
+ && (superResubAfterConsume->valueint >= 0)) {
+ g_queryInfo.superQueryInfo.resubAfterConsume =
+ superResubAfterConsume->valueint;
+ }
+ if (g_queryInfo.superQueryInfo.resubAfterConsume < -1)
+ g_queryInfo.superQueryInfo.resubAfterConsume = -1;
+
+ // super table sqls
+ cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
+ if (!superSqls) {
+ g_queryInfo.superQueryInfo.sqlCount = 0;
+ } else if (superSqls->type != cJSON_Array) {
+ errorPrint("%s", "failed to read json, super sqls not found\n");
+ goto PARSE_OVER;
+ } else {
+ int superSqlSize = cJSON_GetArraySize(superSqls);
+ if (superSqlSize > MAX_QUERY_SQL_COUNT) {
+ errorPrint("failed to read json, query sql size overflow, max is %d\n",
+ MAX_QUERY_SQL_COUNT);
+ goto PARSE_OVER;
+ }
+
+ g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
+ for (int j = 0; j < superSqlSize; ++j) {
+ cJSON* sql = cJSON_GetArrayItem(superSqls, j);
+ if (sql == NULL) continue;
+
+ cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
+ if (!sqlStr || sqlStr->type != cJSON_String
+ || sqlStr->valuestring == NULL) {
+ errorPrint("%s", "failed to read json, sql not found\n");
+ goto PARSE_OVER;
+ }
+ tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
+ BUFFER_SIZE);
+
+ cJSON *result = cJSON_GetObjectItem(sql, "result");
+ if (result != NULL && result->type == cJSON_String
+ && result->valuestring != NULL) {
+ tstrncpy(g_queryInfo.superQueryInfo.result[j],
+ result->valuestring, MAX_FILE_NAME_LEN);
+ } else if (NULL == result) {
+ memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
+ } else {
+ errorPrint("%s", "failed to read json, sub query result file not found\n");
+ goto PARSE_OVER;
+ }
+ }
+ }
+ }
+
+ ret = true;
+
+PARSE_OVER:
+ return ret;
+}
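+
+/*
+ * Illustrative query/subscribe JSON accepted by the parser above. Key names
+ * mirror the cJSON_GetObjectItem() lookups in this parser and in
+ * getInfoFromJsonFile() below; the values are examples only, not defaults.
+ *
+ * {
+ *   "filetype": "query",
+ *   "confirm_parameter_prompt": "no",
+ *   "query_times": 1,
+ *   "databases": "test",
+ *   "query_mode": "taosc",
+ *   "specified_table_query": {
+ *     "concurrent": 1,
+ *     "sqls": [ { "sql": "select count(*) from meters", "result": "./query_res0.txt" } ]
+ *   },
+ *   "super_table_query": {
+ *     "stblname": "meters",
+ *     "threads": 1,
+ *     "sqls": [ { "sql": "select count(*) from meters", "result": "" } ]
+ *   }
+ * }
+ */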
+
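+/* Read the whole JSON config file into memory, parse it with cJSON, and
+ * dispatch to the insert or query/subscribe meta parser based on "filetype". */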
+static bool getInfoFromJsonFile(char* file) {
+ debugPrint("%s %d %s\n", __func__, __LINE__, file);
+
+ FILE *fp = fopen(file, "r");
+ if (!fp) {
+ errorPrint("failed to read %s, reason:%s\n", file, strerror(errno));
+ return false;
+ }
+
+ bool ret = false;
+ int maxLen = 6400000;
+ char *content = calloc(1, maxLen + 1);
+ int len = fread(content, 1, maxLen, fp);
+ if (len <= 0) {
+ free(content);
+ fclose(fp);
+ errorPrint("failed to read %s, content is null", file);
+ return false;
+ }
+
+ content[len] = 0;
+ cJSON* root = cJSON_Parse(content);
+ if (root == NULL) {
+ errorPrint("failed to cjson parse %s, invalid json format\n", file);
+ goto PARSE_OVER;
+ }
+
+ cJSON* filetype = cJSON_GetObjectItem(root, "filetype");
+ if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) {
+ if (0 == strcasecmp("insert", filetype->valuestring)) {
+ g_args.test_mode = INSERT_TEST;
+ } else if (0 == strcasecmp("query", filetype->valuestring)) {
+ g_args.test_mode = QUERY_TEST;
+ } else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
+ g_args.test_mode = SUBSCRIBE_TEST;
+ } else {
+ errorPrint("%s", "failed to read json, filetype not support\n");
+ goto PARSE_OVER;
+ }
+ } else if (!filetype) {
+ g_args.test_mode = INSERT_TEST;
+ } else {
+ errorPrint("%s", "failed to read json, filetype not found\n");
+ goto PARSE_OVER;
+ }
+
+ if (INSERT_TEST == g_args.test_mode) {
+ memset(&g_Dbs, 0, sizeof(SDbs));
+ g_Dbs.use_metric = g_args.use_metric;
+ ret = getMetaFromInsertJsonFile(root);
+ } else if ((QUERY_TEST == g_args.test_mode)
+ || (SUBSCRIBE_TEST == g_args.test_mode)) {
+ memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo));
+ ret = getMetaFromQueryJsonFile(root);
+ } else {
+ errorPrint("%s",
+ "input json file type error! please input correct file type: insert or query or subscribe\n");
+ goto PARSE_OVER;
+ }
+
+PARSE_OVER:
+ free(content);
+ cJSON_Delete(root);
+ fclose(fp);
+ return ret;
+}
+
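+/* Load tag values from CSV into memory for every super table that has a
+ * tagsFile configured. */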
+static int prepareSampleData() {
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) {
+ if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) {
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
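+/* Free all buffers allocated for the insert test: per-super-table sample, tag
+ * and child-table-name buffers, the global random-value caches, and (with
+ * STMT_BIND_PARAM_BATCH) the per-column bind batch arrays. */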
+static void postFreeResource() {
+ tmfclose(g_fpOfInsertResult);
+
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) {
+ tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
+ g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL;
+ }
+ if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) {
+ tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf);
+ g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL;
+ }
+
+#if STMT_BIND_PARAM_BATCH == 1
+ for (int c = 0;
+ c < g_Dbs.db[i].superTbls[j].columnCount; c ++) {
+
+ if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) {
+
+ tmfree((char *)((uintptr_t)*(uintptr_t*)(
+ g_Dbs.db[i].superTbls[j].sampleBindBatchArray
+ + sizeof(char*) * c)));
+ }
+ }
+ tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray);
+#endif
+ if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) {
+ tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf);
+ g_Dbs.db[i].superTbls[j].tagDataBuf = NULL;
+ }
+ if (0 != g_Dbs.db[i].superTbls[j].childTblName) {
+ tmfree(g_Dbs.db[i].superTbls[j].childTblName);
+ g_Dbs.db[i].superTbls[j].childTblName = NULL;
+ }
+ }
+ tmfree(g_Dbs.db[i].superTbls);
+ }
+ tmfree(g_Dbs.db);
+ tmfree(g_randbool_buff);
+ tmfree(g_randint_buff);
+ tmfree(g_rand_voltage_buff);
+ tmfree(g_randbigint_buff);
+ tmfree(g_randsmallint_buff);
+ tmfree(g_randtinyint_buff);
+ tmfree(g_randfloat_buff);
+ tmfree(g_rand_current_buff);
+ tmfree(g_rand_phase_buff);
+ tmfree(g_randdouble_buff);
+ tmfree(g_randuint_buff);
+ tmfree(g_randutinyint_buff);
+ tmfree(g_randusmallint_buff);
+ tmfree(g_randubigint_buff);
+ tmfree(g_randint);
+ tmfree(g_randuint);
+ tmfree(g_randbigint);
+ tmfree(g_randubigint);
+ tmfree(g_randfloat);
+ tmfree(g_randdouble);
+
+ tmfree(g_sampleDataBuf);
+
+#if STMT_BIND_PARAM_BATCH == 1
+ for (int l = 0;
+ l < g_args.columnCount; l ++) {
+ if (g_sampleBindBatchArray) {
+ tmfree((char *)((uintptr_t)*(uintptr_t*)(
+ g_sampleBindBatchArray
+ + sizeof(char*) * l)));
+ }
+ }
+ tmfree(g_sampleBindBatchArray);
+
+#endif
+}
+
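+/* Copy one pre-generated sample row into dataBuf as "(ts, ...)", omitting the
+ * generated timestamp when the sample already carries one (useSampleTs);
+ * *sampleUsePos wraps around at MAX_SAMPLES. */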
+static int getRowDataFromSample(
+ char* dataBuf, int64_t maxLen, int64_t timestamp,
+ SSuperTable* stbInfo, int64_t* sampleUsePos)
+{
+ if ((*sampleUsePos) == MAX_SAMPLES) {
+ *sampleUsePos = 0;
+ }
+
+ int dataLen = 0;
+ if(stbInfo->useSampleTs) {
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
+ "(%s",
+ stbInfo->sampleDataBuf
+ + stbInfo->lenOfOneRow * (*sampleUsePos));
+ } else {
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
+ "(%" PRId64 ", ", timestamp);
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
+ "%s",
+ stbInfo->sampleDataBuf
+ + stbInfo->lenOfOneRow * (*sampleUsePos));
+ }
+
+ dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
+
+ (*sampleUsePos)++;
+
+ return dataLen;
+}
+
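+/* Build one "(ts, col1, col2, ...)" tuple for a super table from random data.
+ * Returns the row length, 0 if the remaining buffer is too small, -1 on error. */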
+static int64_t generateStbRowData(
+ SSuperTable* stbInfo,
+ char* recBuf,
+ int64_t remainderBufLen,
+ int64_t timestamp)
+{
+ int64_t dataLen = 0;
+ char *pstr = recBuf;
+ int64_t maxLen = MAX_DATA_SIZE;
+ int tmpLen;
+
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
+ "(%" PRId64 "", timestamp);
+
+ for (int i = 0; i < stbInfo->columnCount; i++) {
+ tstrncpy(pstr + dataLen, ",", 2);
+ dataLen += 1;
+
+ if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY)
+ || (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) {
+ if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("binary or nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+
+ if ((stbInfo->columns[i].dataLen + 1) >
+ /* need to count 3 extra chars: two quotes and a comma */
+ (remainderBufLen - dataLen - 3)) {
+ return 0;
+ }
+ char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
+ if (NULL == buf) {
+ errorPrint2("calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
+ return -1;
+ }
+ rand_string(buf, stbInfo->columns[i].dataLen);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf);
+ tmfree(buf);
+
+ } else {
+ char *tmp = NULL;
+ switch(stbInfo->columns[i].data_type) {
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (i == 1)) {
+ tmp = demo_voltage_int_str();
+ } else {
+ tmp = rand_int_str();
+ }
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ tmp = rand_uint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ tmp = rand_bigint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ tmp = rand_ubigint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (i == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
+ } else {
+ tmp = rand_float_str();
+ }
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, FLOAT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ tmp = rand_double_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, DOUBLE_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ tmp = rand_smallint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ tmp = rand_usmallint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ tmp = rand_tinyint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ tmp = rand_utinyint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ tmp = rand_bool_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BOOL_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tmp = rand_bigint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("Not support data type: %s\n",
+ stbInfo->columns[i].dataType);
+ exit(EXIT_FAILURE);
+ }
+ if (tmp) {
+ dataLen += tmpLen;
+ }
+ }
+
+ if (dataLen > (remainderBufLen - (128)))
+ return 0;
+ }
+
+ dataLen += snprintf(pstr + dataLen, 2, ")");
+
+ verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen);
+ verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
+
+ return strlen(recBuf);
+}
+
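+/* Normal-table counterpart of generateStbRowData(): build one row tuple from
+ * random data according to the global g_args column types. */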
+static int64_t generateData(char *recBuf, char *data_type,
+ int64_t timestamp, int lenOfBinary) {
+ memset(recBuf, 0, MAX_DATA_SIZE);
+ char *pstr = recBuf;
+ pstr += sprintf(pstr, "(%"PRId64"", timestamp);
+
+ int columnCount = g_args.columnCount;
+
+ bool b;
+ char *s;
+ for (int i = 0; i < columnCount; i++) {
+ switch (data_type[i]) {
+ case TSDB_DATA_TYPE_TINYINT:
+ pstr += sprintf(pstr, ",%d", rand_tinyint() );
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ pstr += sprintf(pstr, ",%d", rand_smallint());
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ pstr += sprintf(pstr, ",%d", rand_int());
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ pstr += sprintf(pstr, ",%10.4f", rand_float());
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ pstr += sprintf(pstr, ",%20.8f", rand_double());
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ b = rand_bool() & 1;
+ pstr += sprintf(pstr, ",%s", b ? "true" : "false");
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ s = malloc(lenOfBinary + 1);
+ if (s == NULL) {
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
+ __func__, __LINE__, lenOfBinary + 1);
+ exit(EXIT_FAILURE);
+ }
+ rand_string(s, lenOfBinary);
+ pstr += sprintf(pstr, ",\"%s\"", s);
+ free(s);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ s = malloc(lenOfBinary + 1);
+ if (s == NULL) {
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
+ __func__, __LINE__, lenOfBinary + 1);
+ exit(EXIT_FAILURE);
+ }
+ rand_string(s, lenOfBinary);
+ pstr += sprintf(pstr, ",\"%s\"", s);
+ free(s);
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ pstr += sprintf(pstr, ",%d", rand_utinyint() );
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ pstr += sprintf(pstr, ",%d", rand_usmallint());
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ pstr += sprintf(pstr, ",%d", rand_uint());
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_ubigint());
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("%s() LN%d, Unknown data type %d\n",
+ __func__, __LINE__,
+ data_type[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (strlen(recBuf) > MAX_DATA_SIZE) {
+ ERROR_EXIT("column length too long, abort");
+ }
+ }
+
+ pstr += sprintf(pstr, ")");
+
+ verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
+
+ return (int32_t)strlen(recBuf);
+}
+
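+/* Fill sampleDataBuf with MAX_SAMPLES rows of comma-separated random values,
+ * one row per lenOfOneRow slot; a NULL columns pointer means the normal-table
+ * layout from g_args is used. */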
+static int generateSampleFromRand(
+ char *sampleDataBuf,
+ uint64_t lenOfOneRow,
+ int columnCount,
+ StrColumn *columns
+ )
+{
+ char data[MAX_DATA_SIZE];
+ memset(data, 0, MAX_DATA_SIZE);
+
+ char *buff = malloc(lenOfOneRow);
+ if (NULL == buff) {
+ errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n",
+ __func__, __LINE__, lenOfOneRow);
+ exit(EXIT_FAILURE);
+ }
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ uint64_t pos = 0;
+ memset(buff, 0, lenOfOneRow);
+
+ for (int c = 0; c < columnCount; c++) {
+ char *tmp = NULL;
+
+ uint32_t dataLen;
+ char data_type = (columns)?(columns[c].data_type):g_args.data_type[c];
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ dataLen = (columns)?columns[c].dataLen:g_args.binwidth;
+ rand_string(data, dataLen);
+ pos += sprintf(buff + pos, "%s,", data);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ dataLen = (columns)?columns[c].dataLen:g_args.binwidth;
+ rand_string(data, dataLen - 1);
+ pos += sprintf(buff + pos, "%s,", data);
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (c == 1)) {
+ tmp = demo_voltage_int_str();
+ } else {
+ tmp = rand_int_str();
+ }
+ pos += sprintf(buff + pos, "%s,", tmp);
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ pos += sprintf(buff + pos, "%s,", rand_uint_str());
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ pos += sprintf(buff + pos, "%s,", rand_bigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ pos += sprintf(buff + pos, "%s,", rand_ubigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (c == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
+ } else {
+ tmp = rand_float_str();
+ }
+ pos += sprintf(buff + pos, "%s,", tmp);
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ pos += sprintf(buff + pos, "%s,", rand_double_str());
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ pos += sprintf(buff + pos, "%s,", rand_smallint_str());
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ pos += sprintf(buff + pos, "%s,", rand_usmallint_str());
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ pos += sprintf(buff + pos, "%s,", rand_tinyint_str());
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ pos += sprintf(buff + pos, "%s,", rand_utinyint_str());
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ pos += sprintf(buff + pos, "%s,", rand_bool_str());
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pos += sprintf(buff + pos, "%s,", rand_bigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("%s() LN%d, Unknown data type %s\n",
+ __func__, __LINE__,
+ (columns)?(columns[c].dataType):g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ *(buff + pos - 1) = 0;
+ memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos);
+ }
+
+ free(buff);
+ return 0;
+}
+
+static int generateSampleFromRandForNtb()
+{
+ return generateSampleFromRand(
+ g_sampleDataBuf,
+ g_args.lenOfOneRow,
+ g_args.columnCount,
+ NULL);
+}
+
+static int generateSampleFromRandForStb(SSuperTable *stbInfo)
+{
+ return generateSampleFromRand(
+ stbInfo->sampleDataBuf,
+ stbInfo->lenOfOneRow,
+ stbInfo->columnCount,
+ stbInfo->columns);
+}
+
+static int prepareSampleForNtb() {
+ g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1);
+ if (NULL == g_sampleDataBuf) {
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__,
+ g_args.lenOfOneRow * MAX_SAMPLES,
+ strerror(errno));
+ return -1;
+ }
+
+ return generateSampleFromRandForNtb();
+}
+
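+/* Allocate the per-super-table sample buffer and fill it either from the
+ * configured CSV sample file or from random data, depending on dataSource. */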
+static int prepareSampleForStb(SSuperTable *stbInfo) {
+
+ stbInfo->sampleDataBuf = calloc(
+ stbInfo->lenOfOneRow * MAX_SAMPLES, 1);
+ if (NULL == stbInfo->sampleDataBuf) {
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__,
+ stbInfo->lenOfOneRow * MAX_SAMPLES,
+ strerror(errno));
+ return -1;
+ }
+
+ int ret;
+ if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) {
+ if(stbInfo->useSampleTs) {
+ getAndSetRowsFromCsvFile(stbInfo);
+ }
+ ret = generateSampleFromCsvForStb(stbInfo);
+ } else {
+ ret = generateSampleFromRandForStb(stbInfo);
+ }
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, read sample from csv file failed.\n",
+ __func__, __LINE__);
+ tmfree(stbInfo->sampleDataBuf);
+ stbInfo->sampleDataBuf = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
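+/* Execute one prepared insert batch through the selected interface (taosc SQL,
+ * RESTful, or stmt) and return the number of affected rows. */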
+static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
+{
+ int32_t affectedRows;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ uint16_t iface;
+ if (stbInfo)
+ iface = stbInfo->iface;
+ else {
+ if (g_args.iface == INTERFACE_BUT)
+ iface = TAOSC_IFACE;
+ else
+ iface = g_args.iface;
+ }
+
+ debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__,
+ (iface==TAOSC_IFACE)?
+ "taosc":(iface==REST_IFACE)?"rest":"stmt");
+
+ switch(iface) {
+ case TAOSC_IFACE:
+ verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__, pThreadInfo->buffer);
+
+ affectedRows = queryDbExec(
+ pThreadInfo->taos,
+ pThreadInfo->buffer, INSERT_TYPE, false);
+ break;
+
+ case REST_IFACE:
+ verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__, pThreadInfo->buffer);
+
+ if (0 != postProceSql(g_Dbs.host, g_Dbs.port,
+ pThreadInfo->buffer, pThreadInfo)) {
+ affectedRows = -1;
+ printf("========restful return fail, threadID[%d]\n",
+ pThreadInfo->threadID);
+ } else {
+ affectedRows = k;
+ }
+ break;
+
+ case STMT_IFACE:
+ debugPrint("%s() LN%d, stmt=%p",
+ __func__, __LINE__, pThreadInfo->stmt);
+ if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
+ errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
+
+ fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
+ exit(EXIT_FAILURE);
+ }
+ affectedRows = k;
+ break;
+
+ default:
+ errorPrint2("%s() LN%d: unknown insert mode: %d\n",
+ __func__, __LINE__, stbInfo->iface);
+ affectedRows = 0;
+ }
+
+ return affectedRows;
+}
+
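+/* Resolve the target table name for a table sequence number: an existing child
+ * table name, an auto-created "<prefix><seq>" name, or a plain normal table. */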
+static void getTableName(char *pTblName,
+ threadInfo* pThreadInfo, uint64_t tableSeq)
+{
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+ if (stbInfo) {
+ if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) {
+ if (stbInfo->childTblLimit > 0) {
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
+ stbInfo->childTblName +
+ (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
+ } else {
+ verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, tableSeq);
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
+ stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
+ }
+ } else {
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN,
+ "%s%"PRIu64"", stbInfo->childTblPrefix, tableSeq);
+ }
+ } else {
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", g_args.tb_prefix, tableSeq);
+ }
+}
+
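+/* Append up to `batch` normal-table rows after the SQL head; stops early when
+ * the buffer is exhausted or insertRows is reached. Returns rows generated. */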
+static int32_t generateDataTailWithoutStb(
+ uint32_t batch, char* buffer,
+ int64_t remainderBufLen, int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime,
+ /* int64_t *pSamplePos, */int64_t *dataLen) {
+
+ uint64_t len = 0;
+ char *pstr = buffer;
+
+ verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch);
+
+ int32_t k = 0;
+ for (k = 0; k < batch;) {
+ char *data = pstr;
+ memset(data, 0, MAX_DATA_SIZE);
+
+ int64_t retLen = 0;
+
+ char *data_type = g_args.data_type;
+ int lenOfBinary = g_args.binwidth;
+
+ if (g_args.disorderRatio) {
+ retLen = generateData(data, data_type,
+ startTime + getTSRandTail(
+ g_args.timestamp_step, k,
+ g_args.disorderRatio,
+ g_args.disorderRange),
+ lenOfBinary);
+ } else {
+ retLen = generateData(data, data_type,
+ startTime + g_args.timestamp_step * k,
+ lenOfBinary);
+ }
+
+ if (len > remainderBufLen)
+ break;
+
+ pstr += retLen;
+ k++;
+ len += retLen;
+ remainderBufLen -= retLen;
+
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n",
+ __func__, __LINE__, len, k, buffer);
+
+ recordFrom ++;
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ *dataLen = len;
+ return k;
+}
+
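+/* Compute the timestamp offset for row `seq`; with a positive disorder ratio,
+ * roughly that percentage of rows get a negative (out-of-order) offset bounded
+ * by disorderRange. */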
+static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
+ int disorderRatio, int disorderRange)
+{
+ int64_t randTail = timeStampStep * seq;
+ if (disorderRatio > 0) {
+ int rand_num = taosRandom() % 100;
+ if(rand_num < disorderRatio) {
+ randTail = (randTail +
+ (taosRandom() % disorderRange + 1)) * (-1);
+ debugPrint("rand data generated, back %"PRId64"\n", randTail);
+ }
+ }
+
+ return randTail;
+}
+
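+/* Append up to `batch` super-table rows after the SQL head, taking each row
+ * from random data or from the sample buffer depending on dataSource. */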
+static int32_t generateStbDataTail(
+ SSuperTable* stbInfo,
+ uint32_t batch, char* buffer,
+ int64_t remainderBufLen, int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime,
+ int64_t *pSamplePos, int64_t *dataLen) {
+ uint64_t len = 0;
+
+ char *pstr = buffer;
+
+ bool tsRand;
+ if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
+ tsRand = true;
+ } else {
+ tsRand = false;
+ }
+ verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n",
+ __func__, __LINE__, batch, remainderBufLen);
+
+ int32_t k;
+ for (k = 0; k < batch;) {
+ char *data = pstr;
+
+ int64_t lenOfRow = 0;
+
+ if (tsRand) {
+ if (stbInfo->disorderRatio > 0) {
+ lenOfRow = generateStbRowData(stbInfo, data,
+ remainderBufLen,
+ startTime + getTSRandTail(
+ stbInfo->timeStampStep, k,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange)
+ );
+ } else {
+ lenOfRow = generateStbRowData(stbInfo, data,
+ remainderBufLen,
+ startTime + stbInfo->timeStampStep * k
+ );
+ }
+ } else {
+ lenOfRow = getRowDataFromSample(
+ data,
+ (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE,
+ startTime + stbInfo->timeStampStep * k,
+ stbInfo,
+ pSamplePos);
+ }
+
+ if (lenOfRow == 0) {
+ data[0] = '\0';
+ break;
+ }
+ if ((lenOfRow + 1) > remainderBufLen) {
+ break;
+ }
+
+ pstr += lenOfRow;
+ k++;
+ len += lenOfRow;
+ remainderBufLen -= lenOfRow;
+
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n",
+ __func__, __LINE__, len, k, buffer);
+
+ recordFrom ++;
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ *dataLen = len;
+ return k;
+}
+
+
+static int generateSQLHeadWithoutStb(char *tableName,
+ char *dbName,
+ char *buffer, int remainderBufLen)
+{
+ int len;
+
+ char headBuf[HEAD_BUFF_LEN];
+
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ dbName,
+ tableName);
+
+ if (len > remainderBufLen)
+ return -1;
+
+ tstrncpy(buffer, headBuf, len + 1);
+
+ return len;
+}
+
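+/* Write the insert-statement head for a super table child table; emits a
+ * "using ... TAGS(...)" clause when child tables are auto-created. */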
+static int generateStbSQLHead(
+ SSuperTable* stbInfo,
+ char *tableName, int64_t tableSeq,
+ char *dbName,
+ char *buffer, int remainderBufLen)
+{
+ int len;
+
+ char headBuf[HEAD_BUFF_LEN];
+
+ if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
+ char* tagsValBuf = NULL;
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
+ } else {
+ tagsValBuf = getTagValueFromTagSample(
+ stbInfo,
+ tableSeq % stbInfo->tagSampleCount);
+ }
+ if (NULL == tagsValBuf) {
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s using %s.%s TAGS%s values",
+ dbName,
+ tableName,
+ dbName,
+ stbInfo->stbName,
+ tagsValBuf);
+ tmfree(tagsValBuf);
+ } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) {
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ dbName,
+ tableName);
+ } else {
+ len = snprintf(
+ headBuf,
+ HEAD_BUFF_LEN,
+ "%s.%s values",
+ dbName,
+ tableName);
+ }
+
+ if (len > remainderBufLen)
+ return -1;
+
+ tstrncpy(buffer, headBuf, len + 1);
+
+ return len;
+}
+
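+/* Build one interlaced insert chunk (head plus batchPerTbl rows) for a single
+ * child table; returns the rows generated, or 0 if the buffer was too small. */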
+static int32_t generateStbInterlaceData(
+ threadInfo *pThreadInfo,
+ char *tableName, uint32_t batchPerTbl,
+ uint64_t i,
+ uint32_t batchPerTblTimes,
+ uint64_t tableSeq,
+ char *buffer,
+ int64_t insertRows,
+ int64_t startTime,
+ uint64_t *pRemainderBufLen)
+{
+ assert(buffer);
+ char *pstr = buffer;
+
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ int headLen = generateStbSQLHead(
+ stbInfo,
+ tableName, tableSeq, pThreadInfo->db_name,
+ pstr, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ // generate data buffer
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n",
+ pThreadInfo->threadID, __func__, __LINE__, i, buffer);
+
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen = 0;
+
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ i, batchPerTblTimes, batchPerTbl);
+
+ if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) {
+ startTime = taosGetTimestamp(pThreadInfo->time_precision);
+ }
+
+ int32_t k = generateStbDataTail(
+ stbInfo,
+ batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos), &dataLen);
+
+ if (k == batchPerTbl) {
+ pstr += dataLen;
+ *pRemainderBufLen -= dataLen;
+ } else {
+ debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n",
+ __func__, __LINE__, k, batchPerTbl);
+ pstr -= headLen;
+ pstr[0] = '\0';
+ k = 0;
+ }
+
+ return k;
+}
+
+static int64_t generateInterlaceDataWithoutStb(
+ char *tableName, uint32_t batch,
+ uint64_t tableSeq,
+ char *dbName, char *buffer,
+ int64_t insertRows,
+ int64_t startTime,
+ uint64_t *pRemainderBufLen)
+{
+ assert(buffer);
+ char *pstr = buffer;
+
+ int headLen = generateSQLHeadWithoutStb(
+ tableName, dbName,
+ pstr, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen = 0;
+
+ int32_t k = generateDataTailWithoutStb(
+ batch, pstr, *pRemainderBufLen, insertRows, 0,
+ startTime,
+ &dataLen);
+
+ if (k == batch) {
+ pstr += dataLen;
+ *pRemainderBufLen -= dataLen;
+ } else {
+ debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n",
+ __func__, __LINE__, k, batch);
+ pstr -= headLen;
+ pstr[0] = '\0';
+ k = 0;
+ }
+
+ return k;
+}
+
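+/* Fill a single TAOS_BIND according to the column type, using the supplied
+ * string value when given and a random value otherwise; buffers are
+ * heap-allocated per call. */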
+static int32_t prepareStmtBindArrayByType(
+ TAOS_BIND *bind,
+ char data_type, int32_t dataLen,
+ int32_t timePrec,
+ char *value)
+{
+ int32_t *bind_int;
+ uint32_t *bind_uint;
+ int64_t *bind_bigint;
+ uint64_t *bind_ubigint;
+ float *bind_float;
+ double *bind_double;
+ int8_t *bind_bool;
+ int64_t *bind_ts2;
+ int16_t *bind_smallint;
+ uint16_t *bind_usmallint;
+ int8_t *bind_tinyint;
+ uint8_t *bind_utinyint;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("binary length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_binary;
+
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ if (value) {
+ bind_binary = calloc(1, strlen(value) + 1);
+ strncpy(bind_binary, value, strlen(value));
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ bind_binary = calloc(1, dataLen + 1);
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
+
+ bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_nchar;
+
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ bind_nchar = calloc(1, strlen(value) + 1);
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ bind_nchar = calloc(1, dataLen + 1);
+ rand_string(bind_nchar, dataLen);
+ }
+
+ bind->buffer_length = strlen(bind_nchar);
+ bind->buffer = bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ bind_int = malloc(sizeof(int32_t));
+ assert(bind_int);
+
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = bind_int;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ bind_uint = malloc(sizeof(uint32_t));
+ assert(bind_uint);
+
+ if (value) {
+ *bind_uint = atoi(value);
+ } else {
+ *bind_uint = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_UINT;
+ bind->buffer_length = sizeof(uint32_t);
+ bind->buffer = bind_uint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ bind_bigint = malloc(sizeof(int64_t));
+ assert(bind_bigint);
+
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_bigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ bind_ubigint = malloc(sizeof(uint64_t));
+ assert(bind_ubigint);
+
+ if (value) {
+ *bind_ubigint = atoll(value);
+ } else {
+ *bind_ubigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_UBIGINT;
+ bind->buffer_length = sizeof(uint64_t);
+ bind->buffer = bind_ubigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ bind_float = malloc(sizeof(float));
+ assert(bind_float);
+
+ if (value) {
+ *bind_float = (float)atof(value);
+ } else {
+ *bind_float = rand_float();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = bind_float;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ bind_double = malloc(sizeof(double));
+ assert(bind_double);
+
+ if (value) {
+ *bind_double = atof(value);
+ } else {
+ *bind_double = rand_double();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = bind_double;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ bind_smallint = malloc(sizeof(int16_t));
+ assert(bind_smallint);
+
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = bind_smallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ bind_usmallint = malloc(sizeof(uint16_t));
+ assert(bind_usmallint);
+
+ if (value) {
+ *bind_usmallint = (uint16_t)atoi(value);
+ } else {
+ *bind_usmallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(uint16_t);
+ bind->buffer = bind_usmallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ bind_tinyint = malloc(sizeof(int8_t));
+ assert(bind_tinyint);
+
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_tinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ bind_utinyint = malloc(sizeof(uint8_t));
+ assert(bind_utinyint);
+
+ if (value) {
+ *bind_utinyint = (uint8_t)atoi(value);
+ } else {
+ *bind_utinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_UTINYINT;
+ bind->buffer_length = sizeof(uint8_t);
+ bind->buffer = bind_utinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ bind_bool = malloc(sizeof(int8_t));
+ assert(bind_bool);
+
+ if (value) {
+ if (0 == strncasecmp(value, "true", 4)) {
+ *bind_bool = true;
+ } else {
+ *bind_bool = false;
+ }
+ } else {
+ *bind_bool = rand_bool();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_bool;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ bind_ts2 = malloc(sizeof(int64_t));
+ assert(bind_ts2);
+
+ if (value) {
+ if (strchr(value, ':') && strchr(value, '-')) {
+ int i = 0;
+ while(value[i] != '\0') {
+ if (value[i] == '\"' || value[i] == '\'') {
+ value[i] = ' ';
+ }
+ i++;
+ }
+ int64_t tmpEpoch;
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ value, &tmpEpoch, strlen(value),
+ timePrec, 0)) {
+ free(bind_ts2);
+ errorPrint2("Input %s, time format error!\n", value);
+ return -1;
+ }
+ *bind_ts2 = tmpEpoch;
+ } else {
+ *bind_ts2 = atoll(value);
+ }
+ } else {
+ *bind_ts2 = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts2;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("Not support data type: %d\n", data_type);
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
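+/* Variant of prepareStmtBindArrayByType() that places the bound value in the
+ * caller-provided scratch buffer (*ptr is advanced) instead of allocating. */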
+static int32_t prepareStmtBindArrayByTypeForRand(
+ TAOS_BIND *bind,
+ char data_type, int32_t dataLen,
+ int32_t timePrec,
+ char **ptr,
+ char *value)
+{
+ int32_t *bind_int;
+ uint32_t *bind_uint;
+ int64_t *bind_bigint;
+ uint64_t *bind_ubigint;
+ float *bind_float;
+ double *bind_double;
+ int16_t *bind_smallint;
+ uint16_t *bind_usmallint;
+ int8_t *bind_tinyint;
+ uint8_t *bind_utinyint;
+ int8_t *bind_bool;
+ int64_t *bind_ts2;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("binary length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_binary = (char *)*ptr;
+
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ if (value) {
+ strncpy(bind_binary, value, strlen(value));
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
+
+ bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("nchar length overflow, max size: %u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_nchar = (char *)*ptr;
+
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ rand_string(bind_nchar, dataLen);
+ }
+
+ bind->buffer_length = strlen(bind_nchar);
+ bind->buffer = bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ bind_int = (int32_t *)*ptr;
+
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = bind_int;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ bind_uint = (uint32_t *)*ptr;
+
+ if (value) {
+ *bind_uint = atoi(value);
+ } else {
+ *bind_uint = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_UINT;
+ bind->buffer_length = sizeof(uint32_t);
+ bind->buffer = bind_uint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ bind_bigint = (int64_t *)*ptr;
+
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_bigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ bind_ubigint = (uint64_t *)*ptr;
+
+ if (value) {
+ *bind_ubigint = atoll(value);
+ } else {
+ *bind_ubigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_UBIGINT;
+ bind->buffer_length = sizeof(uint64_t);
+ bind->buffer = bind_ubigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ bind_float = (float *)*ptr;
+
+ if (value) {
+ *bind_float = (float)atof(value);
+ } else {
+ *bind_float = rand_float();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = bind_float;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ bind_double = (double *)*ptr;
+
+ if (value) {
+ *bind_double = atof(value);
+ } else {
+ *bind_double = rand_double();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = bind_double;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ bind_smallint = (int16_t *)*ptr;
+
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = bind_smallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ bind_usmallint = (uint16_t *)*ptr;
+
+ if (value) {
+ *bind_usmallint = (uint16_t)atoi(value);
+ } else {
+ *bind_usmallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_USMALLINT;
+ bind->buffer_length = sizeof(uint16_t);
+ bind->buffer = bind_usmallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ bind_tinyint = (int8_t *)*ptr;
+
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_tinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ bind_utinyint = (uint8_t *)*ptr;
+
+ if (value) {
+ *bind_utinyint = (uint8_t)atoi(value);
+ } else {
+ *bind_utinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_UTINYINT;
+ bind->buffer_length = sizeof(uint8_t);
+ bind->buffer = bind_utinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ bind_bool = (int8_t *)*ptr;
+
+ if (value) {
+ if (0 == strncasecmp(value, "true", 4)) {
+ *bind_bool = true;
+ } else {
+ *bind_bool = false;
+ }
+ } else {
+ *bind_bool = rand_bool();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_bool;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ bind_ts2 = (int64_t *)*ptr;
+
+ if (value) {
+ if (strchr(value, ':') && strchr(value, '-')) {
+ int i = 0;
+ while(value[i] != '\0') {
+ if (value[i] == '\"' || value[i] == '\'') {
+ value[i] = ' ';
+ }
+ i++;
+ }
+ int64_t tmpEpoch;
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ value, &tmpEpoch, strlen(value),
+ timePrec, 0)) {
+ errorPrint2("Input %s, time format error!\n", value);
+ return -1;
+ }
+ *bind_ts2 = tmpEpoch;
+ } else {
+ *bind_ts2 = atoll(value);
+ }
+ } else {
+ *bind_ts2 = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts2;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ default:
+ errorPrint2("No support data type: %d\n", data_type);
+ return -1;
+ }
+
+ return 0;
+}
+
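+/* Bind and add up to `batch` normal-table rows through the parameter-bind
+ * (stmt) interface; returns the number of rows added to the current batch. */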
+static int32_t prepareStmtWithoutStb(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ uint32_t batch,
+ int64_t insertRows,
+ int64_t recordFrom,
+ int64_t startTime)
+{
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+ int ret = taos_stmt_set_tbname(stmt, tableName);
+ if (ret != 0) {
+ errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
+ tableName, ret, taos_stmt_errstr(stmt));
+ return ret;
+ }
+
+ char *data_type = g_args.data_type;
+
+ char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1));
+ if (bindArray == NULL) {
+ errorPrint2("Failed to allocate %d bind params\n",
+ (g_args.columnCount + 1));
+ return -1;
+ }
+
+ int32_t k = 0;
+ for (k = 0; k < batch;) {
+ /* columnCount + 1 (ts) */
+
+ TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0);
+
+ int64_t *bind_ts = pThreadInfo->bind_ts;
+
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+
+ if (g_args.disorderRatio) {
+ *bind_ts = startTime + getTSRandTail(
+ g_args.timestamp_step, k,
+ g_args.disorderRatio,
+ g_args.disorderRange);
+ } else {
+ *bind_ts = startTime + g_args.timestamp_step * k;
+ }
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ for (int i = 0; i < g_args.columnCount; i ++) {
+ bind = (TAOS_BIND *)((char *)bindArray
+ + (sizeof(TAOS_BIND) * (i + 1)));
+ if ( -1 == prepareStmtBindArrayByType(
+ bind,
+ data_type[i],
+ g_args.binwidth,
+ pThreadInfo->time_precision,
+ NULL)) {
+ free(bindArray);
+ return -1;
+ }
+ }
+ if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ break;
+ }
+ // if msg > 3MB, break
+ if (0 != taos_stmt_add_batch(stmt)) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ break;
+ }
+
+ k++;
+ recordFrom ++;
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ free(bindArray);
+ return k;
+}
+
+static int32_t prepareStbStmtBindTag(
+ char *bindArray, SSuperTable *stbInfo,
+ char *tagsVal,
+ int32_t timePrec)
+{
+ TAOS_BIND *tag;
+
+ for (int t = 0; t < stbInfo->tagCount; t ++) {
+ tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t));
+ if ( -1 == prepareStmtBindArrayByType(
+ tag,
+ stbInfo->tags[t].data_type,
+ stbInfo->tags[t].dataLen,
+ timePrec,
+ NULL)) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t prepareStbStmtBindRand(
+ int64_t *ts,
+ char *bindArray, SSuperTable *stbInfo,
+ int64_t startTime, int32_t recSeq,
+ int32_t timePrec)
+{
+ char data[MAX_DATA_SIZE];
+ memset(data, 0, MAX_DATA_SIZE);
+ char *ptr = data;
+
+ TAOS_BIND *bind;
+
+ for (int i = 0; i < stbInfo->columnCount + 1; i ++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
+
+ if (i == 0) {
+ int64_t *bind_ts = ts;
+
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ if (stbInfo->disorderRatio) {
+ *bind_ts = startTime + getTSRandTail(
+ stbInfo->timeStampStep, recSeq,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
+ }
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ ptr += bind->buffer_length;
+ } else if ( -1 == prepareStmtBindArrayByTypeForRand(
+ bind,
+ stbInfo->columns[i-1].data_type,
+ stbInfo->columns[i-1].dataLen,
+ timePrec,
+ &ptr,
+ NULL)) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
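+/* Row-wise stmt path for random super-table data: set the (optionally
+ * auto-created) table name and tags, then bind and batch up to `batch` rows. */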
+UNUSED_FUNC static int32_t prepareStbStmtRand(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
+ char* tagsValBuf = NULL;
+
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
+ } else {
+ tagsValBuf = getTagValueFromTagSample(
+ stbInfo,
+ tableSeq % stbInfo->tagSampleCount);
+ }
+
+ if (NULL == tagsValBuf) {
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
+ if (NULL == tagsArray) {
+ tmfree(tagsValBuf);
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ if (-1 == prepareStbStmtBindTag(
+ tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision
+ /* is tag */)) {
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+ return -1;
+ }
+
+ ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);
+
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ } else {
+ ret = taos_stmt_set_tbname(stmt, tableName);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ }
+
+ char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
+ if (bindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
+ __func__, __LINE__, (stbInfo->columnCount + 1));
+ return -1;
+ }
+
+ uint32_t k;
+ for (k = 0; k < batch;) {
+ /* columnCount + 1 (ts) */
+ if (-1 == prepareStbStmtBindRand(
+ pThreadInfo->bind_ts,
+ bindArray, stbInfo,
+ startTime, k,
+ pThreadInfo->time_precision
+ /* is column */)) {
+ free(bindArray);
+ return -1;
+ }
+ ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ free(bindArray);
+ return -1;
+ }
+ // if msg > 3MB, break
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ free(bindArray);
+ return -1;
+ }
+
+ k++;
+ recordFrom ++;
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ free(bindArray);
+ return k;
+}
+
+#if STMT_BIND_PARAM_BATCH == 1
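+// bind one batch of rows via taos_stmt_bind_param_batch(): the column buffers
+// point into the pre-parsed sample arrays, the timestamp column is generated
+// per row, and the batch is queued with taos_stmt_add_batch(); returns the
+// number of rows bound.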
+static int execStbBindParamBatch(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ assert(stbInfo);
+
+ uint32_t columnCount = pThreadInfo->stbInfo->columnCount;
+
+ uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos);
+
+ if (thisBatch > batch) {
+ thisBatch = batch;
+ }
+ verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n",
+ __func__, __LINE__, batch, *pSamplePos, thisBatch);
+
+ memset(pThreadInfo->bindParams, 0,
+ (sizeof(TAOS_MULTI_BIND) * (columnCount + 1)));
+ memset(pThreadInfo->is_null, 0, thisBatch);
+
+ for (int c = 0; c < columnCount + 1; c ++) {
+ TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c);
+
+ char data_type;
+
+ if (c == 0) {
+ data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = pThreadInfo->bind_ts_array;
+
+ } else {
+ data_type = stbInfo->columns[c-1].data_type;
+
+ char *tmpP;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ param->buffer_length =
+ stbInfo->columns[c-1].dataLen;
+
+ tmpP =
+ (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray
+ +sizeof(char*)*(c-1)));
+
+ verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%"PRIxPTR" position=%"PRId64"\n",
+ __func__, __LINE__, tmpP, *pSamplePos, param->buffer_length,
+ (*pSamplePos) * param->buffer_length);
+
+ param->buffer = (void *)(tmpP + *pSamplePos * param->buffer_length);
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ param->buffer_length = sizeof(int32_t);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ param->buffer_length = sizeof(int8_t);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(
+ stbInfo->sampleBindBatchArray
+ +sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ param->buffer_length = sizeof(int16_t);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ param->buffer_length = sizeof(int64_t);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ param->buffer_length = sizeof(int8_t);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ param->buffer_length = sizeof(float);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ param->buffer_length = sizeof(double);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ param->buffer_length = sizeof(int64_t);
+ param->buffer =
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos));
+ break;
+
+ default:
+ errorPrint("%s() LN%d, wrong data type: %d\n",
+ __func__,
+ __LINE__,
+ data_type);
+ exit(EXIT_FAILURE);
+
+ }
+ }
+
+ param->buffer_type = data_type;
+ param->length = malloc(sizeof(int32_t) * thisBatch);
+ assert(param->length);
+
+ for (int b = 0; b < thisBatch; b++) {
+ if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) {
+ // rows in the sample buffer are spaced by the column width; note that
+ // columns[] is offset by one because slot 0 is the timestamp column
+ param->length[b] = strlen(
+ (char *)param->buffer + b *
+ stbInfo->columns[c-1].dataLen
+ );
+ } else {
+ param->length[b] = param->buffer_length;
+ }
+ }
+ param->is_null = pThreadInfo->is_null;
+ param->num = thisBatch;
+ }
+
+ uint32_t k;
+ for (k = 0; k < thisBatch;) {
+ /* columnCount + 1 (ts) */
+ if (stbInfo->disorderRatio) {
+ *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail(
+ stbInfo->timeStampStep, k,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k;
+ }
+
+ debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n",
+ __func__, __LINE__,
+ k, *(pThreadInfo->bind_ts_array +k));
+ k++;
+ recordFrom ++;
+
+ (*pSamplePos) ++;
+ if ((*pSamplePos) == MAX_SAMPLES) {
+ *pSamplePos = 0;
+ }
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams);
+
+ // free the per-column length arrays before checking the result so they
+ // are not leaked when the bind call fails
+ for (int c = 0; c < stbInfo->columnCount + 1; c ++) {
+ TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c);
+ free(param->length);
+ }
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_bind_param_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+
+ // if msg > 3MB, break
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ return k;
+}
+
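+// convert the textual sample rows into per-column typed arrays
+// (sampleBindBatchArray) so that multi-bind parameters can point directly into them.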
+static int parseSamplefileToStmtBatch(
+ SSuperTable* stbInfo)
+{
+ // char *sampleDataBuf = (stbInfo)?
+ // stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+ char *sampleBindBatchArray = NULL;
+
+ if (stbInfo) {
+ stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount);
+ sampleBindBatchArray = stbInfo->sampleBindBatchArray;
+ } else {
+ g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount);
+ sampleBindBatchArray = g_sampleBindBatchArray;
+ }
+ assert(sampleBindBatchArray);
+
+ for (int c = 0; c < columnCount; c++) {
+ char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c];
+
+ char *tmpP = NULL;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ tmpP = calloc(1, sizeof(int) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ tmpP = calloc(1, sizeof(float) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ tmpP = calloc(1, sizeof(double) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ tmpP = calloc(1, MAX_SAMPLES *
+ (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)));
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ default:
+ errorPrint("Unknown data type: %s\n",
+ (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow;
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ int cursor = 0;
+
+ for (int c = 0; c < columnCount; c++) {
+ char data_type = (stbInfo)?
+ stbInfo->columns[c].data_type:
+ g_args.data_type[c];
+ char *restStr = sampleDataBuf
+ + lenOfOneRow * i + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ char *tmpStr = calloc(1, index + 1);
+ if (NULL == tmpStr) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
+ __func__, __LINE__, index + 1);
+ return -1;
+ }
+
+ strncpy(tmpStr, restStr, index);
+ cursor += index + 1; // skip ',' too
+ char *tmpP;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int32_t)*i)) =
+ atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(float)*i)) =
+ (float)atof(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(double)*i)) =
+ atof(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int8_t)*i)) =
+ (int8_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int16_t)*i)) =
+ (int16_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int64_t)*i)) =
+ (int64_t)atol(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int8_t)*i)) =
+ (int8_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int64_t)*i)) =
+ (int64_t)atol(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c));
+ strcpy(tmpP + i*
+ (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))
+ , tmpStr);
+ break;
+
+ default:
+ break;
+ }
+
+ free(tmpStr);
+ }
+ }
+
+ return 0;
+}
+
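+// allocate the per-thread buffers used by the batch bind path: the timestamp
+// array, the TAOS_MULTI_BIND array and the is_null flags.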
+static int parseSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo, SSuperTable *stbInfo,
+ uint32_t timePrec,
+ uint32_t batch)
+{
+ uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+
+ pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch);
+ assert(pThreadInfo->bind_ts_array);
+
+ pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1));
+ assert(pThreadInfo->bindParams);
+
+ pThreadInfo->is_null = malloc(batch);
+ assert(pThreadInfo->is_null);
+
+ return 0;
+}
+
+static int parseStbSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo,
+ uint32_t timePrec,
+ uint32_t batch)
+{
+ return parseSampleToStmtBatchForThread(
+ pThreadInfo, stbInfo, timePrec, batch);
+}
+
+static int parseNtbSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch)
+{
+ return parseSampleToStmtBatchForThread(
+ pThreadInfo, NULL, timePrec, batch);
+}
+
+#else
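+// per-row bind path: pre-parse every sample row into an array of TAOS_BIND
+// structures, one array per sample slot, stored in pThreadInfo->sampleBindArray.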
+static int parseSampleToStmt(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo, uint32_t timePrec)
+{
+ pThreadInfo->sampleBindArray =
+ (char *)calloc(1, sizeof(char *) * MAX_SAMPLES);
+ if (pThreadInfo->sampleBindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n",
+ __func__, __LINE__,
+ (uint64_t)sizeof(char *) * MAX_SAMPLES);
+ return -1;
+ }
+
+ int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+ char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow;
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ char *bindArray =
+ calloc(1, sizeof(TAOS_BIND) * (columnCount + 1));
+ if (bindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
+ __func__, __LINE__, (columnCount + 1));
+ return -1;
+ }
+
+ TAOS_BIND *bind;
+ int cursor = 0;
+
+ for (int c = 0; c < columnCount + 1; c++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c));
+
+ if (c == 0) {
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = NULL; //bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ } else {
+ char data_type = (stbInfo)?
+ stbInfo->columns[c-1].data_type:
+ g_args.data_type[c-1];
+ int32_t dataLen = (stbInfo)?
+ stbInfo->columns[c-1].dataLen:
+ g_args.binwidth;
+ char *restStr = sampleDataBuf
+ + lenOfOneRow * i + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ char *bindBuffer = calloc(1, index + 1);
+ if (bindBuffer == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
+ __func__, __LINE__, index + 1);
+ return -1;
+ }
+
+ strncpy(bindBuffer, restStr, index);
+ cursor += index + 1; // skip ',' too
+
+ if (-1 == prepareStmtBindArrayByType(
+ bind,
+ data_type,
+ dataLen,
+ timePrec,
+ bindBuffer)) {
+ free(bindBuffer);
+ free(bindArray);
+ return -1;
+ }
+ free(bindBuffer);
+ }
+ }
+ *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) =
+ (uintptr_t)bindArray;
+ }
+
+ return 0;
+}
+
+static int parseStbSampleToStmt(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo, uint32_t timePrec)
+{
+ return parseSampleToStmt(
+ pThreadInfo,
+ stbInfo, timePrec);
+}
+
+static int parseNtbSampleToStmt(
+ threadInfo *pThreadInfo,
+ uint32_t timePrec)
+{
+ return parseSampleToStmt(
+ pThreadInfo,
+ NULL,
+ timePrec);
+}
+
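+// fill the timestamp TAOS_BIND slot for one row, applying the configured
+// disorder ratio/range if any.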
+static int32_t prepareStbStmtBindStartTime(
+ char *tableName,
+ int64_t *ts,
+ char *bindArray, SSuperTable *stbInfo,
+ int64_t startTime, int32_t recSeq)
+{
+ TAOS_BIND *bind;
+
+ bind = (TAOS_BIND *)bindArray;
+
+ int64_t *bind_ts = ts;
+
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ if (stbInfo->disorderRatio) {
+ *bind_ts = startTime + getTSRandTail(
+ stbInfo->timeStampStep, recSeq,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
+ }
+
+ verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n",
+ __func__, __LINE__, tableName, *bind_ts);
+
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ return 0;
+}
+
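+// bind rows one at a time from the pre-parsed sample bind arrays and add them
+// to the current stmt batch; returns the number of rows bound.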
+static int32_t execBindParam(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ uint32_t k;
+ for (k = 0; k < batch;) {
+ char *bindArray = (char *)(*((uintptr_t *)
+ (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos))));
+ /* columnCount + 1 (ts) */
+ if (-1 == prepareStbStmtBindStartTime(
+ tableName,
+ pThreadInfo->bind_ts,
+ bindArray, stbInfo,
+ startTime, k
+ /* is column */)) {
+ return -1;
+ }
+ ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ // if msg > 3MB, break
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+
+ k++;
+ recordFrom ++;
+
+ (*pSamplePos) ++;
+ if ((*pSamplePos) == MAX_SAMPLES) {
+ *pSamplePos = 0;
+ }
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ return k;
+}
+#endif
+
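+// prepare one stmt request for a super table: set the table name (and tags when
+// child tables are auto-created), then delegate the row binding to the batch or
+// per-row implementation depending on STMT_BIND_PARAM_BATCH.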
+static int32_t prepareStbStmt(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
+ char* tagsValBuf = NULL;
+
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
+ } else {
+ tagsValBuf = getTagValueFromTagSample(
+ stbInfo,
+ tableSeq % stbInfo->tagSampleCount);
+ }
+
+ if (NULL == tagsValBuf) {
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
+ if (NULL == tagsArray) {
+ tmfree(tagsValBuf);
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ if (-1 == prepareStbStmtBindTag(
+ tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision
+ /* is tag */)) {
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+ return -1;
+ }
+
+ ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);
+
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ } else {
+ ret = taos_stmt_set_tbname(stmt, tableName);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ }
+
+#if STMT_BIND_PARAM_BATCH == 1
+ return execStbBindParamBatch(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows,
+ recordFrom,
+ startTime,
+ pSamplePos);
+#else
+ return execBindParam(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows,
+ recordFrom,
+ startTime,
+ pSamplePos);
+#endif
+}
+
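+// generate progressive-mode insert data (SQL text) for one child table of a
+// super table: write the statement head, then append row data within the
+// remaining buffer length.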
+static int32_t generateStbProgressiveData(
+ SSuperTable *stbInfo,
+ char *tableName,
+ int64_t tableSeq,
+ char *dbName, char *buffer,
+ int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos,
+ int64_t *pRemainderBufLen)
+{
+ assert(buffer != NULL);
+ char *pstr = buffer;
+
+ memset(pstr, 0, *pRemainderBufLen);
+
+ int64_t headLen = generateStbSQLHead(
+ stbInfo,
+ tableName, tableSeq, dbName,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateStbDataTail(stbInfo,
+ g_args.reqPerReq, pstr, *pRemainderBufLen,
+ insertRows, recordFrom,
+ startTime,
+ pSamplePos, &dataLen);
+}
+
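+// progressive-mode data generation for normal tables (no super table): write
+// the SQL head and then the row data into the given buffer.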
+static int32_t generateProgressiveDataWithoutStb(
+ char *tableName,
+ /* int64_t tableSeq, */
+ threadInfo *pThreadInfo, char *buffer,
+ int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */
+ int64_t *pRemainderBufLen)
+{
+ assert(buffer != NULL);
+ char *pstr = buffer;
+
+ memset(buffer, 0, *pRemainderBufLen);
+
+ int64_t headLen = generateSQLHeadWithoutStb(
+ tableName, pThreadInfo->db_name,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateDataTailWithoutStb(
+ g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom,
+ startTime,
+ /*pSamplePos, */&dataLen);
+}
+
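+// print the per-thread insert summary; totalDelay is clamped to 1 us to avoid
+// dividing by zero in the records/second figure.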
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ if (0 == pThreadInfo->totalDelay)
+ pThreadInfo->totalDelay = 1;
+
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0))
+ );
+}
+
+#if STMT_BIND_PARAM_BATCH == 1
+// stmt sync write interlace data
+static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### stmt interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ int64_t timeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ timeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ timeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+
+ uint64_t timesInterlace = (insertRows / interlaceRows) + 1;
+ uint32_t precalcBatch = interlaceRows;
+
+ if (precalcBatch > g_args.reqPerReq)
+ precalcBatch = g_args.reqPerReq;
+
+ if (precalcBatch > MAX_SAMPLES)
+ precalcBatch = MAX_SAMPLES;
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime;
+
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+ pThreadInfo->samplePos = 0;
+
+ for (int64_t interlace = 0;
+ interlace < timesInterlace; interlace ++) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ int64_t generated = 0;
+ int64_t samplePos;
+
+ for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; tableSeq ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ samplePos = pThreadInfo->samplePos;
+ startTime = pThreadInfo->start_time
+ + interlace * interlaceRows * timeStampStep;
+ uint64_t remainRecPerTbl =
+ insertRows - interlaceRows * interlace;
+ uint64_t recPerTbl = 0;
+
+ uint64_t remainPerInterlace;
+ if (remainRecPerTbl > interlaceRows) {
+ remainPerInterlace = interlaceRows;
+ } else {
+ remainPerInterlace = remainRecPerTbl;
+ }
+
+ while(remainPerInterlace > 0) {
+
+ uint32_t batch;
+ if (remainPerInterlace > precalcBatch) {
+ batch = precalcBatch;
+ } else {
+ batch = remainPerInterlace;
+ }
+ debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__,
+ tableName, batch, startTime);
+
+ if (stbInfo) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows, 0,
+ startTime,
+ &samplePos);
+ } else {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ batch,
+ insertRows,
+ interlaceRows * interlace + recPerTbl,
+ startTime);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace_stmt;
+ } else if (generated == 0) {
+ break;
+ }
+
+ recPerTbl += generated;
+ remainPerInterlace -= generated;
+ pThreadInfo->totalInsertRows += generated;
+
+ verbosePrint("[%d] %s() LN%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->totalInsertRows);
+
+ startTs = taosGetTimestampUs();
+
+ int64_t affectedRows = execInsert(pThreadInfo, generated);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (generated != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert() insert %"PRId64", affected rows: %"PRId64"\n\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generated, affectedRows);
+ goto free_of_interlace_stmt;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ startTime += (generated * timeStampStep);
+ }
+ }
+ pThreadInfo->samplePos = samplePos;
+
+ if (tableSeq == pThreadInfo->start_table_from
+ + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+
+ flagSleep = true;
+ }
+
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
+
+ if (insert_interval > (et - st) ) {
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+
+free_of_interlace_stmt:
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+#else
+// stmt sync write interlace data
+static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### stmt interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ uint64_t maxSqlLen;
+ int64_t timeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ maxSqlLen = stbInfo->maxSqlLen;
+ timeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ maxSqlLen = g_args.max_sql_len;
+ timeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+
+ uint32_t batchPerTbl = interlaceRows;
+ uint32_t batchPerTblTimes;
+
+ if (interlaceRows > g_args.reqPerReq)
+ interlaceRows = g_args.reqPerReq;
+
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ g_args.reqPerReq / interlaceRows;
+ } else {
+ batchPerTblTimes = 1;
+ }
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime = pThreadInfo->start_time;
+
+ uint64_t generatedRecPerTbl = 0;
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
+ while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ uint32_t recOfBatch = 0;
+
+ int32_t generated;
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__,
+ tableName, batchPerTbl, startTime);
+ if (stbInfo) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batchPerTbl,
+ insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos));
+ } else {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ batchPerTbl,
+ insertRows, i,
+ startTime);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace_stmt;
+ } else if (generated == 0) {
+ break;
+ }
+
+ tableSeq ++;
+ recOfBatch += batchPerTbl;
+
+ pThreadInfo->totalInsertRows += batchPerTbl;
+
+ verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl, recOfBatch);
+
+ if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+ generatedRecPerTbl += batchPerTbl;
+
+ startTime = pThreadInfo->start_time
+ + generatedRecPerTbl * timeStampStep;
+
+ flagSleep = true;
+ if (generatedRecPerTbl >= insertRows)
+ break;
+
+ int64_t remainRows = insertRows - generatedRecPerTbl;
+ if ((remainRows > 0) && (batchPerTbl > remainRows))
+ batchPerTbl = remainRows;
+
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generatedRecPerTbl, insertRows);
+
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
+ pThreadInfo->totalInsertRows);
+
+ startTs = taosGetTimestampUs();
+
+ if (recOfBatch == 0) {
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ if (batchPerTbl > 0) {
+ errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ }
+ goto free_of_interlace_stmt;
+ }
+ int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (recOfBatch != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch, affectedRows);
+ goto free_of_interlace_stmt;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
+
+ if (insert_interval > (et - st) ) {
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+
+free_of_interlace_stmt:
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+
+#endif
+
+// sync write interlace data
+static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ uint64_t maxSqlLen;
+ int64_t timeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ maxSqlLen = stbInfo->maxSqlLen;
+ timeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ maxSqlLen = g_args.max_sql_len;
+ timeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+#if 1
+ if (interlaceRows > g_args.reqPerReq)
+ interlaceRows = g_args.reqPerReq;
+
+ uint32_t batchPerTbl = interlaceRows;
+ uint32_t batchPerTblTimes;
+
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ g_args.reqPerReq / interlaceRows;
+ } else {
+ batchPerTblTimes = 1;
+ }
+#else
+ uint32_t batchPerTbl;
+ if (interlaceRows > g_args.reqPerReq)
+ batchPerTbl = g_args.reqPerReq;
+ else
+ batchPerTbl = interlaceRows;
+
+ uint32_t batchPerTblTimes;
+
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ interlaceRows / batchPerTbl;
+ } else {
+ batchPerTblTimes = 1;
+ }
+#endif
+ pThreadInfo->buffer = calloc(maxSqlLen, 1);
+ if (NULL == pThreadInfo->buffer) {
+ errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__, maxSqlLen, strerror(errno));
+ return NULL;
+ }
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime = pThreadInfo->start_time;
+
+ uint64_t generatedRecPerTbl = 0;
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
+ while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ // generate data
+ memset(pThreadInfo->buffer, 0, maxSqlLen);
+ uint64_t remainderBufLen = maxSqlLen;
+
+ char *pstr = pThreadInfo->buffer;
+
+ int len = snprintf(pstr,
+ strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO);
+ pstr += len;
+ remainderBufLen -= len;
+
+ uint32_t recOfBatch = 0;
+
+ int32_t generated;
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ free(pThreadInfo->buffer);
+ return NULL;
+ }
+
+ uint64_t oldRemainderLen = remainderBufLen;
+
+ if (stbInfo) {
+ generated = generateStbInterlaceData(
+ pThreadInfo,
+ tableName, batchPerTbl, i,
+ batchPerTblTimes,
+ tableSeq,
+ pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
+ } else {
+ generated = generateInterlaceDataWithoutStb(
+ tableName, batchPerTbl,
+ tableSeq,
+ pThreadInfo->db_name, pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace;
+ } else if (generated == 0) {
+ break;
+ }
+
+ tableSeq ++;
+ recOfBatch += batchPerTbl;
+
+ pstr += (oldRemainderLen - remainderBufLen);
+ pThreadInfo->totalInsertRows += batchPerTbl;
+
+ verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl, recOfBatch);
+
+ if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+ generatedRecPerTbl += batchPerTbl;
+
+ startTime = pThreadInfo->start_time
+ + generatedRecPerTbl * timeStampStep;
+
+ flagSleep = true;
+ if (generatedRecPerTbl >= insertRows)
+ break;
+
+ int64_t remainRows = insertRows - generatedRecPerTbl;
+ if ((remainRows > 0) && (batchPerTbl > remainRows))
+ batchPerTbl = remainRows;
+
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generatedRecPerTbl, insertRows);
+
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
+ pThreadInfo->totalInsertRows);
+ verbosePrint("[%d] %s() LN%d, buffer=%s\n",
+ pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer);
+
+ startTs = taosGetTimestampUs();
+
+ if (recOfBatch == 0) {
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ if (batchPerTbl > 0) {
+ errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ }
+ errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n",
+ maxSqlLen, batchPerTbl);
+ goto free_of_interlace;
+ }
+ int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (recOfBatch != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch, affectedRows, pThreadInfo->buffer);
+ goto free_of_interlace;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
+
+ if (insert_interval > (et - st) ) {
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+
+free_of_interlace:
+ tmfree(pThreadInfo->buffer);
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+
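+// stmt sync write progressive data: finish all rows of one table before moving
+// on to the next table.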
+static void* syncWriteProgressiveStmt(threadInfo *pThreadInfo) {
+ debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__);
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+ int64_t timeStampStep =
+ stbInfo?stbInfo->timeStampStep:g_args.timestamp_step;
+ int64_t insertRows =
+ (stbInfo)?stbInfo->insertRows:g_args.insertRows;
+ verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
+ __func__, __LINE__, insertRows);
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ pThreadInfo->samplePos = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
+ for (uint64_t tableSeq = pThreadInfo->start_table_from;
+ tableSeq <= pThreadInfo->end_table_to;
+ tableSeq ++) {
+ int64_t start_time = pThreadInfo->start_time;
+
+ for (uint64_t i = 0; i < insertRows;) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+ getTableName(tableName, pThreadInfo, tableSeq);
+ verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID, tableSeq, tableName);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ // measure prepare + insert
+ startTs = taosGetTimestampUs();
+
+ int32_t generated;
+ if (stbInfo) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ (g_args.reqPerReq>stbInfo->insertRows)?
+ stbInfo->insertRows:
+ g_args.reqPerReq,
+ insertRows, i, start_time,
+ &(pThreadInfo->samplePos));
+ } else {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ g_args.reqPerReq,
+ insertRows, i,
+ start_time);
+ }
+
+ verbosePrint("[%d] %s() LN%d generated=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, generated);
+
+ if (generated > 0)
+ i += generated;
+ else
+ goto free_of_stmt_progressive;
+
+ start_time += generated * timeStampStep;
+ pThreadInfo->totalInsertRows += generated;
+
+ // only measure insert
+ // startTs = taosGetTimestampUs();
+
+ int32_t affectedRows = execInsert(pThreadInfo, generated);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.f ms\n",
+ __func__, __LINE__, delay/1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (affectedRows < 0) {
+ errorPrint2("%s() LN%d, affected rows: %d\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_stmt_progressive;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ if (i >= insertRows)
+ break;
+ } // insertRows
+
+ if ((g_args.verbose_print) &&
+ (tableSeq == pThreadInfo->ntables - 1) && (stbInfo)
+ && (0 == strncasecmp(
+ stbInfo->dataSource,
+ "sample", strlen("sample")))) {
+ verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
+ __func__, __LINE__, pThreadInfo->samplePos);
+ }
+ } // tableSeq
+
+ if (percentComplete < 100) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+ }
+
+free_of_stmt_progressive:
+ tmfree(pThreadInfo->buffer);
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+// sync insertion progressive data
+static void* syncWriteProgressive(threadInfo *pThreadInfo) {
+ debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+ uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len;
+ int64_t timeStampStep =
+ stbInfo?stbInfo->timeStampStep:g_args.timestamp_step;
+ int64_t insertRows =
+ (stbInfo)?stbInfo->insertRows:g_args.insertRows;
+ verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
+ __func__, __LINE__, insertRows);
+
+ pThreadInfo->buffer = calloc(maxSqlLen, 1);
+ if (NULL == pThreadInfo->buffer) {
+ errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n",
+ maxSqlLen,
+ strerror(errno));
+ return NULL;
+ }
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ pThreadInfo->samplePos = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
+ for (uint64_t tableSeq = pThreadInfo->start_table_from;
+ tableSeq <= pThreadInfo->end_table_to;
+ tableSeq ++) {
+ int64_t start_time = pThreadInfo->start_time;
+
+ for (uint64_t i = 0; i < insertRows;) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+ getTableName(tableName, pThreadInfo, tableSeq);
+ verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID, tableSeq, tableName);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ free(pThreadInfo->buffer);
+ return NULL;
+ }
+
+ int64_t remainderBufLen = maxSqlLen - 2000;
+ char *pstr = pThreadInfo->buffer;
+
+ int len = snprintf(pstr,
+ strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO);
+
+ pstr += len;
+ remainderBufLen -= len;
+
+ // measure prepare + insert
+ startTs = taosGetTimestampUs();
+
+ int32_t generated;
+ if (stbInfo) {
+ if (stbInfo->iface == STMT_IFACE) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ (g_args.reqPerReq>stbInfo->insertRows)?
+ stbInfo->insertRows:
+ g_args.reqPerReq,
+ insertRows, i, start_time,
+ &(pThreadInfo->samplePos));
+ } else {
+ generated = generateStbProgressiveData(
+ stbInfo,
+ tableName, tableSeq,
+ pThreadInfo->db_name, pstr,
+ insertRows, i, start_time,
+ &(pThreadInfo->samplePos),
+ &remainderBufLen);
+ }
+ } else {
+ if (g_args.iface == STMT_IFACE) {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ g_args.reqPerReq,
+ insertRows, i,
+ start_time);
+ } else {
+ generated = generateProgressiveDataWithoutStb(
+ tableName,
+ /* tableSeq, */
+ pThreadInfo, pstr, insertRows,
+ i, start_time,
+ /* &(pThreadInfo->samplePos), */
+ &remainderBufLen);
+ }
+ }
+
+ verbosePrint("[%d] %s() LN%d generated=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, generated);
+
+ if (generated > 0)
+ i += generated;
+ else
+ goto free_of_progressive;
+
+ start_time += generated * timeStampStep;
+ pThreadInfo->totalInsertRows += generated;
+
+ // only measure insert
+ // startTs = taosGetTimestampUs();
+
+ int32_t affectedRows = execInsert(pThreadInfo, generated);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.f ms\n",
+ __func__, __LINE__, delay/1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (affectedRows < 0) {
+ errorPrint2("%s() LN%d, affected rows: %d\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_progressive;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ if (i >= insertRows)
+ break;
+ } // insertRows
+
+ if ((g_args.verbose_print) &&
+ (tableSeq == pThreadInfo->ntables - 1) && (stbInfo)
+ && (0 == strncasecmp(
+ stbInfo->dataSource,
+ "sample", strlen("sample")))) {
+ verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
+ __func__, __LINE__, pThreadInfo->samplePos);
+ }
+ } // tableSeq
+
+ if (percentComplete < 100) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+ }
+
+free_of_progressive:
+ tmfree(pThreadInfo->buffer);
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+
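+// sync write thread entry: choose interlace vs. progressive mode and the stmt
+// or plain SQL implementation accordingly.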
+static void* syncWrite(void *sarg) {
+
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ setThreadName("syncWrite");
+
+ uint32_t interlaceRows = 0;
+
+ if (stbInfo) {
+ if (stbInfo->interlaceRows < stbInfo->insertRows)
+ interlaceRows = stbInfo->interlaceRows;
+ } else {
+ if (g_args.interlaceRows < g_args.insertRows)
+ interlaceRows = g_args.interlaceRows;
+ }
+
+ if (interlaceRows > 0) {
+ // interlace mode
+ if (stbInfo) {
+ if (STMT_IFACE == stbInfo->iface) {
+#if STMT_BIND_PARAM_BATCH == 1
+ return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows);
+#else
+ return syncWriteInterlaceStmt(pThreadInfo, interlaceRows);
+#endif
+ } else {
+ return syncWriteInterlace(pThreadInfo, interlaceRows);
+ }
+ }
+ } else {
+ // progressive mode
+ if (((stbInfo) && (STMT_IFACE == stbInfo->iface))
+ || (STMT_IFACE == g_args.iface)) {
+ return syncWriteProgressiveStmt(pThreadInfo);
+ } else {
+ return syncWriteProgressive(pThreadInfo);
+ }
+ }
+
+ return NULL;
+}
+
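+// async insert callback: build the next INSERT statement for the current table
+// and submit it again with taos_query_a() until all tables assigned to this
+// thread are done, then post the semaphore.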
+static void callBack(void *param, TAOS_RES *res, int code) {
+ threadInfo* pThreadInfo = (threadInfo*)param;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ int insert_interval =
+ stbInfo?stbInfo->insertInterval:g_args.insert_interval;
+ if (insert_interval) {
+ pThreadInfo->et = taosGetTimestampMs();
+ if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) {
+ taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms
+ }
+ }
+
+ char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen);
+ char data[MAX_DATA_SIZE];
+ char *pstr = buffer;
+ pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES",
+ pThreadInfo->db_name, pThreadInfo->tb_prefix,
+ pThreadInfo->start_table_from);
+ // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) {
+ if (pThreadInfo->counter >= g_args.reqPerReq) {
+ pThreadInfo->start_table_from++;
+ pThreadInfo->counter = 0;
+ }
+ if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) {
+ tsem_post(&pThreadInfo->lock_sem);
+ free(buffer);
+ taos_free_result(res);
+ return;
+ }
+
+ for (int i = 0; i < g_args.reqPerReq; i++) {
+ int rand_num = taosRandom() % 100;
+ if (0 != pThreadInfo->stbInfo->disorderRatio
+ && rand_num < pThreadInfo->stbInfo->disorderRatio) {
+ int64_t d = pThreadInfo->lastTs
+ - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1);
+ generateStbRowData(pThreadInfo->stbInfo, data,
+ MAX_DATA_SIZE,
+ d);
+ } else {
+ generateStbRowData(pThreadInfo->stbInfo,
+ data,
+ MAX_DATA_SIZE,
+ pThreadInfo->lastTs += 1000);
+ }
+ pstr += sprintf(pstr, "%s", data);
+ pThreadInfo->counter++;
+
+ if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) {
+ break;
+ }
+ }
+
+ if (insert_interval) {
+ pThreadInfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo);
+ free(buffer);
+
+ taos_free_result(res);
+}
+
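+// async write thread entry: start the callback chain with an initial query and
+// wait on the semaphore until the chain completes.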
+static void *asyncWrite(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ setThreadName("asyncWrite");
+
+ pThreadInfo->st = 0;
+ pThreadInfo->et = 0;
+ pThreadInfo->lastTs = pThreadInfo->start_time;
+
+ int insert_interval =
+ stbInfo?stbInfo->insertInterval:g_args.insert_interval;
+ if (insert_interval) {
+ pThreadInfo->st = taosGetTimestampMs();
+ }
+ taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo);
+
+ tsem_wait(&(pThreadInfo->lock_sem));
+
+ return NULL;
+}
+
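+// resolve the host name and fill a sockaddr_in for the REST interface
+// (configured port + TSDB_PORT_HTTP).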
+static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr)
+{
+ uint16_t rest_port = port + TSDB_PORT_HTTP;
+ struct hostent *server = gethostbyname(host);
+ if ((server == NULL) || (server->h_addr == NULL)) {
+ errorPrint2("%s", "no such host");
+ return -1;
+ }
+
+ debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n",
+ server->h_name,
+ server->h_addr,
+ (server->h_addrtype == AF_INET)?"ipv4":"ipv6",
+ server->h_length);
+
+ memset(serv_addr, 0, sizeof(struct sockaddr_in));
+ serv_addr->sin_family = AF_INET;
+ serv_addr->sin_port = htons(rest_port);
+#ifdef WINDOWS
+ serv_addr->sin_addr.s_addr = inet_addr(host);
+#else
+ memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length);
+#endif
+ return 0;
+}
+
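+// set up and launch the insert worker threads: parse time precision and start
+// time, prepare the sample data, determine each thread's child table range, and
+// (for the stmt interface) pre-build the parameterized INSERT statement.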
+static void startMultiThreadInsertData(int threads, char* db_name,
+ char* precision, SSuperTable* stbInfo) {
+
+ int32_t timePrec = TSDB_TIME_PRECISION_MILLI;
+ if (0 != precision[0]) {
+ if (0 == strncasecmp(precision, "ms", 2)) {
+ timePrec = TSDB_TIME_PRECISION_MILLI;
+ } else if (0 == strncasecmp(precision, "us", 2)) {
+ timePrec = TSDB_TIME_PRECISION_MICRO;
+ } else if (0 == strncasecmp(precision, "ns", 2)) {
+ timePrec = TSDB_TIME_PRECISION_NANO;
+ } else {
+ errorPrint2("Not support precision: %s\n", precision);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ int64_t startTime;
+ if (stbInfo) {
+ if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) {
+ startTime = taosGetTimestamp(timePrec);
+ } else {
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ stbInfo->startTimestamp,
+ &startTime,
+ strlen(stbInfo->startTimestamp),
+ timePrec, 0)) {
+ ERROR_EXIT("failed to parse time!\n");
+ }
+ }
+ } else {
+ startTime = DEFAULT_START_TIME;
+ }
+ debugPrint("%s() LN%d, startTime= %"PRId64"\n",
+ __func__, __LINE__, startTime);
+
+ // read sample data from file first
+ int ret;
+ if (stbInfo) {
+ ret = prepareSampleForStb(stbInfo);
+ } else {
+ ret = prepareSampleForNtb();
+ }
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, prepare sample data for stable failed!\n",
+ __func__, __LINE__);
+ exit(EXIT_FAILURE);
+ }
+
+ TAOS* taos0 = taos_connect(
+ g_Dbs.host, g_Dbs.user,
+ g_Dbs.password, db_name, g_Dbs.port);
+ if (NULL == taos0) {
+ errorPrint2("%s() LN%d, connect to server fail , reason: %s\n",
+ __func__, __LINE__, taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+ int64_t ntables = 0;
+ uint64_t tableFrom = 0;
+
+ if (stbInfo) {
+ int64_t limit;
+ uint64_t offset;
+
+ if ((NULL != g_args.sqlFile)
+ && (stbInfo->childTblExists == TBL_NO_EXISTS)
+ && ((stbInfo->childTblOffset != 0)
+ || (stbInfo->childTblLimit >= 0))) {
+ printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
+ }
+
+ if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) {
+ if ((stbInfo->childTblLimit < 0)
+ || ((stbInfo->childTblOffset
+ + stbInfo->childTblLimit)
+ > (stbInfo->childTblCount))) {
+
+ if (stbInfo->childTblCount < stbInfo->childTblOffset) {
+ printf("WARNING: offset will not be used since the child tables count is less then offset!\n");
+
+ stbInfo->childTblOffset = 0;
+ }
+ stbInfo->childTblLimit =
+ stbInfo->childTblCount - stbInfo->childTblOffset;
+ }
+
+ offset = stbInfo->childTblOffset;
+ limit = stbInfo->childTblLimit;
+ } else {
+ limit = stbInfo->childTblCount;
+ offset = 0;
+ }
+
+ ntables = limit;
+ tableFrom = offset;
+
+ if ((stbInfo->childTblExists != TBL_NO_EXISTS)
+ && ((stbInfo->childTblOffset + stbInfo->childTblLimit)
+ > stbInfo->childTblCount)) {
+ printf("WARNING: specified offset + limit > child table count!\n");
+ prompt();
+ }
+
+ if ((stbInfo->childTblExists != TBL_NO_EXISTS)
+ && (0 == stbInfo->childTblLimit)) {
+ printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n");
+ prompt();
+ }
+
+ stbInfo->childTblName = (char*)calloc(1,
+ limit * TSDB_TABLE_NAME_LEN);
+ if (stbInfo->childTblName == NULL) {
+ taos_close(taos0);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
+ }
+
+ int64_t childTblCount;
+ getChildNameOfSuperTableWithLimitAndOffset(
+ taos0,
+ db_name, stbInfo->stbName,
+ &stbInfo->childTblName, &childTblCount,
+ limit,
+ offset);
+ ntables = childTblCount;
+ } else {
+ ntables = g_args.ntables;
+ tableFrom = 0;
+ }
+
+ taos_close(taos0);
+
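+ // Split the child tables across threads: 'a' tables per thread, and the
+ // first 'b' threads each take one extra table.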
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) {
+ if (convertHostToServAddr(
+ g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) {
+ ERROR_EXIT("convert host to server address");
+ }
+ }
+
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ assert(pids != NULL);
+ assert(infos != NULL);
+
+ char *stmtBuffer = calloc(1, BUFFER_SIZE);
+ assert(stmtBuffer);
+
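+ // With batched parameter binding, the batch size equals the interlace row
+ // count when interlacing, otherwise min(reqPerReq, insertRows).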
+#if STMT_BIND_PARAM_BATCH == 1
+ uint32_t interlaceRows = 0;
+ uint32_t batch;
+
+ if (stbInfo) {
+ if (stbInfo->interlaceRows < stbInfo->insertRows)
+ interlaceRows = stbInfo->interlaceRows;
+ } else {
+ if (g_args.interlaceRows < g_args.insertRows)
+ interlaceRows = g_args.interlaceRows;
+ }
+
+ if (interlaceRows > 0) {
+ batch = interlaceRows;
+ } else {
+ batch = (g_args.reqPerReq>g_args.insertRows)?
+ g_args.insertRows:g_args.reqPerReq;
+ }
+
+#endif
+
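+ // Build the prepared-statement template: "INSERT INTO ? [USING <stb> TAGS(?,...)]
+ // VALUES(?,...)" with one '?' placeholder per tag and column.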
+ if ((g_args.iface == STMT_IFACE)
+ || ((stbInfo)
+ && (stbInfo->iface == STMT_IFACE))) {
+ char *pstr = stmtBuffer;
+
+ if ((stbInfo)
+ && (AUTO_CREATE_SUBTBL
+ == stbInfo->autoCreateTable)) {
+ pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?",
+ stbInfo->stbName);
+ for (int tag = 0; tag < (stbInfo->tagCount - 1);
+ tag ++ ) {
+ pstr += sprintf(pstr, ",?");
+ }
+ pstr += sprintf(pstr, ") VALUES(?");
+ } else {
+ pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
+ }
+
+ int columnCount = (stbInfo)?
+ stbInfo->columnCount:
+ g_args.columnCount;
+
+ for (int col = 0; col < columnCount; col ++) {
+ pstr += sprintf(pstr, ",?");
+ }
+ pstr += sprintf(pstr, ")");
+
+ debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer);
+#if STMT_BIND_PARAM_BATCH == 1
+ parseSamplefileToStmtBatch(stbInfo);
+#endif
+ }
+
+ for (int i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ pThreadInfo->threadID = i;
+
+ tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
+ pThreadInfo->time_precision = timePrec;
+ pThreadInfo->stbInfo = stbInfo;
+
+ pThreadInfo->start_time = startTime;
+ pThreadInfo->minDelay = UINT64_MAX;
+
+ if ((NULL == stbInfo) ||
+ (stbInfo->iface != REST_IFACE)) {
+ //t_info->taos = taos;
+ pThreadInfo->taos = taos_connect(
+ g_Dbs.host, g_Dbs.user,
+ g_Dbs.password, db_name, g_Dbs.port);
+ if (NULL == pThreadInfo->taos) {
+ free(infos);
+ errorPrint2(
+ "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
+ __func__, __LINE__,
+ taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+ if ((g_args.iface == STMT_IFACE)
+ || ((stbInfo)
+ && (stbInfo->iface == STMT_IFACE))) {
+
+ pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
+ if (NULL == pThreadInfo->stmt) {
+ free(pids);
+ free(infos);
+ errorPrint2(
+ "%s() LN%d, failed init stmt, reason: %s\n",
+ __func__, __LINE__,
+ taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+ if (0 != (ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0))) {
+ free(pids);
+ free(infos);
+ free(stmtBuffer);
+ errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
+ ret, taos_stmt_errstr(pThreadInfo->stmt));
+ exit(EXIT_FAILURE);
+ }
+ pThreadInfo->bind_ts = malloc(sizeof(int64_t));
+
+ if (stbInfo) {
+#if STMT_BIND_PARAM_BATCH == 1
+ parseStbSampleToStmtBatchForThread(
+ pThreadInfo, stbInfo, timePrec, batch);
+#else
+ parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec);
+#endif
+ } else {
+#if STMT_BIND_PARAM_BATCH == 1
+ parseNtbSampleToStmtBatchForThread(
+ pThreadInfo, timePrec, batch);
+#else
+ parseNtbSampleToStmt(pThreadInfo, timePrec);
+#endif
+ }
+ }
+ } else {
+ pThreadInfo->taos = NULL;
+ }
+
+ /* if ((NULL == stbInfo)
+ || (0 == stbInfo->multiThreadWriteOneTbl)) {
+ */
+ pThreadInfo->start_table_from = tableFrom;
+ pThreadInfo->ntables = i < b ? a + 1 : a;
+ pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
+ tableFrom = pThreadInfo->end_table_to + 1;
+ /* } else {
+ pThreadInfo->start_table_from = 0;
+ pThreadInfo->ntables = stbInfo->childTblCount;
+ pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
+ }
+ */
+
+ if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) {
+#ifdef WINDOWS
+ WSADATA wsaData;
+ WSAStartup(MAKEWORD(2, 2), &wsaData);
+ SOCKET sockfd;
+#else
+ int sockfd;
+#endif
+ sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (sockfd < 0) {
+#ifdef WINDOWS
+ errorPrint( "Could not create socket : %d" , WSAGetLastError());
+#endif
+ debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
+ ERROR_EXIT("opening socket");
+ }
+
+ int retConn = connect(sockfd, (struct sockaddr *)&(g_Dbs.serv_addr), sizeof(struct sockaddr));
+ debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
+ if (retConn < 0) {
+ ERROR_EXIT("connecting");
+ }
+ pThreadInfo->sockfd = sockfd;
+ }
+
+
+ tsem_init(&(pThreadInfo->lock_sem), 0, 0);
+ if (ASYNC_MODE == g_Dbs.asyncMode) {
+ pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
+ } else {
+ pthread_create(pids + i, NULL, syncWrite, pThreadInfo);
+ }
+ }
+
+ free(stmtBuffer);
+
+ int64_t start = taosGetTimestampUs();
+
+ for (int i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
+ }
+
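+ // Aggregate per-thread statistics: insert/affected row counts and
+ // total/max/min request delays.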
+ uint64_t totalDelay = 0;
+ uint64_t maxDelay = 0;
+ uint64_t minDelay = UINT64_MAX;
+ uint64_t cntDelay = 1;
+ double avgDelay = 0;
+
+ for (int i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+
+ tsem_destroy(&(pThreadInfo->lock_sem));
+ taos_close(pThreadInfo->taos);
+
+ if (pThreadInfo->stmt) {
+ taos_stmt_close(pThreadInfo->stmt);
+ }
+
+ tmfree((char *)pThreadInfo->bind_ts);
+#if STMT_BIND_PARAM_BATCH == 1
+ tmfree((char *)pThreadInfo->bind_ts_array);
+ tmfree(pThreadInfo->bindParams);
+ tmfree(pThreadInfo->is_null);
+ if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) {
+#ifdef WINDOWS
+ closesocket(pThreadInfo->sockfd);
+ WSACleanup();
+#else
+ close(pThreadInfo->sockfd);
+#endif
+ }
+#else
+ if (pThreadInfo->sampleBindArray) {
+ for (int k = 0; k < MAX_SAMPLES; k++) {
+ uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)(
+ pThreadInfo->sampleBindArray
+ + sizeof(uintptr_t *) * k));
+ int columnCount = (pThreadInfo->stbInfo)?
+ pThreadInfo->stbInfo->columnCount:
+ g_args.columnCount;
+ for (int c = 1; c < columnCount + 1; c++) {
+ TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c));
+ if (bind)
+ tmfree(bind->buffer);
+ }
+ tmfree((char *)tmp);
+ }
+ tmfree(pThreadInfo->sampleBindArray);
+ }
+#endif
+
+ debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID, pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ if (stbInfo) {
+ stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
+ stbInfo->totalInsertRows += pThreadInfo->totalInsertRows;
+ } else {
+ g_args.totalAffectedRows += pThreadInfo->totalAffectedRows;
+ g_args.totalInsertRows += pThreadInfo->totalInsertRows;
+ }
+
+ totalDelay += pThreadInfo->totalDelay;
+ cntDelay += pThreadInfo->cntDelay;
+ if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
+ if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
+ }
+
+ if (cntDelay == 0) cntDelay = 1;
+ avgDelay = (double)totalDelay / cntDelay;
+
+ int64_t end = taosGetTimestampUs();
+ int64_t t = end - start;
+ if (0 == t) t = 1;
+
+ double tInMs = (double) t / 1000000.0;
+
+ if (stbInfo) {
+ fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ tInMs, stbInfo->totalInsertRows,
+ stbInfo->totalAffectedRows,
+ threads, db_name, stbInfo->stbName,
+ (double)(stbInfo->totalInsertRows/tInMs));
+
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ tInMs, stbInfo->totalInsertRows,
+ stbInfo->totalAffectedRows,
+ threads, db_name, stbInfo->stbName,
+ (double)(stbInfo->totalInsertRows/tInMs));
+ }
+ } else {
+ fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ tInMs, g_args.totalInsertRows,
+ g_args.totalAffectedRows,
+ threads, db_name,
+ (double)(g_args.totalInsertRows/tInMs));
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ tInMs, g_args.totalInsertRows,
+ g_args.totalAffectedRows,
+ threads, db_name,
+ (double)(g_args.totalInsertRows/tInMs));
+ }
+ }
+
+ if (minDelay != UINT64_MAX) {
+ fprintf(stderr, "insert delay, avg: %10.2fms, max: %10.2fms, min: %10.2fms\n\n",
+ (double)avgDelay/1000.0,
+ (double)maxDelay/1000.0,
+ (double)minDelay/1000.0);
+
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %10.2fms, min: %10.2fms\n\n",
+ (double)avgDelay/1000.0,
+ (double)maxDelay/1000.0,
+ (double)minDelay/1000.0);
+ }
+ }
+
+ //taos_close(taos);
+
+ free(pids);
+ free(infos);
+}
+
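+// Run each aggregate function over every normal table and record query
+// speed and latency into the result file.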
+static void *queryNtableAggrFunc(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ TAOS *taos = pThreadInfo->taos;
+ setThreadName("queryNtableAggrFunc");
+ char *command = calloc(1, BUFFER_SIZE);
+ assert(command);
+
+ uint64_t startTime = pThreadInfo->start_time;
+ char *tb_prefix = pThreadInfo->tb_prefix;
+ FILE *fp = fopen(pThreadInfo->filePath, "a");
+ if (NULL == fp) {
+ errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ free(command);
+ return NULL;
+ }
+
+ int64_t insertRows;
+ /* if (pThreadInfo->stbInfo) {
+ insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table;
+ } else {
+ */
+ insertRows = g_args.insertRows;
+ // }
+
+ int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
+ int64_t totalData = insertRows * ntables;
+ bool aggr_func = g_Dbs.aggr_func;
+
+ char **aggreFunc;
+ int n;
+
+ if (g_args.demo_mode) {
+ aggreFunc = g_aggreFuncDemo;
+ n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2;
+ } else {
+ aggreFunc = g_aggreFunc;
+ n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
+ }
+
+ if (!aggr_func) {
+ printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
+ }
+ printf("%"PRId64" records:\n", totalData);
+ fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
+
+ for (int j = 0; j < n; j++) {
+ double totalT = 0;
+ uint64_t count = 0;
+ for (int64_t i = 0; i < ntables; i++) {
+ sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64,
+ aggreFunc[j], tb_prefix, i, startTime);
+
+ double t = taosGetTimestampUs();
+ debugPrint("%s() LN%d, sql command: %s\n",
+ __func__, __LINE__, command);
+ TAOS_RES *pSql = taos_query(taos, command);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
+ taos_free_result(pSql);
+ taos_close(taos);
+ fclose(fp);
+ free(command);
+ return NULL;
+ }
+
+ while(taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
+
+ t = taosGetTimestampUs() - t;
+ totalT += t;
+
+ taos_free_result(pSql);
+ }
+
+ fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
+ aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
+ (double)(ntables * insertRows) / totalT, totalT / 1000000);
+ printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000);
+ }
+ fprintf(fp, "\n");
+ fclose(fp);
+ free(command);
+ return NULL;
+}
+
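+// Run each aggregate function against the super table, widening the tag
+// filter (groupid/t0) one value at a time for up to 10 child tables.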
+static void *queryStableAggrFunc(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ TAOS *taos = pThreadInfo->taos;
+ setThreadName("queryStableAggrFunc");
+ char *command = calloc(1, BUFFER_SIZE);
+ assert(command);
+
+ FILE *fp = fopen(pThreadInfo->filePath, "a");
+ if (NULL == fp) {
+ printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ free(command);
+ return NULL;
+ }
+
+ int64_t insertRows = pThreadInfo->stbInfo->insertRows;
+ int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
+ int64_t totalData = insertRows * ntables;
+ bool aggr_func = g_Dbs.aggr_func;
+
+ char **aggreFunc;
+ int n;
+
+ if (g_args.demo_mode) {
+ aggreFunc = g_aggreFuncDemo;
+ n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2;
+ } else {
+ aggreFunc = g_aggreFunc;
+ n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
+ }
+
+ if (!aggr_func) {
+ printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
+ }
+
+ printf("%"PRId64" records:\n", totalData);
+ fprintf(fp, "Querying On %"PRId64" records:\n", totalData);
+
+ for (int j = 0; j < n; j++) {
+ char condition[COND_BUF_LEN] = "\0";
+ char tempS[64] = "\0";
+
+ int64_t m = 10 < ntables ? 10 : ntables;
+
+ for (int64_t i = 1; i <= m; i++) {
+ if (i == 1) {
+ if (g_args.demo_mode) {
+ sprintf(tempS, "groupid = %"PRId64"", i);
+ } else {
+ sprintf(tempS, "t0 = %"PRId64"", i);
+ }
+ } else {
+ if (g_args.demo_mode) {
+ sprintf(tempS, " or groupid = %"PRId64" ", i);
+ } else {
+ sprintf(tempS, " or t0 = %"PRId64" ", i);
+ }
+ }
+ strncat(condition, tempS, COND_BUF_LEN - 1);
+
+ sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition);
+
+ printf("Where condition: %s\n", condition);
+
+ debugPrint("%s() LN%d, sql command: %s\n",
+ __func__, __LINE__, command);
+ fprintf(fp, "%s\n", command);
+
+ double t = taosGetTimestampUs();
+
+ TAOS_RES *pSql = taos_query(taos, command);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
+ taos_free_result(pSql);
+ taos_close(taos);
+ fclose(fp);
+ free(command);
+ return NULL;
+ }
+ int count = 0;
+ while(taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
+ t = taosGetTimestampUs() - t;
+
+ fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
+ ntables * insertRows / (t / 1000), t);
+ printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000);
+
+ taos_free_result(pSql);
+ }
+ fprintf(fp, "\n");
+ }
+ fclose(fp);
+ free(command);
+
+ return NULL;
+}
+
+static void prompt()
+{
+ if (!g_args.answer_yes) {
+ printf(" Press enter key to continue or Ctrl-C to stop\n\n");
+ (void)getchar();
+ }
+}
+
+static int insertTestProcess() {
+
+ setupForAnsiEscape();
+ int ret = printfInsertMeta();
+ resetAfterAnsiEscape();
+
+ if (ret == -1)
+ exit(EXIT_FAILURE);
+
+ debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile);
+ g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
+ if (NULL == g_fpOfInsertResult) {
+ errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile);
+ return -1;
+ }
+
+ if (g_fpOfInsertResult)
+ printfInsertMetaToFile(g_fpOfInsertResult);
+
+ prompt();
+
+ init_rand_data();
+
+ // create database and super tables
+ char *cmdBuffer = calloc(1, BUFFER_SIZE);
+ assert(cmdBuffer);
+
+ if(createDatabasesAndStables(cmdBuffer) != 0) {
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
+ free(cmdBuffer);
+ return -1;
+ }
+ free(cmdBuffer);
+
+ // pretreatment
+ if (prepareSampleData() != 0) {
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
+ return -1;
+ }
+
+ double start;
+ double end;
+
+ if (g_totalChildTables > 0) {
+ fprintf(stderr,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountForCreateTbl);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountForCreateTbl);
+ }
+
+ // create child tables
+ start = taosGetTimestampMs();
+ createChildTables();
+ end = taosGetTimestampMs();
+
+ fprintf(stderr,
+ "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountForCreateTbl, g_actualChildTables);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountForCreateTbl, g_actualChildTables);
+ }
+ }
+
+ // create sub threads for inserting data
+ //start = taosGetTimestampMs();
+ for (int i = 0; i < g_Dbs.dbCount; i++) {
+ if (g_Dbs.use_metric) {
+ if (g_Dbs.db[i].superTblCount > 0) {
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+
+ SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j];
+
+ if (stbInfo && (stbInfo->insertRows > 0)) {
+ startMultiThreadInsertData(
+ g_Dbs.threadCount,
+ g_Dbs.db[i].dbName,
+ g_Dbs.db[i].dbCfg.precision,
+ stbInfo);
+ }
+ }
+ }
+ } else {
+ startMultiThreadInsertData(
+ g_Dbs.threadCount,
+ g_Dbs.db[i].dbName,
+ g_Dbs.db[i].dbCfg.precision,
+ NULL);
+ }
+ }
+ //end = taosGetTimestampMs();
+
+ //int64_t totalInsertRows = 0;
+ //int64_t totalAffectedRows = 0;
+ //for (int i = 0; i < g_Dbs.dbCount; i++) {
+ // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows;
+ // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows;
+ //}
+ //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalInsertRows, totalAffectedRows, g_Dbs.threadCount);
+
+ return 0;
+}
+
+static void *specifiedTableQuery(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+
+ setThreadName("specTableQuery");
+
+ if (pThreadInfo->taos == NULL) {
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ return NULL;
+ } else {
+ pThreadInfo->taos = taos;
+ }
+ }
+
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "use %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
+ errorPrint("use database %s failed!\n\n",
+ g_queryInfo.dbName);
+ return NULL;
+ }
+
+ uint64_t st = 0;
+ uint64_t et = 0;
+
+ uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+
+ uint64_t totalQueried = 0;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+
+ while(queryTimes --) {
+ if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
+ (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) {
+ taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms
+ }
+
+ st = taosGetTimestampMs();
+
+ selectAndGetResult(pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]);
+
+ et = taosGetTimestampMs();
+ printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n",
+ taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0);
+
+ totalQueried ++;
+ g_queryInfo.specifiedQueryInfo.totalQueried ++;
+
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n",
+ __func__, __LINE__, endTs, startTs);
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
+ pThreadInfo->threadID,
+ totalQueried,
+ (double)(totalQueried/((endTs-startTs)/1000.0)));
+ lastPrintTime = currentPrintTime;
+ }
+ }
+ return NULL;
+}
+
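+// Replace the "xxxx" placeholder in the SQL template with the fully
+// qualified child table name (db.tbname) selected by tblIndex.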
+static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
+ char sourceString[32] = "xxxx";
+ char subTblName[TSDB_TABLE_NAME_LEN];
+ sprintf(subTblName, "%s.%s",
+ g_queryInfo.dbName,
+ g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN);
+
+ //printf("inSql: %s\n", inSql);
+
+ char* pos = strstr(inSql, sourceString);
+ if (0 == pos) {
+ return;
+ }
+
+ tstrncpy(outSql, inSql, pos - inSql + 1);
+ //printf("1: %s\n", outSql);
+ strncat(outSql, subTblName, BUFFER_SIZE - 1);
+ //printf("2: %s\n", outSql);
+ strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1);
+ //printf("3: %s\n", outSql);
+}
+
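+// Worker thread: for each round, run every configured SQL against each child
+// table in this thread's [start_table_from, end_table_to] range.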
+static void *superTableQuery(void *sarg) {
+ char *sqlstr = calloc(1, BUFFER_SIZE);
+ assert(sqlstr);
+
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+
+ setThreadName("superTableQuery");
+
+ if (pThreadInfo->taos == NULL) {
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ free(sqlstr);
+ return NULL;
+ } else {
+ pThreadInfo->taos = taos;
+ }
+ }
+
+ uint64_t st = 0;
+ uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
+
+ uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ uint64_t totalQueried = 0;
+ uint64_t startTs = taosGetTimestampMs();
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ while(queryTimes --) {
+ if (g_queryInfo.superQueryInfo.queryInterval
+ && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
+ taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms
+ //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
+ }
+
+ st = taosGetTimestampMs();
+ for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
+ for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
+ memset(sqlstr, 0, BUFFER_SIZE);
+ replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
+ if (g_queryInfo.superQueryInfo.result[j][0] != '\0') {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.superQueryInfo.result[j],
+ pThreadInfo->threadID);
+ }
+ selectAndGetResult(pThreadInfo, sqlstr);
+
+ totalQueried++;
+ g_queryInfo.superQueryInfo.totalQueried ++;
+
+ int64_t currentPrintTime = taosGetTimestampMs();
+ int64_t endTs = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
+ pThreadInfo->threadID,
+ totalQueried,
+ (double)(totalQueried/((endTs-startTs)/1000.0)));
+ lastPrintTime = currentPrintTime;
+ }
+ }
+ }
+ et = taosGetTimestampMs();
+ printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
+ taosGetSelfPthreadId(),
+ pThreadInfo->start_table_from,
+ pThreadInfo->end_table_to,
+ (double)(et - st)/1000.0);
+ }
+
+ free(sqlstr);
+ return NULL;
+}
+
+static int queryTestProcess() {
+
+ setupForAnsiEscape();
+ printfQueryMeta();
+ resetAfterAnsiEscape();
+
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ NULL,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+ if (0 != g_queryInfo.superQueryInfo.sqlCount) {
+ getAllChildNameOfSuperTable(taos,
+ g_queryInfo.dbName,
+ g_queryInfo.superQueryInfo.stbName,
+ &g_queryInfo.superQueryInfo.childTblName,
+ &g_queryInfo.superQueryInfo.childTblCount);
+ }
+
+ prompt();
+
+ if (g_args.debug_print || g_args.verbose_print) {
+ printfQuerySystemInfo(taos);
+ }
+
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
+ if (convertHostToServAddr(
+ g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
+ ERROR_EXIT("convert host to server address");
+ }
+
+ pthread_t *pids = NULL;
+ threadInfo *infos = NULL;
+ //==== create sub threads for query from specified tables
+ int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
+ uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
+
+ uint64_t startTs = taosGetTimestampMs();
+
+ if ((nSqlCount > 0) && (nConcurrent > 0)) {
+
+ pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
+ infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
+
+ if ((NULL == pids) || (NULL == infos)) {
+ taos_close(taos);
+ ERROR_EXIT("memory allocation failed for create threads\n");
+ }
+
+ for (uint64_t i = 0; i < nSqlCount; i++) {
+ for (int j = 0; j < nConcurrent; j++) {
+ uint64_t seq = i * nConcurrent + j;
+ threadInfo *pThreadInfo = infos + seq;
+ pThreadInfo->threadID = seq;
+ pThreadInfo->querySeq = i;
+
+ if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
+
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(taos);
+ free(infos);
+ free(pids);
+ errorPrint2("use database %s failed!\n\n",
+ g_queryInfo.dbName);
+ return -1;
+ }
+ }
+
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) {
+#ifdef WINDOWS
+ WSADATA wsaData;
+ WSAStartup(MAKEWORD(2, 2), &wsaData);
+ SOCKET sockfd;
+#else
+ int sockfd;
+#endif
+ sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (sockfd < 0) {
+#ifdef WINDOWS
+ errorPrint( "Could not create socket : %d" , WSAGetLastError());
+#endif
+ debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
+ ERROR_EXIT("opening socket");
+ }
+
+ int retConn = connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr),
+ sizeof(struct sockaddr));
+ debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
+ if (retConn < 0) {
+ ERROR_EXIT("connecting");
+ }
+ pThreadInfo->sockfd = sockfd;
+ }
+ pThreadInfo->taos = NULL;// workaround to use separate taos connection;
+
+ pthread_create(pids + seq, NULL, specifiedTableQuery,
+ pThreadInfo);
+ }
+ }
+ } else {
+ g_queryInfo.specifiedQueryInfo.concurrent = 0;
+ }
+
+ taos_close(taos);
+
+ pthread_t *pidsOfSub = NULL;
+ threadInfo *infosOfSub = NULL;
+ //==== create sub threads for query from all sub tables of the super table
+ if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+ && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+ pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+ infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+
+ if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
+ free(infos);
+ free(pids);
+
+ ERROR_EXIT("memory allocation failed for create threads\n");
+ }
+
+ int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+ int threads = g_queryInfo.superQueryInfo.threadCnt;
+
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ uint64_t tableFrom = 0;
+ for (int i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infosOfSub + i;
+ pThreadInfo->threadID = i;
+
+ pThreadInfo->start_table_from = tableFrom;
+ pThreadInfo->ntables = i < b ? a + 1 : a;
+ pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
+ tableFrom = pThreadInfo->end_table_to + 1;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) {
+#ifdef WINDOWS
+ WSADATA wsaData;
+ WSAStartup(MAKEWORD(2, 2), &wsaData);
+ SOCKET sockfd;
+#else
+ int sockfd;
+#endif
+ sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (sockfd < 0) {
+#ifdef WINDOWS
+ errorPrint( "Could not create socket : %d" , WSAGetLastError());
+#endif
+ debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
+ ERROR_EXIT("opening socket");
+ }
+
+ int retConn = connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr),
+ sizeof(struct sockaddr));
+ debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
+ if (retConn < 0) {
+ ERROR_EXIT("connecting");
+ }
+ pThreadInfo->sockfd = sockfd;
+ }
+ pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
+ }
+
+ g_queryInfo.superQueryInfo.threadCnt = threads;
+ } else {
+ g_queryInfo.superQueryInfo.threadCnt = 0;
+ }
+
+ if ((nSqlCount > 0) && (nConcurrent > 0)) {
+ for (int i = 0; i < nConcurrent; i++) {
+ for (int j = 0; j < nSqlCount; j++) {
+ pthread_join(pids[i * nSqlCount + j], NULL);
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) {
+ threadInfo *pThreadInfo = infos + i * nSqlCount + j;
+#ifdef WINDOWS
+ closesocket(pThreadInfo->sockfd);
+ WSACleanup();
+#else
+ close(pThreadInfo->sockfd);
+#endif
+ }
+ }
+ }
+ }
+
+ tmfree((char*)pids);
+ tmfree((char*)infos);
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) {
+ pthread_join(pidsOfSub[i], NULL);
+ if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) {
+ threadInfo *pThreadInfo = infosOfSub + i;
+#ifdef WINDOWS
+ closesocket(pThreadInfo->sockfd);
+ WSACleanup();
+#else
+ close(pThreadInfo->sockfd);
+#endif
+ }
+ }
+
+ tmfree((char*)pidsOfSub);
+ tmfree((char*)infosOfSub);
+
+ // taos_close(taos);// workaround to use separate taos connection;
+ uint64_t endTs = taosGetTimestampMs();
+
+ uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ g_queryInfo.superQueryInfo.totalQueried;
+
+ fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
+ totalQueried,
+ (double)(totalQueried/((endTs-startTs)/1000.0)));
+ return 0;
+}
+
+static void stable_sub_callback(
+ TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ __func__, __LINE__, code, taos_errstr(res));
+ return;
+ }
+
+ if (param)
+ fetchResult(res, (threadInfo *)param);
+ // taos_unsubscribe() will free the result.
+}
+
+static void specified_sub_callback(
+ TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+ if (res == NULL || taos_errno(res) != 0) {
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ __func__, __LINE__, code, taos_errstr(res));
+ return;
+ }
+
+ if (param)
+ fetchResult(res, (threadInfo *)param);
+ // taos_unsubscribe() will free the result.
+}
+
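+// Create a subscription: in async mode a per-class callback consumes results;
+// otherwise the returned handle is polled later with taos_consume().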
+static TAOS_SUB* subscribeImpl(
+ QUERY_CLASS class,
+ threadInfo *pThreadInfo,
+ char *sql, char* topic, bool restart, uint64_t interval)
+{
+ TAOS_SUB* tsub = NULL;
+
+ if ((SPECIFIED_CLASS == class)
+ && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, specified_sub_callback, (void*)pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ } else if ((STABLE_CLASS == class)
+ && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, stable_sub_callback, (void*)pThreadInfo,
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ } else {
+ tsub = taos_subscribe(
+ pThreadInfo->taos,
+ restart,
+ topic, sql, NULL, NULL, interval);
+ }
+
+ if (tsub == NULL) {
+ errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ return NULL;
+ }
+
+ return tsub;
+}
+
+static void *superSubscribe(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ char *subSqlStr = calloc(1, BUFFER_SIZE);
+ assert(subSqlStr);
+
+ TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
+ uint64_t tsubSeq;
+
+ setThreadName("superSub");
+
+ if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
+ free(subSqlStr);
+ errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
+ pThreadInfo->ntables, MAX_QUERY_SQL_COUNT);
+ exit(EXIT_FAILURE);
+ }
+
+ if (pThreadInfo->taos == NULL) {
+ pThreadInfo->taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ g_queryInfo.dbName,
+ g_queryInfo.port);
+ if (pThreadInfo->taos == NULL) {
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ free(subSqlStr);
+ return NULL;
+ }
+ }
+
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
+ errorPrint2("use database %s failed!\n\n",
+ g_queryInfo.dbName);
+ free(subSqlStr);
+ return NULL;
+ }
+
+ char topic[32] = {0};
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ tsubSeq = i - pThreadInfo->start_table_from;
+ verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID,
+ pThreadInfo->start_table_from,
+ pThreadInfo->end_table_to, i);
+ sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
+ i, pThreadInfo->querySeq);
+ memset(subSqlStr, 0, BUFFER_SIZE);
+ replaceChildTblName(
+ g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
+ subSqlStr, i);
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+
+ verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n",
+ __func__, __LINE__, pThreadInfo->threadID, subSqlStr);
+ tsub[tsubSeq] = subscribeImpl(
+ STABLE_CLASS,
+ pThreadInfo, subSqlStr, topic,
+ g_queryInfo.superQueryInfo.subscribeRestart,
+ g_queryInfo.superQueryInfo.subscribeInterval);
+ if (NULL == tsub[tsubSeq]) {
+ taos_close(pThreadInfo->taos);
+ free(subSqlStr);
+ return NULL;
+ }
+ }
+
+ // start loop to consume result
+ int consumed[MAX_QUERY_SQL_COUNT];
+ for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) {
+ consumed[i] = 0;
+ }
+ TAOS_RES* res = NULL;
+
+ uint64_t st = 0, et = 0;
+
+ while ((g_queryInfo.superQueryInfo.endAfterConsume == -1)
+ || (g_queryInfo.superQueryInfo.endAfterConsume >
+ consumed[pThreadInfo->end_table_to
+ - pThreadInfo->start_table_from])) {
+
+ verbosePrint("super endAfterConsume: %d, consumed: %d\n",
+ g_queryInfo.superQueryInfo.endAfterConsume,
+ consumed[pThreadInfo->end_table_to
+ - pThreadInfo->start_table_from]);
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ tsubSeq = i - pThreadInfo->start_table_from;
+ if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
+ continue;
+ }
+
+ st = taosGetTimestampMs();
+ performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et));
+ res = taos_consume(tsub[tsubSeq]);
+ et = taosGetTimestampMs();
+ performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
+
+ if (res) {
+ if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ fetchResult(res, pThreadInfo);
+ }
+ consumed[tsubSeq] ++;
+
+ if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1)
+ && (consumed[tsubSeq] >=
+ g_queryInfo.superQueryInfo.resubAfterConsume)) {
+ verbosePrint("%s() LN%d, keepProgress:%d, resub super table query: %"PRIu64"\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.subscribeKeepProgress,
+ pThreadInfo->querySeq);
+ taos_unsubscribe(tsub[tsubSeq],
+ g_queryInfo.superQueryInfo.subscribeKeepProgress);
+ consumed[tsubSeq]= 0;
+ tsub[tsubSeq] = subscribeImpl(
+ STABLE_CLASS,
+ pThreadInfo, subSqlStr, topic,
+ g_queryInfo.superQueryInfo.subscribeRestart,
+ g_queryInfo.superQueryInfo.subscribeInterval
+ );
+ if (NULL == tsub[tsubSeq]) {
+ taos_close(pThreadInfo->taos);
+ free(subSqlStr);
+ return NULL;
+ }
+ }
+ }
+ }
+ }
+ verbosePrint("%s() LN%d, super endAfterConsume: %d, consumed: %d\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.endAfterConsume,
+ consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]);
+ taos_free_result(res);
+
+ for (uint64_t i = pThreadInfo->start_table_from;
+ i <= pThreadInfo->end_table_to; i++) {
+ tsubSeq = i - pThreadInfo->start_table_from;
+ taos_unsubscribe(tsub[tsubSeq], 0);
+ }
+
+ taos_close(pThreadInfo->taos);
+ free(subSqlStr);
+ return NULL;
+}
+
+static void *specifiedSubscribe(void *sarg) {
+ threadInfo *pThreadInfo = (threadInfo *)sarg;
+ // TAOS_SUB* tsub = NULL;
+
+ setThreadName("specSub");
+
+ if (pThreadInfo->taos == NULL) {
+ pThreadInfo->taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ g_queryInfo.dbName,
+ g_queryInfo.port);
+ if (pThreadInfo->taos == NULL) {
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
+ pThreadInfo->threadID, taos_errstr(NULL));
+ return NULL;
+ }
+ }
+
+ char sqlStr[TSDB_DB_NAME_LEN + 5];
+ sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
+ if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+
+ sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+ "taosdemo-subscribe-%"PRIu64"-%d",
+ pThreadInfo->querySeq,
+ pThreadInfo->threadID);
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+ g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
+ SPECIFIED_CLASS, pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+ g_queryInfo.specifiedQueryInfo.subscribeRestart,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+
+ // start loop to consume result
+
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
+ while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1)
+ || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] <
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) {
+
+ printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n",
+ pThreadInfo->threadID,
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID],
+ pThreadInfo->querySeq,
+ g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]);
+ if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
+ continue;
+ }
+
+ g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
+ g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
+ if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
+ if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0]
+ != 0) {
+ sprintf(pThreadInfo->filePath, "%s-%d",
+ g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
+ pThreadInfo->threadID);
+ }
+ fetchResult(
+ g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
+ pThreadInfo);
+
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
+ if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)
+ && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
+ g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
+ printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
+ pThreadInfo->querySeq);
+ g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
+ taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
+ g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
+ g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] =
+ subscribeImpl(
+ SPECIFIED_CLASS,
+ pThreadInfo,
+ g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
+ g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+ g_queryInfo.specifiedQueryInfo.subscribeRestart,
+ g_queryInfo.specifiedQueryInfo.subscribeInterval);
+ if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
+ taos_close(pThreadInfo->taos);
+ return NULL;
+ }
+ }
+ }
+ }
+ taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
+ taos_close(pThreadInfo->taos);
+
+ return NULL;
+}
+
+static int subscribeTestProcess() {
+ setupForAnsiEscape();
+ printfQueryMeta();
+ resetAfterAnsiEscape();
+
+ prompt();
+
+ TAOS * taos = NULL;
+ taos = taos_connect(g_queryInfo.host,
+ g_queryInfo.user,
+ g_queryInfo.password,
+ g_queryInfo.dbName,
+ g_queryInfo.port);
+ if (taos == NULL) {
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+ if (0 != g_queryInfo.superQueryInfo.sqlCount) {
+ getAllChildNameOfSuperTable(taos,
+ g_queryInfo.dbName,
+ g_queryInfo.superQueryInfo.stbName,
+ &g_queryInfo.superQueryInfo.childTblName,
+ &g_queryInfo.superQueryInfo.childTblCount);
+ }
+
+ taos_close(taos); // workaround to use separate taos connection;
+
+ pthread_t *pids = NULL;
+ threadInfo *infos = NULL;
+
+ pthread_t *pidsOfStable = NULL;
+ threadInfo *infosOfStable = NULL;
+
+ //==== create threads for query for specified table
+ if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
+ debugPrint("%s() LN%d, specified query sqlCount %d.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ } else {
+ if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
+ errorPrint2("%s() LN%d, specified query sqlCount %d.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount);
+ exit(EXIT_FAILURE);
+ }
+
+ pids = calloc(
+ 1,
+ g_queryInfo.specifiedQueryInfo.sqlCount *
+ g_queryInfo.specifiedQueryInfo.concurrent *
+ sizeof(pthread_t));
+ infos = calloc(
+ 1,
+ g_queryInfo.specifiedQueryInfo.sqlCount *
+ g_queryInfo.specifiedQueryInfo.concurrent *
+ sizeof(threadInfo));
+ if ((NULL == pids) || (NULL == infos)) {
+ errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
+ }
+
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+ uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+ threadInfo *pThreadInfo = infos + seq;
+ pThreadInfo->threadID = seq;
+ pThreadInfo->querySeq = i;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
+ pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
+ }
+ }
+ }
+
+ //==== create threads for super table query
+ if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
+ debugPrint("%s() LN%d, super table query sqlCount %d.\n",
+ __func__, __LINE__,
+ g_queryInfo.superQueryInfo.sqlCount);
+ } else {
+ if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+ && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+ pidsOfStable = calloc(
+ 1,
+ g_queryInfo.superQueryInfo.sqlCount *
+ g_queryInfo.superQueryInfo.threadCnt *
+ sizeof(pthread_t));
+ infosOfStable = calloc(
+ 1,
+ g_queryInfo.superQueryInfo.sqlCount *
+ g_queryInfo.superQueryInfo.threadCnt *
+ sizeof(threadInfo));
+ if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
+ errorPrint2("%s() LN%d, malloc failed for create threads\n",
+ __func__, __LINE__);
+ // taos_close(taos);
+ exit(EXIT_FAILURE);
+ }
+
+ int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+ int threads = g_queryInfo.superQueryInfo.threadCnt;
+
+ int64_t a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = ntables % threads;
+ }
+
+ for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ uint64_t tableFrom = 0;
+ for (int j = 0; j < threads; j++) {
+ uint64_t seq = i * threads + j;
+ threadInfo *pThreadInfo = infosOfStable + seq;
+ pThreadInfo->threadID = seq;
+ pThreadInfo->querySeq = i;
+
+ pThreadInfo->start_table_from = tableFrom;
+ pThreadInfo->ntables = j < b ? a + 1 : a;
+ pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1;
+ tableFrom = pThreadInfo->end_table_to + 1;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
+ pthread_create(pidsOfStable + seq,
+ NULL, superSubscribe, pThreadInfo);
+ }
+ }
+
+ g_queryInfo.superQueryInfo.threadCnt = threads;
+
+ for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < threads; j++) {
+ uint64_t seq = i * threads + j;
+ pthread_join(pidsOfStable[seq], NULL);
+ }
+ }
+ }
+ }
+
+ for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
+ uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
+ pthread_join(pids[seq], NULL);
+ }
+ }
+
+ tmfree((char*)pids);
+ tmfree((char*)infos);
+
+ tmfree((char*)pidsOfStable);
+ tmfree((char*)infosOfStable);
+ // taos_close(taos);
+ return 0;
+}
+
+static void setParaFromArg() {
+ char type[20];
+ char length[20];
+ if (g_args.host) {
+ tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE);
+ } else {
+ tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
+ }
+
+ if (g_args.user) {
+ tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE);
+ }
+
+ tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN);
+
+ if (g_args.port) {
+ g_Dbs.port = g_args.port;
+ }
+
+ g_Dbs.threadCount = g_args.nthreads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
+
+ g_Dbs.dbCount = 1;
+ g_Dbs.db[0].drop = true;
+
+ tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
+ g_Dbs.db[0].dbCfg.replica = g_args.replica;
+ tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN);
+
+ tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
+
+ g_Dbs.use_metric = g_args.use_metric;
+ g_args.prepared_rand = min(g_args.insertRows, MAX_PREPARED_RAND);
+ g_Dbs.aggr_func = g_args.aggr_func;
+
+ char dataString[TSDB_MAX_BYTES_PER_ROW];
+ char *data_type = g_args.data_type;
+ char **dataType = g_args.dataType;
+
+ memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW);
+
+ if ((data_type[0] == TSDB_DATA_TYPE_BINARY)
+ || (data_type[0] == TSDB_DATA_TYPE_BOOL)
+ || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) {
+ g_Dbs.aggr_func = false;
+ }
+
+ if (g_args.use_metric) {
+ g_Dbs.db[0].superTblCount = 1;
+ tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN);
+ g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables;
+ g_Dbs.threadCount = g_args.nthreads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
+ g_Dbs.asyncMode = g_args.async_mode;
+
+ g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
+ g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS;
+ g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
+ g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
+ tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
+ g_args.tb_prefix, TBNAME_PREFIX_LEN);
+ tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN);
+
+ if (g_args.iface == INTERFACE_BUT) {
+ g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE;
+ } else {
+ g_Dbs.db[0].superTbls[0].iface = g_args.iface;
+ }
+ tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
+ "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
+ g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step;
+
+ g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows;
+ g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
+
+ g_Dbs.db[0].superTbls[0].columnCount = 0;
+ for (int i = 0; i < MAX_NUM_COLUMNS; i++) {
+ if (data_type[i] == TSDB_DATA_TYPE_NULL) {
+ break;
+ }
+
+ g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i];
+ tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
+ dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1));
+ if (1 == regexMatch(dataType[i], "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", REG_ICASE |
+ REG_EXTENDED)) {
+ sscanf(dataType[i], "%[^(](%[^)]", type, length);
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = atoi(length);
+ tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
+ type, min(DATATYPE_BUFF_LEN, strlen(type) + 1));
+ } else {
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth;
+ tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
+ dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1));
+ }
+ g_Dbs.db[0].superTbls[0].columnCount++;
+ }
+
+ if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) {
+ g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount;
+ } else {
+ for (int i = g_Dbs.db[0].superTbls[0].columnCount;
+ i < g_args.columnCount; i++) {
+ g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT;
+ tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
+ "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
+ g_Dbs.db[0].superTbls[0].columnCount++;
+ }
+ }
+
+ tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
+ "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
+ g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
+
+ tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
+ "BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1));
+ g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth;
+ g_Dbs.db[0].superTbls[0].tagCount = 2;
+ } else {
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
+ g_Dbs.db[0].superTbls[0].tagCount = 0;
+ }
+}
+
+/* Function to do regular expression check */
+static int regexMatch(const char *s, const char *reg, int cflags) {
+ regex_t regex;
+ char msgbuf[100] = {0};
+
+ /* Compile regular expression */
+ if (regcomp(&regex, reg, cflags) != 0) {
+ ERROR_EXIT("Failed to compile regex\n");
+ }
+
+ /* Execute regular expression */
+ int reti = regexec(&regex, s, 0, NULL, 0);
+ if (!reti) {
+ regfree(&regex);
+ return 1;
+ } else if (reti == REG_NOMATCH) {
+ regfree(&regex);
+ return 0;
+ } else {
+ regerror(reti, &regex, msgbuf, sizeof(msgbuf));
+ regfree(&regex);
+ printf("Regex match failed: %s\n", msgbuf);
+ exit(EXIT_FAILURE);
+ }
+ return 0;
+}
+
+static int isCommentLine(char *line) {
+ if (line == NULL) return 1;
+
+ return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
+}
+
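+// Execute statements from a SQL file line by line; '#'-prefixed lines are
+// skipped as comments and a trailing '\' continues the statement on the next line.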
+static void querySqlFile(TAOS* taos, char* sqlFile)
+{
+ FILE *fp = fopen(sqlFile, "r");
+ if (fp == NULL) {
+ printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
+ return;
+ }
+
+ int read_len = 0;
+ char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW);
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
+
+ double t = taosGetTimestampMs();
+
+ while((read_len = tgetline(&line, &line_len, fp)) != -1) {
+ if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue;
+ line[--read_len] = '\0';
+
+ if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ continue;
+ }
+
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
+ }
+
+ memcpy(cmd + cmd_len, line, read_len);
+ if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
+ errorPrint2("%s() LN%d, queryDbExec %s failed!\n",
+ __func__, __LINE__, cmd);
+ tmfree(cmd);
+ tmfree(line);
+ tmfclose(fp);
+ return;
+ }
+ memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW);
+ cmd_len = 0;
+ }
+
+ t = taosGetTimestampMs() - t;
+ printf("run %s took %.6f second(s)\n\n", sqlFile, t);
+
+ tmfree(cmd);
+ tmfree(line);
+ tmfclose(fp);
+ return;
+}
+
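+// Dispatch to the insert, query, or subscribe test according to the
+// test_mode parsed from the JSON meta file.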
+static void testMetaFile() {
+ if (INSERT_TEST == g_args.test_mode) {
+ if (g_Dbs.cfgDir[0])
+ taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir);
+
+ insertTestProcess();
+
+ } else if (QUERY_TEST == g_args.test_mode) {
+ if (g_queryInfo.cfgDir[0])
+ taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+
+ queryTestProcess();
+
+ } else if (SUBSCRIBE_TEST == g_args.test_mode) {
+ if (g_queryInfo.cfgDir[0])
+ taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir);
+
+ subscribeTestProcess();
+
+ } else {
+ ;
+ }
+}
+
+static void queryAggrFunc() {
+ // query data
+
+ pthread_t read_id;
+ threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
+ assert(pThreadInfo);
+ pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000
+ pThreadInfo->start_table_from = 0;
+
+ if (g_args.use_metric) {
+ pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
+ pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+ pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0];
+ tstrncpy(pThreadInfo->tb_prefix,
+ g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN);
+ } else {
+ pThreadInfo->ntables = g_args.ntables;
+ pThreadInfo->end_table_to = g_args.ntables -1;
+ tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
+ }
+
+ pThreadInfo->taos = taos_connect(
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ g_Dbs.db[0].dbName,
+ g_Dbs.port);
+ if (pThreadInfo->taos == NULL) {
+ free(pThreadInfo);
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+ tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
+
+ if (!g_Dbs.use_metric) {
+ pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo);
+ } else {
+ pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo);
+ }
+ pthread_join(read_id, NULL);
+ taos_close(pThreadInfo->taos);
+ free(pThreadInfo);
+}
+
+static void testCmdLine() {
+
+ if (strlen(configDir)) {
+ wordexp_t full_path;
+ if (wordexp(configDir, &full_path, 0) != 0) {
+ errorPrint("Invalid path %s\n", configDir);
+ return;
+ }
+ taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
+ wordfree(&full_path);
+ }
+
+ g_args.test_mode = INSERT_TEST;
+ insertTestProcess();
+
+ if (g_Dbs.aggr_func) {
+ queryAggrFunc();
+ }
+}
+
+int main(int argc, char *argv[]) {
+ parse_args(argc, argv, &g_args);
+
+ debugPrint("meta file: %s\n", g_args.metaFile);
+
+ if (g_args.metaFile) {
+ g_totalChildTables = 0;
+
+ if (false == getInfoFromJsonFile(g_args.metaFile)) {
+ printf("Failed to read %s\n", g_args.metaFile);
+ return 1;
+ }
+
+ testMetaFile();
+ } else {
+ memset(&g_Dbs, 0, sizeof(SDbs));
+ g_Dbs.db = calloc(1, sizeof(SDataBase));
+ assert(g_Dbs.db);
+ g_Dbs.db[0].superTbls = calloc(1, sizeof(SSuperTable));
+ assert(g_Dbs.db[0].superTbls);
+ setParaFromArg();
+
+ if (NULL != g_args.sqlFile) {
+ TAOS* qtaos = taos_connect(
+ g_Dbs.host,
+ g_Dbs.user,
+ g_Dbs.password,
+ g_Dbs.db[0].dbName,
+ g_Dbs.port);
+ querySqlFile(qtaos, g_args.sqlFile);
+ taos_close(qtaos);
+
+ } else {
+ testCmdLine();
+ }
+
+ if (g_dupstr)
+ free(g_dupstr);
+ }
+ postFreeResource();
+
+ return 0;
+}
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
deleted file mode 100644
index 1daff0c75956072e02f8439acac2850b9315235a..0000000000000000000000000000000000000000
--- a/src/kit/taosdump/CMakeLists.txt
+++ /dev/null
@@ -1,92 +0,0 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
-PROJECT(TDengine)
-
-INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
-INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
-INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
-INCLUDE_DIRECTORIES(inc)
-AUX_SOURCE_DIRECTORY(. SRC)
-
-FIND_PACKAGE(Git)
-IF(GIT_FOUND)
- EXECUTE_PROCESS(
- COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdump.c
- WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
- RESULT_VARIABLE RESULT
- OUTPUT_VARIABLE TAOSDUMP_COMMIT_SHA1
- )
- IF ("${TAOSDUMP_COMMIT_SHA1}" STREQUAL "")
- SET(TAOSDUMP_COMMIT_SHA1 "unknown")
- ELSE ()
- STRING(SUBSTRING "${TAOSDUMP_COMMIT_SHA1}" 0 7 TAOSDUMP_COMMIT_SHA1)
- STRING(STRIP "${TAOSDUMP_COMMIT_SHA1}" TAOSDUMP_COMMIT_SHA1)
- ENDIF ()
- EXECUTE_PROCESS(
- COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdump.c
- RESULT_VARIABLE RESULT
- OUTPUT_VARIABLE TAOSDUMP_STATUS
- )
- IF (TD_LINUX)
- EXECUTE_PROCESS(
- COMMAND bash "-c" "echo '${TAOSDUMP_STATUS}' | awk '{print $1}'"
- RESULT_VARIABLE RESULT
- OUTPUT_VARIABLE TAOSDUMP_STATUS
- )
- ENDIF (TD_LINUX)
-ELSE()
- MESSAGE("Git not found")
- SET(TAOSDUMP_COMMIT_SHA1 "unknown")
- SET(TAOSDUMP_STATUS "unknown")
-ENDIF (GIT_FOUND)
-
-MESSAGE("taosdump's latest commit in short is:" ${TAOSDUMP_COMMIT_SHA1})
-STRING(STRIP "${TAOSDUMP_STATUS}" TAOSDUMP_STATUS)
-
-IF (TAOSDUMP_STATUS MATCHES "M")
- SET(TAOSDUMP_STATUS "modified")
-ELSE()
- SET(TAOSDUMP_STATUS "")
-ENDIF ()
-
-MESSAGE("taosdump's status is:" ${TAOSDUMP_STATUS})
-
-ADD_DEFINITIONS(-DTAOSDUMP_COMMIT_SHA1="${TAOSDUMP_COMMIT_SHA1}")
-ADD_DEFINITIONS(-DTAOSDUMP_STATUS="${TAOSDUMP_STATUS}")
-
-MESSAGE("TD_VER_NUMBER is:" ${TD_VER_NUMBER})
-IF ("${TD_VER_NUMBER}" STREQUAL "")
- SET(TD_VERSION_NUMBER "TDengine-version-unknown")
-ELSE()
- SET(TD_VERSION_NUMBER ${TD_VER_NUMBER})
-ENDIF ()
-MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
-ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
-
-LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
-
-IF (TD_LINUX)
- ADD_EXECUTABLE(taosdump ${SRC})
- IF (TD_SOMODE_STATIC)
- IF (TD_AVRO_SUPPORT)
- TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson)
- ELSE ()
- TARGET_LINK_LIBRARIES(taosdump taos_static)
- ENDIF()
- ELSE ()
- IF (TD_AVRO_SUPPORT)
- TARGET_LINK_LIBRARIES(taosdump taos avro jansson)
- ELSE ()
- TARGET_LINK_LIBRARIES(taosdump taos)
- ENDIF ()
- ENDIF ()
-ENDIF ()
-
-IF (TD_DARWIN)
- # missing for macosx
- # ADD_EXECUTABLE(taosdump ${SRC})
- # IF (TD_SOMODE_STATIC)
- # TARGET_LINK_LIBRARIES(taosdump taos_static jansson)
- # ELSE ()
- # TARGET_LINK_LIBRARIES(taosdump taos jansson)
- # ENDIF ()
-ENDIF ()
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
deleted file mode 100644
index d552e6123fd6d3e496006a0cb79f662d5c139cc1..0000000000000000000000000000000000000000
--- a/src/kit/taosdump/taosdump.c
+++ /dev/null
@@ -1,4050 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include
-#include
-#include
-#include
-#include
-
-#include "os.h"
-#include "taos.h"
-#include "taosdef.h"
-#include "taosmsg.h"
-#include "tglobal.h"
-#include "tsclient.h"
-#include "tsdb.h"
-#include "tutil.h"
-
-
-static char **g_tsDumpInSqlFiles = NULL;
-static char g_tsCharset[63] = {0};
-
-#ifdef AVRO_SUPPORT
-#include <avro.h>
-#include <jansson.h>
-
-static char **g_tsDumpInAvroFiles = NULL;
-
-static void print_json_aux(json_t *element, int indent);
-
-#endif /* AVRO_SUPPORT */
-
-#define TSDB_SUPPORT_NANOSECOND 1
-
-#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
-#define MAX_PATH_LEN 4096 // max path length on linux is 4095
-#define COMMAND_SIZE 65536
-#define MAX_RECORDS_PER_REQ 32766
-//#define DEFAULT_DUMP_FILE "taosdump.sql"
-
-// for strncpy buffer overflow
-#define min(a, b) (((a) < (b)) ? (a) : (b))
-
-static int converStringToReadable(char *str, int size, char *buf, int bufsize);
-static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
-
-typedef struct {
- short bytes;
- int8_t type;
-} SOColInfo;
-
-#define debugPrint(fmt, ...) \
- do { if (g_args.debug_print || g_args.verbose_print) \
- fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)
-
-#define verbosePrint(fmt, ...) \
- do { if (g_args.verbose_print) \
- fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
-
-#define performancePrint(fmt, ...) \
- do { if (g_args.performance_print) \
- fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
-
-#define warnPrint(fmt, ...) \
- do { fprintf(stderr, "\033[33m"); \
- fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \
- fprintf(stderr, "\033[0m"); } while(0)
-
-#define errorPrint(fmt, ...) \
- do { fprintf(stderr, "\033[31m"); \
- fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); \
- fprintf(stderr, "\033[0m"); } while(0)
-
-#define okPrint(fmt, ...) \
- do { fprintf(stderr, "\033[32m"); \
- fprintf(stderr, "OK: "fmt, __VA_ARGS__); \
- fprintf(stderr, "\033[0m"); } while(0)
-
-static bool isStringNumber(char *input)
-{
- int len = strlen(input);
- if (0 == len) {
- return false;
- }
-
- for (int i = 0; i < len; i++) {
- if (!isdigit(input[i]))
- return false;
- }
-
- return true;
-}
-
-// -------------------------- SHOW DATABASE INTERFACE-----------------------
-enum _show_db_index {
- TSDB_SHOW_DB_NAME_INDEX,
- TSDB_SHOW_DB_CREATED_TIME_INDEX,
- TSDB_SHOW_DB_NTABLES_INDEX,
- TSDB_SHOW_DB_VGROUPS_INDEX,
- TSDB_SHOW_DB_REPLICA_INDEX,
- TSDB_SHOW_DB_QUORUM_INDEX,
- TSDB_SHOW_DB_DAYS_INDEX,
- TSDB_SHOW_DB_KEEP_INDEX,
- TSDB_SHOW_DB_CACHE_INDEX,
- TSDB_SHOW_DB_BLOCKS_INDEX,
- TSDB_SHOW_DB_MINROWS_INDEX,
- TSDB_SHOW_DB_MAXROWS_INDEX,
- TSDB_SHOW_DB_WALLEVEL_INDEX,
- TSDB_SHOW_DB_FSYNC_INDEX,
- TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_CACHELAST_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
- TSDB_SHOW_DB_UPDATE_INDEX,
- TSDB_SHOW_DB_STATUS_INDEX,
- TSDB_MAX_SHOW_DB
-};
-
-// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
-enum _show_tables_index {
- TSDB_SHOW_TABLES_NAME_INDEX,
- TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
- TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
- TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
- TSDB_MAX_SHOW_TABLES
-};
-
-// ---------------------------------- DESCRIBE STABLE CONFIGURE ------------------------------
-enum _describe_table_index {
- TSDB_DESCRIBE_METRIC_FIELD_INDEX,
- TSDB_DESCRIBE_METRIC_TYPE_INDEX,
- TSDB_DESCRIBE_METRIC_LENGTH_INDEX,
- TSDB_DESCRIBE_METRIC_NOTE_INDEX,
- TSDB_MAX_DESCRIBE_METRIC
-};
-
-#define COL_NOTE_LEN 4
-#define COL_TYPEBUF_LEN 16
-#define COL_VALUEBUF_LEN 32
-
-typedef struct {
- char field[TSDB_COL_NAME_LEN];
- char type[COL_TYPEBUF_LEN];
- int length;
- char note[COL_NOTE_LEN];
- char value[COL_VALUEBUF_LEN];
- char *var_value;
-} ColDes;
-
-typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- ColDes cols[];
-} TableDef;
-
-extern char version[];
-
-#define DB_PRECISION_LEN 8
-#define DB_STATUS_LEN 16
-
-typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- bool belongStb;
- char stable[TSDB_TABLE_NAME_LEN];
-} TableInfo;
-
-typedef struct {
- char name[TSDB_TABLE_NAME_LEN];
- char stable[TSDB_TABLE_NAME_LEN];
-} TableRecord;
-
-typedef struct {
- bool isStb;
- bool belongStb;
- int64_t dumpNtbCount;
- TableRecord **dumpNtbInfos;
- TableRecord tableRecord;
-} TableRecordInfo;
-
-typedef struct {
- char name[TSDB_DB_NAME_LEN];
- char create_time[32];
- int64_t ntables;
- int32_t vgroups;
- int16_t replica;
- int16_t quorum;
- int16_t days;
- char keeplist[32];
- //int16_t daysToKeep;
- //int16_t daysToKeep1;
- //int16_t daysToKeep2;
- int32_t cache; //MB
- int32_t blocks;
- int32_t minrows;
- int32_t maxrows;
- int8_t wallevel;
- int32_t fsync;
- int8_t comp;
- int8_t cachelast;
- char precision[DB_PRECISION_LEN]; // time resolution
- int8_t update;
- char status[DB_STATUS_LEN];
- int64_t dumpTbCount;
- TableRecordInfo **dumpTbInfos;
-} SDbInfo;
-
-typedef struct {
- pthread_t threadID;
- int32_t threadIndex;
- char dbName[TSDB_DB_NAME_LEN];
- char stbName[TSDB_TABLE_NAME_LEN];
- int precision;
- TAOS *taos;
- int64_t rowsOfDumpOut;
- int64_t count;
- int64_t from;
-} threadInfo;
-
-typedef struct {
- int64_t totalRowsOfDumpOut;
- int64_t totalChildTblsOfDumpOut;
- int32_t totalSuperTblsOfDumpOut;
- int32_t totalDatabasesOfDumpOut;
-} resultStatistics;
-
-#ifdef AVRO_SUPPORT
-
-enum enAvro_Codec {
- AVRO_CODEC_START = 0,
- AVRO_CODEC_NULL = AVRO_CODEC_START,
- AVRO_CODEC_DEFLATE,
- AVRO_CODEC_SNAPPY,
- AVRO_CODEC_LZMA,
- AVRO_CODEC_UNKNOWN = 255
-};
-
-char *g_avro_codec[] = {
- "null",
- "deflate",
- "snappy",
- "lzma",
- "unknown"
-};
-
-/* avro section begin */
-#define RECORD_NAME_LEN 64
-#define FIELD_NAME_LEN 64
-#define TYPE_NAME_LEN 16
-
-typedef struct FieldStruct_S {
- char name[FIELD_NAME_LEN];
- char type[TYPE_NAME_LEN];
-} FieldStruct;
-
-typedef struct RecordSchema_S {
- char name[RECORD_NAME_LEN];
- char *fields;
- int num_fields;
-} RecordSchema;
-
-/* avro section end */
-#endif
-
-static int64_t g_totalDumpOutRows = 0;
-
-SDbInfo **g_dbInfos = NULL;
-TableInfo *g_tablesList = NULL;
-
-const char *argp_program_version = version;
-const char *argp_program_bug_address = "";
-
-/* Program documentation. */
-static char doc[] = "";
-/* "Argp example #4 -- a program with somewhat more complicated\ */
-/* options\ */
-/* \vThis part of the documentation comes *after* the options;\ */
-/* note that the text is automatically filled, but it's possible\ */
-/* to force a line-break, e.g.\n<-- here."; */
-
-/* A description of the arguments we accept. */
-static char args_doc[] = "dbname [tbname ...]\n--databases db1,db2,... \n--all-databases\n-i inpath\n-o outpath";
-
-/* Keys for options without short-options. */
-#define OPT_ABORT 1 /* –abort */
-
-/* The options we understand. */
-static struct argp_option options[] = {
- // connection option
- {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
- {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
-#ifdef _TD_POWER_
- {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0},
-#else
- {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0},
-#endif
- {"port", 'P', "PORT", 0, "Port to connect", 0},
- {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
- // input/output file
- {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
- {"inpath", 'i', "INPATH", 0, "Input file path.", 1},
- {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
-#ifdef _TD_POWER_
- {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
-#else
- {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
-#endif
- {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
- // dump unit options
- {"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'D', "DATABASES", 0, "Dump inputed databases. Use comma to seprate databases\' name.", 2},
- {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 2},
- // dump format options
- {"schemaonly", 's', 0, 0, "Only dump schema.", 2},
- {"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
-#ifdef AVRO_SUPPORT
- {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3},
- {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4},
-#endif
- {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8},
- {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9},
- {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10},
- {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10},
- {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10},
- {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10},
- {"debug", 'g', 0, 0, "Print debug info.", 15},
- {0}
-};
-
-#define HUMAN_TIME_LEN 28
-
-/* Used by main to communicate with parse_opt. */
-typedef struct arguments {
- // connection option
- char *host;
- char *user;
- char password[SHELL_MAX_PASSWORD_LEN];
- uint16_t port;
- uint16_t mysqlFlag;
- // output file
- char outpath[MAX_FILE_NAME_LEN];
- char inpath[MAX_FILE_NAME_LEN];
- // result file
- char *resultFile;
- char *encode;
- // dump unit option
- bool all_databases;
- bool databases;
- char *databasesSeq;
- // dump format option
- bool schemaonly;
- bool with_property;
-#ifdef AVRO_SUPPORT
- bool avro;
- int avro_codec;
-#endif
- int64_t start_time;
- char humanStartTime[HUMAN_TIME_LEN];
- int64_t end_time;
- char humanEndTime[HUMAN_TIME_LEN];
- char precision[8];
-
- int32_t data_batch;
- int32_t max_sql_len;
- int32_t table_batch; // num of table which will be dump into one output file.
- bool allow_sys;
- // other options
- int32_t thread_num;
- int abort;
- char **arg_list;
- int arg_list_len;
- bool isDumpIn;
- bool debug_print;
- bool verbose_print;
- bool performance_print;
-
- int dumpDbCount;
-} SArguments;
-
-/* Our argp parser. */
-static error_t parse_opt(int key, char *arg, struct argp_state *state);
-
-static struct argp argp = {options, parse_opt, args_doc, doc};
-static resultStatistics g_resultStatistics = {0};
-static FILE *g_fpOfResult = NULL;
-static int g_numOfCores = 1;
-
-struct arguments g_args = {
- // connection option
- NULL,
- "root",
-#ifdef _TD_POWER_
- "powerdb",
-#else
- "taosdata",
-#endif
- 0,
- 0,
- // outpath and inpath
- "",
- "",
- "./dump_result.txt",
- NULL,
- // dump unit option
- false, // all_databases
- false, // databases
- NULL, // databasesSeq
- // dump format option
- false, // schemaonly
- true, // with_property
-#ifdef AVRO_SUPPORT
- false, // avro
- AVRO_CODEC_SNAPPY, // avro_codec
-#endif
- -INT64_MAX + 1, // start_time
- {0}, // humanStartTime
- INT64_MAX, // end_time
- {0}, // humanEndTime
- "ms", // precision
- 1, // data_batch
- TSDB_MAX_SQL_LEN, // max_sql_len
- 1, // table_batch
- false, // allow_sys
- // other options
- 8, // thread_num
- 0, // abort
- NULL, // arg_list
- 0, // arg_list_len
- false, // isDumpIn
- false, // debug_print
- false, // verbose_print
- false, // performance_print
- 0, // dumpDbCount
-};
-
-// get taosdump commit number version
-#ifndef TAOSDUMP_COMMIT_SHA1
-#define TAOSDUMP_COMMIT_SHA1 "unknown"
-#endif
-
-#ifndef TD_VERNUMBER
-#define TD_VERNUMBER "unknown"
-#endif
-
-#ifndef TAOSDUMP_STATUS
-#define TAOSDUMP_STATUS "unknown"
-#endif
-
-static void printVersion() {
- char tdengine_ver[] = TD_VERNUMBER;
- char taosdump_ver[] = TAOSDUMP_COMMIT_SHA1;
- char taosdump_status[] = TAOSDUMP_STATUS;
-
- if (strlen(taosdump_status) == 0) {
- printf("taosdump version %s-%s\n",
- tdengine_ver, taosdump_ver);
- } else {
- printf("taosdump version %s-%s, status:%s\n",
- tdengine_ver, taosdump_ver, taosdump_status);
- }
-}
-
-void errorWrongValue(char *program, char *wrong_arg, char *wrong_value)
-{
- fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value);
- fprintf(stderr, "Try `taosdump --help' or `taosdump --usage' for more information.\n");
-}
-
-static void errorUnrecognized(char *program, char *wrong_arg)
-{
- fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg);
- fprintf(stderr, "Try `taosdump --help' or `taosdump --usage' for more information.\n");
-}
-
-static void errorPrintReqArg(char *program, char *wrong_arg)
-{
- fprintf(stderr,
- "%s: option requires an argument -- '%s'\n",
- program, wrong_arg);
- fprintf(stderr,
- "Try `taosdump --help' or `taosdump --usage' for more information.\n");
-}
-
-static void errorPrintReqArg2(char *program, char *wrong_arg)
-{
- fprintf(stderr,
- "%s: option requires a number argument '-%s'\n",
- program, wrong_arg);
- fprintf(stderr,
- "Try `taosdump --help' or `taosdump --usage' for more information.\n");
-}
-
-static void errorPrintReqArg3(char *program, char *wrong_arg)
-{
- fprintf(stderr,
- "%s: option '%s' requires an argument\n",
- program, wrong_arg);
- fprintf(stderr,
- "Try `taosdump --help' or `taosdump --usage' for more information.\n");
-}
-
-/* Parse a single option. */
-static error_t parse_opt(int key, char *arg, struct argp_state *state) {
- /* Get the input argument from argp_parse, which we
- know is a pointer to our arguments structure. */
- wordexp_t full_path;
-
- switch (key) {
- // connection option
- case 'a':
- g_args.allow_sys = true;
- break;
- case 'h':
- g_args.host = arg;
- break;
- case 'u':
- g_args.user = arg;
- break;
- case 'p':
- break;
- case 'P':
- if (!isStringNumber(arg)) {
- errorPrintReqArg2("taosdump", "P");
- exit(EXIT_FAILURE);
- }
-
- uint64_t port = atoi(arg);
- if (port > 65535) {
- errorWrongValue("taosdump", "-P or --port", arg);
- exit(EXIT_FAILURE);
- }
- g_args.port = (uint16_t)port;
-
- break;
- case 'q':
- g_args.mysqlFlag = atoi(arg);
- break;
- case 'o':
- if (wordexp(arg, &full_path, 0) != 0) {
- errorPrint("Invalid path %s\n", arg);
- return -1;
- }
-
- if (full_path.we_wordv[0]) {
- tstrncpy(g_args.outpath, full_path.we_wordv[0],
- MAX_FILE_NAME_LEN);
- wordfree(&full_path);
- } else {
- errorPrintReqArg3("taosdump", "-o or --outpath");
- exit(EXIT_FAILURE);
- }
- break;
-
- case 'g':
- g_args.debug_print = true;
- break;
-
- case 'i':
- g_args.isDumpIn = true;
- if (wordexp(arg, &full_path, 0) != 0) {
- errorPrint("Invalid path %s\n", arg);
- return -1;
- }
-
- if (full_path.we_wordv[0]) {
- tstrncpy(g_args.inpath, full_path.we_wordv[0],
- MAX_FILE_NAME_LEN);
- wordfree(&full_path);
- } else {
- errorPrintReqArg3("taosdump", "-i or --inpath");
- exit(EXIT_FAILURE);
- }
- break;
-
-#ifdef AVRO_SUPPORT
- case 'v':
- g_args.avro = true;
- break;
-
- case 'd':
- for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) {
- if (0 == strcmp(arg, g_avro_codec[i])) {
- g_args.avro_codec = i;
- break;
- }
- }
- break;
-#endif
-
- case 'r':
- g_args.resultFile = arg;
- break;
- case 'c':
- if (0 == strlen(arg)) {
- errorPrintReqArg3("taosdump", "-c or --config-dir");
- exit(EXIT_FAILURE);
- }
- if (wordexp(arg, &full_path, 0) != 0) {
- errorPrint("Invalid path %s\n", arg);
- exit(EXIT_FAILURE);
- }
- tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN);
- wordfree(&full_path);
- break;
- case 'e':
- g_args.encode = arg;
- break;
- // dump unit option
- case 'A':
- break;
- case 'D':
- g_args.databases = true;
- break;
- // dump format option
- case 's':
- g_args.schemaonly = true;
- break;
- case 'N':
- g_args.with_property = false;
- break;
- case 'S':
- // parse time here.
- break;
- case 'E':
- break;
- case 'B':
- g_args.data_batch = atoi(arg);
- if (g_args.data_batch > MAX_RECORDS_PER_REQ) {
- g_args.data_batch = MAX_RECORDS_PER_REQ;
- }
- break;
- case 'L':
- {
- int32_t len = atoi(arg);
- if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
- len = TSDB_MAX_ALLOWED_SQL_LEN;
- } else if (len < TSDB_MAX_SQL_LEN) {
- len = TSDB_MAX_SQL_LEN;
- }
- g_args.max_sql_len = len;
- break;
- }
- case 't':
- g_args.table_batch = atoi(arg);
- break;
- case 'T':
- if (!isStringNumber(arg)) {
- errorPrint("%s", "\n\t-T need a number following!\n");
- exit(EXIT_FAILURE);
- }
- g_args.thread_num = atoi(arg);
- break;
- case OPT_ABORT:
- g_args.abort = 1;
- break;
- case ARGP_KEY_ARG:
- if (strlen(state->argv[state->next - 1])) {
- g_args.arg_list = &state->argv[state->next - 1];
- g_args.arg_list_len = state->argc - state->next + 1;
- }
- state->next = state->argc;
- break;
-
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
-
-static void freeTbDes(TableDef *tableDes)
-{
- for (int i = 0; i < TSDB_MAX_COLUMNS; i ++) {
- if (tableDes->cols[i].var_value) {
- free(tableDes->cols[i].var_value);
- }
- }
-
- free(tableDes);
-}
-
-static int queryDbImpl(TAOS *taos, char *command) {
- TAOS_RES *res = NULL;
- int32_t code = -1;
-
- res = taos_query(taos, command);
- code = taos_errno(res);
-
- if (code != 0) {
- errorPrint("Failed to run <%s>, reason: %s\n",
- command, taos_errstr(res));
- taos_free_result(res);
- //taos_close(taos);
- return code;
- }
-
- taos_free_result(res);
- return 0;
-}
-
-static void parse_args(
- int argc, char *argv[], SArguments *arguments) {
-
- for (int i = 1; i < argc; i++) {
- if ((strncmp(argv[i], "-p", 2) == 0)
- || (strncmp(argv[i], "--password", 10) == 0)) {
- if ((strlen(argv[i]) == 2)
- || (strncmp(argv[i], "--password", 10) == 0)) {
- printf("Enter password: ");
- taosSetConsoleEcho(false);
-                if (scanf("%20s", arguments->password) != 1) {
- errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__);
- }
- taosSetConsoleEcho(true);
- } else {
- tstrncpy(arguments->password, (char *)(argv[i] + 2),
- SHELL_MAX_PASSWORD_LEN);
- strcpy(argv[i], "-p");
- }
- } else if (strcmp(argv[i], "-gg") == 0) {
- arguments->verbose_print = true;
- strcpy(argv[i], "");
- } else if (strcmp(argv[i], "-PP") == 0) {
- arguments->performance_print = true;
- strcpy(argv[i], "");
- } else if ((strcmp(argv[i], "-A") == 0)
- || (0 == strncmp(
- argv[i], "--all-database",
- strlen("--all-database")))) {
- g_args.all_databases = true;
- } else if ((strncmp(argv[i], "-D", strlen("-D")) == 0)
- || (0 == strncmp(
- argv[i], "--database",
- strlen("--database")))) {
- if (2 == strlen(argv[i])) {
- if (argc == i+1) {
- errorPrintReqArg(argv[0], "D");
- exit(EXIT_FAILURE);
- }
- arguments->databasesSeq = argv[++i];
- } else if (0 == strncmp(argv[i], "--databases=", strlen("--databases="))) {
- arguments->databasesSeq = (char *)(argv[i] + strlen("--databases="));
- } else if (0 == strncmp(argv[i], "-D", strlen("-D"))) {
- arguments->databasesSeq = (char *)(argv[i] + strlen("-D"));
- } else if (strlen("--databases") == strlen(argv[i])) {
- if (argc == i+1) {
- errorPrintReqArg3(argv[0], "--databases");
- exit(EXIT_FAILURE);
- }
- arguments->databasesSeq = argv[++i];
- } else {
- errorUnrecognized(argv[0], argv[i]);
- exit(EXIT_FAILURE);
- }
- g_args.databases = true;
- } else if (0 == strncmp(argv[i], "--version", strlen("--version")) ||
- 0 == strncmp(argv[i], "-V", strlen("-V"))) {
- printVersion();
- exit(EXIT_SUCCESS);
- } else {
- continue;
- }
-
- }
-}
-
-static void copyHumanTimeToArg(char *timeStr, bool isStartTime)
-{
- if (isStartTime)
- tstrncpy(g_args.humanStartTime, timeStr, HUMAN_TIME_LEN);
- else
- tstrncpy(g_args.humanEndTime, timeStr, HUMAN_TIME_LEN);
-}
-
-static void copyTimestampToArg(char *timeStr, bool isStartTime)
-{
- if (isStartTime)
- g_args.start_time = atol(timeStr);
- else
- g_args.end_time = atol(timeStr);
-}
-
-static void parse_timestamp(
- int argc, char *argv[], SArguments *arguments) {
- for (int i = 1; i < argc; i++) {
- char *tmp;
- bool isStartTime = false;
- bool isEndTime = false;
-
- if (strcmp(argv[i], "-S") == 0) {
- isStartTime = true;
- } else if (strcmp(argv[i], "-E") == 0) {
- isEndTime = true;
- }
-
- if (isStartTime || isEndTime) {
- if (NULL == argv[i+1]) {
- errorPrint("%s need a valid value following!\n", argv[i]);
- exit(-1);
- }
- tmp = strdup(argv[i+1]);
-
- if (strchr(tmp, ':') && strchr(tmp, '-')) {
- copyHumanTimeToArg(tmp, isStartTime);
- } else {
- copyTimestampToArg(tmp, isStartTime);
- }
-
- free(tmp);
- }
- }
-}
-
-static int getPrecisionByString(char *precision)
-{
- if (0 == strncasecmp(precision,
- "ms", 2)) {
- return TSDB_TIME_PRECISION_MILLI;
- } else if (0 == strncasecmp(precision,
- "us", 2)) {
- return TSDB_TIME_PRECISION_MICRO;
-#if TSDB_SUPPORT_NANOSECOND == 1
- } else if (0 == strncasecmp(precision,
- "ns", 2)) {
- return TSDB_TIME_PRECISION_NANO;
-#endif
- } else {
- errorPrint("Invalid time precision: %s",
- precision);
- }
-
- return -1;
-}
-
-static void freeDbInfos() {
- if (g_dbInfos == NULL) return;
- for (int i = 0; i < g_args.dumpDbCount; i++)
- tfree(g_dbInfos[i]);
- tfree(g_dbInfos);
-}
-
-// check whether the table is a normal table or a super table
-static int getTableRecordInfo(
- char *dbName,
- char *table, TableRecordInfo *pTableRecordInfo) {
- TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
- dbName, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- return -1;
- }
-
- TAOS_ROW row = NULL;
- bool isSet = false;
- TAOS_RES *result = NULL;
-
- memset(pTableRecordInfo, 0, sizeof(TableRecordInfo));
-
- char command[COMMAND_SIZE];
-
- sprintf(command, "USE %s", dbName);
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
- if (code != 0) {
- errorPrint("invalid database %s, reason: %s\n",
- dbName, taos_errstr(result));
- return 0;
- }
-
- sprintf(command, "SHOW TABLES LIKE \'%s\'", table);
-
- result = taos_query(taos, command);
- code = taos_errno(result);
-
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(result));
- taos_free_result(result);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(result);
-
- while ((row = taos_fetch_row(result)) != NULL) {
- isSet = true;
- pTableRecordInfo->isStb = false;
- tstrncpy(pTableRecordInfo->tableRecord.name,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- min(TSDB_TABLE_NAME_LEN,
- fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes + 1));
- if (strlen((char *)row[TSDB_SHOW_TABLES_METRIC_INDEX]) > 0) {
- pTableRecordInfo->belongStb = true;
- tstrncpy(pTableRecordInfo->tableRecord.stable,
- (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
- min(TSDB_TABLE_NAME_LEN,
- fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes + 1));
- } else {
- pTableRecordInfo->belongStb = false;
- }
- break;
- }
-
- taos_free_result(result);
- result = NULL;
-
- if (isSet) {
- return 0;
- }
-
- sprintf(command, "SHOW STABLES LIKE \'%s\'", table);
-
- result = taos_query(taos, command);
- code = taos_errno(result);
-
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(result));
- taos_free_result(result);
- return -1;
- }
-
- while ((row = taos_fetch_row(result)) != NULL) {
- isSet = true;
- pTableRecordInfo->isStb = true;
- tstrncpy(pTableRecordInfo->tableRecord.stable, table,
- TSDB_TABLE_NAME_LEN);
- break;
- }
-
- taos_free_result(result);
- result = NULL;
-
- if (isSet) {
- return 0;
- }
- errorPrint("%s() LN%d, invalid table/stable %s\n",
- __func__, __LINE__, table);
- return -1;
-}
-
-static int inDatabasesSeq(
- char *name,
- int len)
-{
- if (strstr(g_args.databasesSeq, ",") == NULL) {
- if (0 == strncmp(g_args.databasesSeq, name, len)) {
- return 0;
- }
- } else {
- char *dupSeq = strdup(g_args.databasesSeq);
- char *running = dupSeq;
- char *dbname = strsep(&running, ",");
- while (dbname) {
- if (0 == strncmp(dbname, name, len)) {
- tfree(dupSeq);
- return 0;
- }
-
- dbname = strsep(&running, ",");
- }
- }
-
- return -1;
-}
-
-static int getDumpDbCount()
-{
- int count = 0;
-
- TAOS *taos = NULL;
- TAOS_RES *result = NULL;
- char *command = "show databases";
- TAOS_ROW row;
-
- /* Connect to server */
- taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (NULL == taos) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- return 0;
- }
-
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
-
- if (0 != code) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(result));
- taos_close(taos);
- return 0;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(result);
-
- while ((row = taos_fetch_row(result)) != NULL) {
-        // skip the system database 'log' unless dumping system databases is allowed
- if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- && (!g_args.allow_sys)) {
- continue;
- }
-
- if (g_args.databases) { // input multi dbs
- if (inDatabasesSeq(
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
- continue;
- } else if (!g_args.all_databases) { // only input one db
- if (strncasecmp(g_args.arg_list[0],
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
- continue;
- }
-
- count++;
- }
-
- if (count == 0) {
- errorPrint("%d databases valid to dump\n", count);
- }
-
- taos_close(taos);
- return count;
-}
-
-static void dumpCreateMTableClause(
- char* dbName,
- char *stable,
- TableDef *tableDes,
- int numOfCols,
- FILE *fp
- ) {
- int counter = 0;
- int count_temp = 0;
-
- char* tmpBuf = (char *)malloc(COMMAND_SIZE);
- if (tmpBuf == NULL) {
- errorPrint("%s() LN%d, failed to allocate %d memory\n",
- __func__, __LINE__, COMMAND_SIZE);
- return;
- }
-
- char *pstr = NULL;
- pstr = tmpBuf;
-
- pstr += sprintf(tmpBuf,
- "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (",
- dbName, tableDes->name, dbName, stable);
-
- for (; counter < numOfCols; counter++) {
- if (tableDes->cols[counter].note[0] != '\0') break;
- }
-
- assert(counter < numOfCols);
- count_temp = counter;
-
- for (; counter < numOfCols; counter++) {
- if (counter != count_temp) {
- if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
- || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
- //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
- if (tableDes->cols[counter].var_value) {
- pstr += sprintf(pstr, ", \'%s\'",
- tableDes->cols[counter].var_value);
- } else {
- pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value);
- }
- } else {
- pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value);
- }
- } else {
- if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
- || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
- //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
- if (tableDes->cols[counter].var_value) {
- pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value);
- } else {
- pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].value);
- }
- } else {
- pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].value);
- }
- /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */
- }
-
- /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar")
- * == 0) { */
- /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */
- /* } */
- }
-
- pstr += sprintf(pstr, ");");
-
- fprintf(fp, "%s\n", tmpBuf);
- free(tmpBuf);
-}
-
-static int64_t getNtbCountOfStb(char *dbName, char *stbName)
-{
- TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
- dbName, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- return -1;
- }
-
- int64_t count = 0;
-
- char command[COMMAND_SIZE];
-
- sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
-
- TAOS_RES *res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
- }
-
- TAOS_ROW row = NULL;
-
- if ((row = taos_fetch_row(res)) != NULL) {
- count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
- }
-
- taos_close(taos);
- return count;
-}
-
-static int getTableDes(
- TAOS *taos,
- char* dbName, char *table,
- TableDef *tableDes, bool isSuperTable) {
- TAOS_ROW row = NULL;
- TAOS_RES* res = NULL;
- int colCount = 0;
-
- char sqlstr[COMMAND_SIZE];
- sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taos, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, sqlstr, taos_errstr(res));
- taos_free_result(res);
- return -1;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(res);
-
- tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
- while ((row = taos_fetch_row(res)) != NULL) {
- tstrncpy(tableDes->cols[colCount].field,
- (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- min(TSDB_COL_NAME_LEN,
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
- tstrncpy(tableDes->cols[colCount].type,
- (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
- tableDes->cols[colCount].length =
- *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- tstrncpy(tableDes->cols[colCount].note,
- (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- min(COL_NOTE_LEN,
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
- colCount++;
- }
-
- taos_free_result(res);
- res = NULL;
-
- if (isSuperTable) {
- return colCount;
- }
-
-    // if the child table has tags, run "select tagName from table" to get each tag value
- for (int i = 0 ; i < colCount; i++) {
- if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
-
- sprintf(sqlstr, "select %s from %s.%s",
- tableDes->cols[i].field, dbName, table);
-
- res = taos_query(taos, sqlstr);
- code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
- }
-
- fields = taos_fetch_fields(res);
-
- row = taos_fetch_row(res);
- if (NULL == row) {
- errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
- __func__, __LINE__, sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
- }
-
- if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) {
- sprintf(tableDes->cols[i].note, "%s", "NUL");
- sprintf(tableDes->cols[i].value, "%s", "NULL");
- taos_free_result(res);
- res = NULL;
- continue;
- }
-
- int32_t* length = taos_fetch_lengths(res);
-
- switch (fields[0].type) {
- case TSDB_DATA_TYPE_BOOL:
- sprintf(tableDes->cols[i].value, "%d",
- ((((int32_t)(*((char *)
- row[TSDB_SHOW_TABLES_NAME_INDEX])))==1)
- ?1:0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- sprintf(tableDes->cols[i].value, "%d",
- *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- sprintf(tableDes->cols[i].value, "%d",
- *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_INT:
- sprintf(tableDes->cols[i].value, "%d",
- *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- sprintf(tableDes->cols[i].value, "%" PRId64 "",
- *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- sprintf(tableDes->cols[i].value, "%f",
- GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- sprintf(tableDes->cols[i].value, "%f",
- GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_BINARY:
- memset(tableDes->cols[i].value, 0,
- sizeof(tableDes->cols[i].value));
- int len = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- // FIXME for long value
- if (len < (COL_VALUEBUF_LEN - 2)) {
- converStringToReadable(
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- length[0],
- tableDes->cols[i].value,
- len);
- } else {
- tableDes->cols[i].var_value = calloc(1, len * 2);
- if (tableDes->cols[i].var_value == NULL) {
- errorPrint("%s() LN%d, memory alalocation failed!\n",
- __func__, __LINE__);
- taos_free_result(res);
- return -1;
- }
- converStringToReadable((char *)row[0],
- length[0],
- (char *)(tableDes->cols[i].var_value), len);
- }
- break;
-
- case TSDB_DATA_TYPE_NCHAR:
- memset(tableDes->cols[i].value, 0,
-                        sizeof(tableDes->cols[i].value));
- int nlen = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- if (nlen < (COL_VALUEBUF_LEN-2)) {
- char tbuf[COL_VALUEBUF_LEN-2]; // need reserve 2 bytes for ' '
- convertNCharToReadable(
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- length[0], tbuf, COL_VALUEBUF_LEN-2);
- sprintf(tableDes->cols[i].value, "%s", tbuf);
- } else {
- tableDes->cols[i].var_value = calloc(1, nlen * 4);
- if (tableDes->cols[i].var_value == NULL) {
- errorPrint("%s() LN%d, memory alalocation failed!\n",
- __func__, __LINE__);
- taos_free_result(res);
- return -1;
- }
- converStringToReadable(
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- length[0],
- (char *)(tableDes->cols[i].var_value), nlen);
- }
- break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- sprintf(tableDes->cols[i].value, "%" PRId64 "",
- *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-#if 0
- if (!g_args.mysqlFlag) {
- sprintf(tableDes->cols[i].value, "%" PRId64 "",
- *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf,
- (int)(ts % 1000));
- }
-#endif
- break;
- default:
- break;
- }
-
- taos_free_result(res);
- }
-
- return colCount;
-}
-
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
- FILE *fp, char* dbName) {
- int counter = 0;
- int count_temp = 0;
- char sqlstr[COMMAND_SIZE];
-
- char* pstr = sqlstr;
-
- pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
- dbName, tableDes->name);
-
- for (; counter < numOfCols; counter++) {
- if (tableDes->cols[counter].note[0] != '\0') break;
-
- if (counter == 0) {
- pstr += sprintf(pstr, " (%s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
- } else {
- pstr += sprintf(pstr, ", %s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
- }
-
- if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
- || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
- pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
- }
- }
-
- count_temp = counter;
-
- for (; counter < numOfCols; counter++) {
- if (counter == count_temp) {
- pstr += sprintf(pstr, ") TAGS (%s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
- } else {
- pstr += sprintf(pstr, ", %s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
- }
-
- if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
- || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
- pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
- }
- }
-
- pstr += sprintf(pstr, ");");
-
- debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
- return fprintf(fp, "%s\n\n", sqlstr);
-}
-
-static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp)
-{
- uint64_t sizeOfTableDes =
- (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
- if (NULL == tableDes) {
- errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
- __func__, __LINE__, sizeOfTableDes);
- exit(-1);
- }
-
- int colCount = getTableDes(taos, dbInfo->name,
- stbName, tableDes, true);
-
- if (colCount < 0) {
- free(tableDes);
- errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
- __func__, __LINE__, stbName);
- exit(-1);
- }
-
- dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
- free(tableDes);
-
- return 0;
-}
-
-static int64_t dumpCreateSTableClauseOfDb(
- SDbInfo *dbInfo, FILE *fp)
-{
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbInfo->name, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbInfo->name);
- return 0;
- }
-
- TAOS_ROW row;
- char command[COMMAND_SIZE] = {0};
-
- sprintf(command, "SHOW %s.STABLES", dbInfo->name);
-
- TAOS_RES* res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- exit(-1);
- }
-
- int64_t superTblCnt = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
- if (0 == dumpStableClasuse(taos, dbInfo,
- row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
- superTblCnt ++;
- }
- }
-
- taos_free_result(res);
-
- fprintf(g_fpOfResult,
- "# super table counter: %"PRId64"\n",
- superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
-
- taos_close(taos);
-
- return superTblCnt;
-}
-
-static void dumpCreateDbClause(
- SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
-
- char *pstr = sqlstr;
- pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
- if (isDumpProperty) {
- pstr += sprintf(pstr,
- "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->replica, dbInfo->quorum, dbInfo->days,
- dbInfo->keeplist,
- dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
- dbInfo->fsync,
- dbInfo->cachelast,
- dbInfo->comp, dbInfo->precision, dbInfo->update);
- }
-
- pstr += sprintf(pstr, ";");
- fprintf(fp, "%s\n\n", sqlstr);
-}
-
-static FILE* openDumpInFile(char *fptr) {
- wordexp_t full_path;
-
- if (wordexp(fptr, &full_path, 0) != 0) {
- errorPrint("illegal file name: %s\n", fptr);
- return NULL;
- }
-
- char *fname = full_path.we_wordv[0];
-
- FILE *f = NULL;
- if ((fname) && (strlen(fname) > 0)) {
- f = fopen(fname, "r");
- if (f == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, fname);
- }
- }
-
- wordfree(&full_path);
- return f;
-}
-
-static uint64_t getFilesNum(char *ext)
-{
- uint64_t count = 0;
-
- int namelen, extlen;
- struct dirent *pDirent;
- DIR *pDir;
-
- extlen = strlen(ext);
-
- bool isSql = (0 == strcmp(ext, "sql"));
-
- pDir = opendir(g_args.inpath);
- if (pDir != NULL) {
- while ((pDirent = readdir(pDir)) != NULL) {
- namelen = strlen (pDirent->d_name);
-
- if (namelen > extlen) {
- if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
- if (isSql) {
- if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
- continue;
- }
- }
- verbosePrint("%s found\n", pDirent->d_name);
- count ++;
- }
- }
- }
- closedir (pDir);
- }
-
- debugPrint("%"PRId64" .%s files found!\n", count, ext);
- return count;
-}
-
-static void freeFileList(char **fileList, int64_t count)
-{
- for (int64_t i = 0; i < count; i++) {
- tfree(fileList[i]);
- }
- tfree(fileList);
-}
-
-static void createDumpinList(char *ext, int64_t count)
-{
- bool isSql = (0 == strcmp(ext, "sql"));
-
- if (isSql) {
- g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *));
- assert(g_tsDumpInSqlFiles);
-
- for (int64_t i = 0; i < count; i++) {
- g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
- assert(g_tsDumpInSqlFiles[i]);
- }
- }
-#ifdef AVRO_SUPPORT
- else {
- g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *));
- assert(g_tsDumpInAvroFiles);
-
- for (int64_t i = 0; i < count; i++) {
- g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
- assert(g_tsDumpInAvroFiles[i]);
- }
-
- }
-#endif
-
- int namelen, extlen;
- struct dirent *pDirent;
- DIR *pDir;
-
- extlen = strlen(ext);
-
- count = 0;
- pDir = opendir(g_args.inpath);
- if (pDir != NULL) {
- while ((pDirent = readdir(pDir)) != NULL) {
- namelen = strlen (pDirent->d_name);
-
- if (namelen > extlen) {
- if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
- verbosePrint("%s found\n", pDirent->d_name);
- if (isSql) {
- if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
- continue;
- }
- strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
- }
-#ifdef AVRO_SUPPORT
- else {
- strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
- }
-#endif
- }
- }
- }
- closedir (pDir);
- }
-
- debugPrint("%"PRId64" .%s files filled to list!\n", count, ext);
-}
-
-#ifdef AVRO_SUPPORT
-
-static int convertTbDesToJson(
- char *dbName, char *tbName, TableDef *tableDes, int colCount,
- char **jsonSchema)
-{
- // {
- // "type": "record",
- // "name": "dbname.tbname",
- // "fields": [
- // {
- // "name": "col0 name",
- // "type": "long"
- // },
- // {
- // "name": "col1 name",
- // "type": "int"
- // },
- // {
- // "name": "col2 name",
- // "type": "float"
- // },
- // {
- // "name": "col3 name",
- // "type": "boolean"
- // },
- // ...
- // {
- // "name": "coln name",
- // "type": "string"
- // }
- // ]
- // }
- *jsonSchema = (char *)calloc(1,
- 17 + TSDB_DB_NAME_LEN /* dbname section */
- + 17 /* type: record */
- + 11 + TSDB_TABLE_NAME_LEN /* tbname section */
- + 10 /* fields section */
- + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
- if (*jsonSchema == NULL) {
- errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
- return -1;
- }
-
- char *pstr = *jsonSchema;
- pstr += sprintf(pstr,
- "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [",
- dbName, tbName);
- for (int i = 0; i < colCount; i ++) {
- if (0 == i) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "long");
- } else {
- if (strcasecmp(tableDes->cols[i].type, "binary") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "string");
- } else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "bytes");
- } else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "boolean");
- } else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "int");
- } else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "int");
- } else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "long");
- } else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "long");
- } else {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field,
- strtolower(tableDes->cols[i].type, tableDes->cols[i].type));
- }
- }
- if ((i != (colCount -1))
- && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
- pstr += sprintf(pstr, "},");
- } else {
- pstr += sprintf(pstr, "}");
- break;
- }
- }
-
- pstr += sprintf(pstr, "]}");
-
- debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema);
-
- return 0;
-}
-
-static void print_json_indent(int indent) {
- int i;
- for (i = 0; i < indent; i++) {
- putchar(' ');
- }
-}
-
-const char *json_plural(size_t count) { return count == 1 ? "" : "s"; }
-
-static void print_json_object(json_t *element, int indent) {
- size_t size;
- const char *key;
- json_t *value;
-
- print_json_indent(indent);
- size = json_object_size(element);
-
- printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size));
- json_object_foreach(element, key, value) {
- print_json_indent(indent + 2);
- printf("JSON Key: \"%s\"\n", key);
- print_json_aux(value, indent + 2);
- }
-}
-
-static void print_json_array(json_t *element, int indent) {
- size_t i;
- size_t size = json_array_size(element);
- print_json_indent(indent);
-
- printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size));
- for (i = 0; i < size; i++) {
- print_json_aux(json_array_get(element, i), indent + 2);
- }
-}
-
-static void print_json_string(json_t *element, int indent) {
- print_json_indent(indent);
- printf("JSON String: \"%s\"\n", json_string_value(element));
-}
-
-static void print_json_integer(json_t *element, int indent) {
- print_json_indent(indent);
- printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element));
-}
-
-static void print_json_real(json_t *element, int indent) {
- print_json_indent(indent);
- printf("JSON Real: %f\n", json_real_value(element));
-}
-
-static void print_json_true(json_t *element, int indent) {
- (void)element;
- print_json_indent(indent);
- printf("JSON True\n");
-}
-
-static void print_json_false(json_t *element, int indent) {
- (void)element;
- print_json_indent(indent);
- printf("JSON False\n");
-}
-
-static void print_json_null(json_t *element, int indent) {
- (void)element;
- print_json_indent(indent);
- printf("JSON Null\n");
-}
-
-static void print_json_aux(json_t *element, int indent)
-{
- switch(json_typeof(element)) {
- case JSON_OBJECT:
- print_json_object(element, indent);
- break;
-
- case JSON_ARRAY:
- print_json_array(element, indent);
- break;
-
- case JSON_STRING:
- print_json_string(element, indent);
- break;
-
- case JSON_INTEGER:
- print_json_integer(element, indent);
- break;
-
- case JSON_REAL:
- print_json_real(element, indent);
- break;
-
- case JSON_TRUE:
- print_json_true(element, indent);
- break;
-
- case JSON_FALSE:
- print_json_false(element, indent);
- break;
-
- case JSON_NULL:
- print_json_null(element, indent);
- break;
-
- default:
- fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element));
- }
-}
-
-static void print_json(json_t *root) { print_json_aux(root, 0); }
-
-static json_t *load_json(char *jsonbuf)
-{
- json_t *root;
- json_error_t error;
-
- root = json_loads(jsonbuf, 0, &error);
-
- if (root) {
- return root;
- } else {
- fprintf(stderr, "json error on line %d: %s\n", error.line, error.text);
- return NULL;
- }
-}
-
-static RecordSchema *parse_json_to_recordschema(json_t *element)
-{
- RecordSchema *recordSchema = malloc(sizeof(RecordSchema));
- assert(recordSchema);
-
- if (JSON_OBJECT != json_typeof(element)) {
- fprintf(stderr, "%s() LN%d, json passed is not an object\n",
- __func__, __LINE__);
- return NULL;
- }
-
- const char *key;
- json_t *value;
-
- json_object_foreach(element, key, value) {
- if (0 == strcmp(key, "name")) {
- tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1);
- } else if (0 == strcmp(key, "fields")) {
- if (JSON_ARRAY == json_typeof(value)) {
-
- size_t i;
- size_t size = json_array_size(value);
-
- verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n",
- __func__, __LINE__,
- (long long)size, json_plural(size));
-
- recordSchema->num_fields = size;
- recordSchema->fields = malloc(sizeof(FieldStruct) * size);
- assert(recordSchema->fields);
-
- for (i = 0; i < size; i++) {
- FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
- json_t *arr_element = json_array_get(value, i);
- const char *ele_key;
- json_t *ele_value;
-
- json_object_foreach(arr_element, ele_key, ele_value) {
- if (0 == strcmp(ele_key, "name")) {
- tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1);
- } else if (0 == strcmp(ele_key, "type")) {
- if (JSON_STRING == json_typeof(ele_value)) {
- tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1);
- } else if (JSON_OBJECT == json_typeof(ele_value)) {
- const char *obj_key;
- json_t *obj_value;
-
- json_object_foreach(ele_value, obj_key, obj_value) {
- if (0 == strcmp(obj_key, "type")) {
- if (JSON_STRING == json_typeof(obj_value)) {
- tstrncpy(field->type,
- json_string_value(obj_value), TYPE_NAME_LEN-1);
- }
- }
- }
- }
- }
- }
- }
- } else {
- fprintf(stderr, "%s() LN%d, fields have no array\n",
- __func__, __LINE__);
- return NULL;
- }
-
- break;
- }
- }
-
- return recordSchema;
-}
-
-static void freeRecordSchema(RecordSchema *recordSchema)
-{
- if (recordSchema) {
- if (recordSchema->fields) {
- free(recordSchema->fields);
- }
- free(recordSchema);
- }
-}
-
-static int64_t writeResultToAvro(
- char *avroFilename,
- char *jsonSchema,
- TAOS_RES *res)
-{
- avro_schema_t schema;
- if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) {
- errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n",
- __func__, __LINE__, jsonSchema, avro_strerror());
- exit(EXIT_FAILURE);
- }
-
- json_t *json_root = load_json(jsonSchema);
- debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
-
- RecordSchema *recordSchema;
- if (json_root) {
- if (g_args.debug_print || g_args.verbose_print) {
- print_json(json_root);
- }
-
- recordSchema = parse_json_to_recordschema(json_root);
- if (NULL == recordSchema) {
- fprintf(stderr, "Failed to parse json to recordschema\n");
- exit(EXIT_FAILURE);
- }
-
- json_decref(json_root);
- } else {
- errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema);
- exit(EXIT_FAILURE);
- }
-
- avro_file_writer_t db;
-
- int rval = avro_file_writer_create_with_codec
- (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0);
- if (rval) {
- errorPrint("There was an error creating %s. reason: %s\n",
- avroFilename, avro_strerror());
- exit(EXIT_FAILURE);
- }
-
- TAOS_ROW row = NULL;
-
- int numFields = taos_field_count(res);
- assert(numFields > 0);
- TAOS_FIELD *fields = taos_fetch_fields(res);
-
- avro_value_iface_t *wface =
- avro_generic_class_from_schema(schema);
-
- avro_value_t record;
- avro_generic_value_new(wface, &record);
-
- int64_t count = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
- avro_value_t value;
-
- for (int col = 0; col < numFields; col++) {
- if (0 != avro_value_get_by_name(
- &record, fields[col].name, &value, NULL)) {
- errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) failed",
- __func__, __LINE__, fields[col].name);
- continue;
- }
-
- int len;
- switch (fields[col].type) {
- case TSDB_DATA_TYPE_BOOL:
- if (NULL == row[col]) {
- avro_value_set_int(&value, TSDB_DATA_BOOL_NULL);
- } else {
- avro_value_set_boolean(&value,
- ((((int32_t)(*((char *)row[col])))==1)?1:0));
- }
- break;
-
- case TSDB_DATA_TYPE_TINYINT:
- if (NULL == row[col]) {
- avro_value_set_int(&value, TSDB_DATA_TINYINT_NULL);
- } else {
- avro_value_set_int(&value, *((int8_t *)row[col]));
- }
- break;
-
- case TSDB_DATA_TYPE_SMALLINT:
- if (NULL == row[col]) {
- avro_value_set_int(&value, TSDB_DATA_SMALLINT_NULL);
- } else {
- avro_value_set_int(&value, *((int16_t *)row[col]));
- }
- break;
-
- case TSDB_DATA_TYPE_INT:
- if (NULL == row[col]) {
- avro_value_set_int(&value, TSDB_DATA_INT_NULL);
- } else {
- avro_value_set_int(&value, *((int32_t *)row[col]));
- }
- break;
-
- case TSDB_DATA_TYPE_BIGINT:
- if (NULL == row[col]) {
- avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL);
- } else {
- avro_value_set_long(&value, *((int64_t *)row[col]));
- }
- break;
-
- case TSDB_DATA_TYPE_FLOAT:
- if (NULL == row[col]) {
- avro_value_set_float(&value, TSDB_DATA_FLOAT_NULL);
- } else {
- avro_value_set_float(&value, GET_FLOAT_VAL(row[col]));
- }
- break;
-
- case TSDB_DATA_TYPE_DOUBLE:
- if (NULL == row[col]) {
- avro_value_set_double(&value, TSDB_DATA_DOUBLE_NULL);
- } else {
- avro_value_set_double(&value, GET_DOUBLE_VAL(row[col]));
- }
- break;
-
- case TSDB_DATA_TYPE_BINARY:
- if (NULL == row[col]) {
- avro_value_set_string(&value,
- (char *)NULL);
- } else {
- avro_value_set_string(&value, (char *)row[col]);
- }
- break;
-
- case TSDB_DATA_TYPE_NCHAR:
- if (NULL == row[col]) {
- avro_value_set_bytes(&value,
- (void*)NULL,0);
- } else {
- len = strlen((char*)row[col]);
- avro_value_set_bytes(&value, (void*)(row[col]),len);
- }
- break;
-
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (NULL == row[col]) {
- avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL);
- } else {
- avro_value_set_long(&value, *((int64_t *)row[col]));
- }
- break;
-
- default:
- break;
- }
- }
-
- if (0 != avro_file_writer_append_value(db, &record)) {
- errorPrint("%s() LN%d, Unable to write record to file. Message: %s\n",
- __func__, __LINE__,
- avro_strerror());
- } else {
- count ++;
- }
- }
-
- avro_value_decref(&record);
- avro_value_iface_decref(wface);
- freeRecordSchema(recordSchema);
- avro_file_writer_close(db);
- avro_schema_decref(schema);
-
- return count;
-}
-
-void freeBindArray(char *bindArray, int onlyCol)
-{
- TAOS_BIND *bind;
-
- for (int j = 0; j < onlyCol; j++) {
- bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j));
- if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type)
- && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) {
- tfree(bind->buffer);
- }
- }
-}
-
-static int dumpInOneAvroFile(char* fcharset,
- char* encode, char *avroFilepath)
-{
- debugPrint("avroFilepath: %s\n", avroFilepath);
-
- avro_file_reader_t reader;
-
- if(avro_file_reader(avroFilepath, &reader)) {
- fprintf(stderr, "Unable to open avro file %s: %s\n",
- avroFilepath, avro_strerror());
- return -1;
- }
-
- int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4;
- char *jsonbuf = calloc(1, buf_len);
- assert(jsonbuf);
-
-    avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);
-
- avro_schema_t schema;
- schema = avro_file_reader_get_writer_schema(reader);
- avro_schema_to_json(schema, jsonwriter);
-
- if (0 == strlen(jsonbuf)) {
- errorPrint("Failed to parse avro file: %s schema. reason: %s\n",
- avroFilepath, avro_strerror());
- avro_schema_decref(schema);
- avro_file_reader_close(reader);
- avro_writer_free(jsonwriter);
- return -1;
- }
- debugPrint("Schema:\n %s\n", jsonbuf);
-
- json_t *json_root = load_json(jsonbuf);
- debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
- if (g_args.debug_print) {
- print_json(json_root);
- }
-
- const char *namespace = avro_schema_namespace((const avro_schema_t)schema);
- debugPrint("Namespace: %s\n", namespace);
-
- TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
- namespace, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- return -1;
- }
-
- TAOS_STMT *stmt = taos_stmt_init(taos);
- if (NULL == stmt) {
- taos_close(taos);
- errorPrint("%s() LN%d, stmt init failed! reason: %s\n",
- __func__, __LINE__, taos_errstr(NULL));
- return -1;
- }
-
- RecordSchema *recordSchema = parse_json_to_recordschema(json_root);
- if (NULL == recordSchema) {
- errorPrint("Failed to parse json to recordschema. reason: %s\n",
- avro_strerror());
- avro_schema_decref(schema);
- avro_file_reader_close(reader);
- avro_writer_free(jsonwriter);
- return -1;
- }
- json_decref(json_root);
-
- TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
- + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false);
-
- if (allColCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- recordSchema->name);
- free(tableDes);
- freeRecordSchema(recordSchema);
- avro_schema_decref(schema);
- avro_file_reader_close(reader);
- avro_writer_free(jsonwriter);
- return -1;
- }
-
- char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
- assert(stmtBuffer);
- char *pstr = stmtBuffer;
- pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
-
- int onlyCol = 1; // at least timestamp
- for (int col = 1; col < allColCount; col++) {
- if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue;
- pstr += sprintf(pstr, ",?");
- onlyCol ++;
- }
- pstr += sprintf(pstr, ")");
-
- if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) {
- errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n",
- taos_stmt_errstr(stmt));
-
- free(stmtBuffer);
- free(tableDes);
- freeRecordSchema(recordSchema);
- avro_schema_decref(schema);
- avro_file_reader_close(reader);
- avro_writer_free(jsonwriter);
- return -1;
- }
-
- if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) {
- errorPrint("Failed to execute taos_stmt_set_tbname(%s). reason: %s\n",
- recordSchema->name, taos_stmt_errstr(stmt));
-
- free(stmtBuffer);
- free(tableDes);
- avro_schema_decref(schema);
- avro_file_reader_close(reader);
- avro_writer_free(jsonwriter);
- return -1;
- }
-
- avro_value_iface_t *value_class = avro_generic_class_from_schema(schema);
- avro_value_t value;
- avro_generic_value_new(value_class, &value);
-
- char *bindArray =
- malloc(sizeof(TAOS_BIND) * onlyCol);
- assert(bindArray);
-
- int success = 0;
- int failed = 0;
- while(!avro_file_reader_read_value(reader, &value)) {
- memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol);
- TAOS_BIND *bind;
-
- for (int i = 0; i < recordSchema->num_fields; i++) {
- bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
-
- avro_value_t field_value;
-
- FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
-
- bind->is_null = NULL;
- int is_null = 1;
- if (0 == i) {
- int64_t *ts = malloc(sizeof(int64_t));
- assert(ts);
-
- avro_value_get_by_name(&value, field->name, &field_value, NULL);
- avro_value_get_long(&field_value, ts);
-
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = ts;
- bind->length = &bind->buffer_length;
- } else if (0 == avro_value_get_by_name(
- &value, field->name, &field_value, NULL)) {
-
- if (0 == strcasecmp(tableDes->cols[i].type, "int")) {
- int32_t *n32 = malloc(sizeof(int32_t));
- assert(n32);
-
- avro_value_get_int(&field_value, n32);
- debugPrint("%d | ", *n32);
- bind->buffer_type = TSDB_DATA_TYPE_INT;
- bind->buffer_length = sizeof(int32_t);
- bind->buffer = n32;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) {
- int32_t *n8 = malloc(sizeof(int32_t));
- assert(n8);
-
- avro_value_get_int(&field_value, n8);
- debugPrint("%d | ", *n8);
- bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = (int8_t *)n8;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) {
- int32_t *n16 = malloc(sizeof(int32_t));
- assert(n16);
-
- avro_value_get_int(&field_value, n16);
- debugPrint("%d | ", *n16);
- bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
- bind->buffer_length = sizeof(int16_t);
- bind->buffer = (int32_t*)n16;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) {
- int64_t *n64 = malloc(sizeof(int64_t));
- assert(n64);
-
- avro_value_get_long(&field_value, n64);
- debugPrint("%"PRId64" | ", *n64);
- bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = n64;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) {
- int64_t *n64 = malloc(sizeof(int64_t));
- assert(n64);
-
- avro_value_get_long(&field_value, n64);
- debugPrint("%"PRId64" | ", *n64);
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = n64;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "float")) {
- float *f = malloc(sizeof(float));
- assert(f);
-
- avro_value_get_float(&field_value, f);
- if (TSDB_DATA_FLOAT_NULL == *f) {
- debugPrint("%s | ", "NULL");
- bind->is_null = &is_null;
- } else {
- debugPrint("%f | ", *f);
- bind->buffer = f;
- }
- bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
- bind->buffer_length = sizeof(float);
- } else if (0 == strcasecmp(tableDes->cols[i].type, "double")) {
- double *dbl = malloc(sizeof(double));
- assert(dbl);
-
- avro_value_get_double(&field_value, dbl);
- if (TSDB_DATA_DOUBLE_NULL == *dbl) {
- debugPrint("%s | ", "NULL");
- bind->is_null = &is_null;
- } else {
- debugPrint("%f | ", *dbl);
- bind->buffer = dbl;
- }
- bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
- bind->buffer_length = sizeof(double);
- } else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) {
- size_t size;
-
- char *buf = NULL;
- avro_value_get_string(&field_value, (const char **)&buf, &size);
- debugPrint("%s | ", (char *)buf);
- bind->buffer_type = TSDB_DATA_TYPE_BINARY;
- bind->buffer_length = tableDes->cols[i].length;
- bind->buffer = buf;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) {
- size_t bytessize;
- void *bytesbuf = NULL;
-
- avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize);
- debugPrint("%s | ", (char*)bytesbuf);
- bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
- bind->buffer_length = tableDes->cols[i].length;
- bind->buffer = bytesbuf;
- } else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) {
- int32_t *bl = malloc(sizeof(int32_t));
- assert(bl);
-
- avro_value_get_boolean(&field_value, bl);
- debugPrint("%s | ", (*bl)?"true":"false");
- bind->buffer_type = TSDB_DATA_TYPE_BOOL;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = (int8_t*)bl;
- }
-
- bind->length = &bind->buffer_length;
- }
-
- }
- debugPrint("%s", "\n");
-
- if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
- errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
- __func__, __LINE__, taos_stmt_errstr(stmt));
- freeBindArray(bindArray, onlyCol);
- failed --;
- continue;
- }
- if (0 != taos_stmt_add_batch(stmt)) {
-            errorPrint("%s() LN%d taos_stmt_add_batch() failed! reason: %s\n",
- __func__, __LINE__, taos_stmt_errstr(stmt));
- freeBindArray(bindArray, onlyCol);
- failed --;
- continue;
- }
-
- freeBindArray(bindArray, onlyCol);
-
- success ++;
- continue;
- }
-
- if (0 != taos_stmt_execute(stmt)) {
-        errorPrint("%s() LN%d taos_stmt_execute() failed! reason: %s\n",
- __func__, __LINE__, taos_stmt_errstr(stmt));
- failed = success;
- }
-
- avro_value_decref(&value);
- avro_value_iface_decref(value_class);
-
- tfree(bindArray);
-
- tfree(stmtBuffer);
- tfree(tableDes);
-
- freeRecordSchema(recordSchema);
- avro_schema_decref(schema);
- avro_file_reader_close(reader);
- avro_writer_free(jsonwriter);
-
- tfree(jsonbuf);
-
- taos_stmt_close(stmt);
- taos_close(taos);
-
- if (failed < 0)
- return failed;
- return success;
-}
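For reference, the prepared-statement insert flow that dumpInOneAvroFile drives can be reduced to the hedged sketch below. It assumes the TDengine 2.x client header and library (taos.h), an already connected `TAOS *` handle, and a hypothetical child table `tb1 (ts TIMESTAMP, val INT)`; it is an illustration of the bind/add_batch/execute sequence, not the tool's actual code path.

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <taos.h>

/* Sketch: insert one (timestamp, int) row via the TDengine stmt API. */
int insert_one_row(TAOS *taos, int64_t ts, int32_t val) {
    TAOS_STMT *stmt = taos_stmt_init(taos);
    if (NULL == stmt) return -1;

    TAOS_BIND bind[2];
    memset(bind, 0, sizeof(bind));

    bind[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
    bind[0].buffer        = &ts;
    bind[0].buffer_length = sizeof(ts);
    bind[0].length        = &bind[0].buffer_length;

    bind[1].buffer_type   = TSDB_DATA_TYPE_INT;
    bind[1].buffer        = &val;
    bind[1].buffer_length = sizeof(val);
    bind[1].length        = &bind[1].buffer_length;

    /* Same statement shape as the dump-in path: table name and values
     * are both bound at run time. "tb1" is a hypothetical table name. */
    if (0 != taos_stmt_prepare(stmt, "INSERT INTO ? VALUES(?,?)", 0)) goto fail;
    if (0 != taos_stmt_set_tbname(stmt, "tb1")) goto fail;
    if (0 != taos_stmt_bind_param(stmt, bind)) goto fail;
    if (0 != taos_stmt_add_batch(stmt)) goto fail;
    if (0 != taos_stmt_execute(stmt)) goto fail;

    taos_stmt_close(stmt);
    return 0;

fail:
    fprintf(stderr, "stmt error: %s\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return -1;
}
```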
-
-static void* dumpInAvroWorkThreadFp(void *arg)
-{
- threadInfo *pThread = (threadInfo*)arg;
- setThreadName("dumpInAvroWorkThrd");
- verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
- pThread->threadIndex, pThread->count, pThread->from);
-
- for (int64_t i = 0; i < pThread->count; i++) {
- char avroFile[MAX_PATH_LEN];
- sprintf(avroFile, "%s/%s", g_args.inpath,
- g_tsDumpInAvroFiles[pThread->from + i]);
-
- if (0 == dumpInOneAvroFile(g_tsCharset,
- g_args.encode,
- avroFile)) {
- okPrint("[%d] Success dump in file: %s\n",
- pThread->threadIndex, avroFile);
- }
- }
-
- return NULL;
-}
-
-static int64_t dumpInAvroWorkThreads()
-{
- int64_t ret = 0;
-
- int32_t threads = g_args.thread_num;
-
- uint64_t avroFileCount = getFilesNum("avro");
- if (0 == avroFileCount) {
- debugPrint("No .avro file found in %s\n", g_args.inpath);
- return 0;
- }
-
- createDumpinList("avro", avroFileCount);
-
- threadInfo *pThread;
-
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- threadInfo *infos = (threadInfo *)calloc(
- threads, sizeof(threadInfo));
- assert(pids);
- assert(infos);
-
- int64_t a = avroFileCount / threads;
- if (a < 1) {
- threads = avroFileCount;
- a = 1;
- }
-
- int64_t b = 0;
- if (threads != 0) {
- b = avroFileCount % threads;
- }
-
- int64_t from = 0;
-
- for (int32_t t = 0; t < threads; ++t) {
- pThread = infos + t;
- pThread->threadIndex = t;
-
-        pThread->from = from;
-        pThread->count = (t < b) ? a + 1 : a;
-        from += pThread->count;
- verbosePrint(
- "Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n",
- t, pThread->count, pThread->from);
-
- if (pthread_create(pids + t, NULL,
- dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
- errorPrint("%s() LN%d, thread[%d] failed to start\n",
- __func__, __LINE__, pThread->threadIndex);
- exit(EXIT_FAILURE);
- }
- }
-
- for (int t = 0; t < threads; ++t) {
- pthread_join(pids[t], NULL);
- }
-
- free(infos);
- free(pids);
-
- freeFileList(g_tsDumpInAvroFiles, avroFileCount);
-
- return ret;
-}
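The a/b arithmetic used by this launcher (and by the SQL and per-database launchers below) splits the file or table count so that the remainder lands on the first threads. A standalone sketch with hypothetical numbers, 10 files over 3 threads giving 4/3/3:

```c
/* Standalone sketch of the work-partitioning arithmetic: a = items per
 * thread, b = remainder handed to the first b threads. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
    int64_t fileCount = 10;   /* hypothetical */
    int32_t threads   = 3;    /* hypothetical */

    int64_t a = fileCount / threads;
    if (a < 1) { threads = (int32_t)fileCount; a = 1; }
    int64_t b = (threads != 0) ? fileCount % threads : 0;

    int64_t from = 0;
    for (int32_t t = 0; t < threads; ++t) {
        int64_t count = (t < b) ? a + 1 : a;
        printf("thread %d: %lld files from %lld\n",
               t, (long long)count, (long long)from);
        from += count;
    }
    return 0;
}
```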
-
-#endif /* AVRO_SUPPORT */
-
-static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
-{
- int64_t totalRows = 0;
-
- int32_t sql_buf_len = g_args.max_sql_len;
- char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
- assert(tmpBuffer);
-
- char *pstr = tmpBuffer;
-
- TAOS_ROW row = NULL;
- int rowFlag = 0;
- int64_t lastRowsPrint = 5000000;
- int count = 0;
-
- int numFields = taos_field_count(res);
- assert(numFields > 0);
- TAOS_FIELD *fields = taos_fetch_fields(res);
-
- int32_t curr_sqlstr_len = 0;
- int32_t total_sqlstr_len = 0;
-
- while ((row = taos_fetch_row(res)) != NULL) {
- curr_sqlstr_len = 0;
-
- int32_t* length = taos_fetch_lengths(res); // act len
-
- if (count == 0) {
- total_sqlstr_len = 0;
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "INSERT INTO %s.%s VALUES (", dbName, tbName);
- } else {
- if (g_args.mysqlFlag) {
- if (0 == rowFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- rowFlag++;
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
- }
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- }
- }
-
- for (int col = 0; col < numFields; col++) {
- if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
-
- if (row[col] == NULL) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
- continue;
- }
-
- switch (fields[col].type) {
- case TSDB_DATA_TYPE_BOOL:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
- ((((int32_t)(*((char *)row[col])))==1)?1:0));
- break;
-
- case TSDB_DATA_TYPE_TINYINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
- *((int8_t *)row[col]));
- break;
-
- case TSDB_DATA_TYPE_SMALLINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
- *((int16_t *)row[col]));
- break;
-
- case TSDB_DATA_TYPE_INT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
- *((int32_t *)row[col]));
- break;
-
- case TSDB_DATA_TYPE_BIGINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "%" PRId64 "",
- *((int64_t *)row[col]));
- break;
-
- case TSDB_DATA_TYPE_FLOAT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
- GET_FLOAT_VAL(row[col]));
- break;
-
- case TSDB_DATA_TYPE_DOUBLE:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
- GET_DOUBLE_VAL(row[col]));
- break;
-
- case TSDB_DATA_TYPE_BINARY:
- {
- char tbuf[COMMAND_SIZE] = {0};
- converStringToReadable((char *)row[col], length[col],
- tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_NCHAR:
- {
- char tbuf[COMMAND_SIZE] = {0};
- convertNCharToReadable((char *)row[col], length[col],
- tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (!g_args.mysqlFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "%" PRId64 "",
- *(int64_t *)row[col]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[col]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "\'%s.%03d\'",
- buf, (int)(ts % 1000));
- }
- break;
- default:
- break;
- }
- }
-
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
-
- totalRows++;
- count++;
- fprintf(fp, "%s", tmpBuffer);
-
- if (totalRows >= lastRowsPrint) {
-            printf(" %"PRId64" rows have already been dumped out from %s.%s\n",
- totalRows, dbName, tbName);
- lastRowsPrint += 5000000;
- }
-
- total_sqlstr_len += curr_sqlstr_len;
-
- if ((count >= g_args.data_batch)
- || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
- fprintf(fp, ";\n");
- count = 0;
- }
- }
-
- debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
-
- fprintf(fp, "\n");
- free(tmpBuffer);
-
- return totalRows;
-}
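The batching rule above flushes one INSERT statement whenever either the row batch is full or the remaining SQL buffer budget drops below one row. A standalone sketch of just that rule, with hypothetical stand-ins for g_args.data_batch, g_args.max_sql_len, and TSDB_MAX_BYTES_PER_ROW:

```c
/* Standalone sketch of the INSERT batching rule in writeResultToSql:
 * append rows until the batch size or the buffer budget is exhausted,
 * then terminate the statement with ";\n". */
#include <stdio.h>

#define DATA_BATCH     3     /* hypothetical g_args.data_batch        */
#define MAX_BYTES_ROW  64    /* hypothetical TSDB_MAX_BYTES_PER_ROW   */
#define SQL_BUF_LEN    256   /* hypothetical g_args.max_sql_len       */

int main(void) {
    int count = 0, total_len = 0;
    for (int row = 0; row < 8; row++) {
        int row_len = 40;                 /* pretend length of "(...)" */
        if (count == 0) {
            printf("INSERT INTO db.tb VALUES ");
            total_len = 0;
        }
        printf("(...)");                  /* one encoded row */
        count++;
        total_len += row_len;
        if (count >= DATA_BATCH || SQL_BUF_LEN - total_len < MAX_BYTES_ROW) {
            printf(";\n");                /* flush this statement */
            count = 0;
        }
    }
    if (count > 0) printf(";\n");
    return 0;
}
```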
-
-static int64_t dumpTableData(FILE *fp, char *tbName,
- char* dbName, int precision,
- char *jsonSchema) {
- int64_t totalRows = 0;
-
- char sqlstr[1024] = {0};
-
- int64_t start_time, end_time;
- if (strlen(g_args.humanStartTime)) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- g_args.humanStartTime, &start_time,
- strlen(g_args.humanStartTime),
- precision, 0)) {
- errorPrint("Input %s, time format error!\n",
- g_args.humanStartTime);
- return -1;
- }
- } else {
- start_time = g_args.start_time;
- }
-
- if (strlen(g_args.humanEndTime)) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
- precision, 0)) {
- errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
- return -1;
- }
- } else {
- end_time = g_args.end_time;
- }
-
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
- dbName, tbName, start_time, end_time);
-
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbName, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbName);
- return -1;
- }
-
- TAOS_RES* res = taos_query(taos, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("failed to run command %s, reason: %s\n",
- sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
- }
-
-#ifdef AVRO_SUPPORT
- if (g_args.avro) {
- char avroFilename[MAX_PATH_LEN] = {0};
-
- if (g_args.outpath[0] != 0) {
- sprintf(avroFilename, "%s/%s.%s.avro",
- g_args.outpath, dbName, tbName);
- } else {
- sprintf(avroFilename, "%s.%s.avro",
- dbName, tbName);
- }
-
- totalRows = writeResultToAvro(avroFilename, jsonSchema, res);
- } else
-#endif
- totalRows = writeResultToSql(res, fp, dbName, tbName);
-
- taos_free_result(res);
- taos_close(taos);
- return totalRows;
-}
-
-static int64_t dumpNormalTable(
- TAOS *taos,
- char *dbName,
- char *stable,
- char *tbName,
- int precision,
- FILE *fp
- ) {
- int colCount = 0;
-
- TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
- + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
- colCount = getTableDes(taos, dbName, tbName, tableDes, false);
-
- if (colCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- tbName);
- free(tableDes);
- return -1;
- }
-
- // create child-table using super-table
- dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
- } else { // dump table definition
- colCount = getTableDes(taos, dbName, tbName, tableDes, false);
-
- if (colCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- tbName);
- free(tableDes);
- return -1;
- }
-
- // create normal-table or super-table
- dumpCreateTableClause(tableDes, colCount, fp, dbName);
- }
-
- char *jsonSchema = NULL;
-#ifdef AVRO_SUPPORT
- if (g_args.avro) {
- if (0 != convertTbDesToJson(
- dbName, tbName, tableDes, colCount, &jsonSchema)) {
- errorPrint("%s() LN%d, convertTbDesToJson failed\n",
- __func__,
- __LINE__);
- freeTbDes(tableDes);
- return -1;
- }
- }
-#endif
-
- int64_t totalRows = 0;
- if (!g_args.schemaonly) {
- totalRows = dumpTableData(fp, tbName, dbName, precision,
- jsonSchema);
- }
-
- tfree(jsonSchema);
- freeTbDes(tableDes);
- return totalRows;
-}
-
-static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName)
-{
- int64_t count = 0;
-
- char tmpBuf[MAX_PATH_LEN] = {0};
- FILE *fp = NULL;
-
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.sql",
- g_args.outpath, dbInfo->name, ntbName);
- } else {
- sprintf(tmpBuf, "%s.%s.sql",
- dbInfo->name, ntbName);
- }
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
- }
-
- count = dumpNormalTable(
- taos,
- dbInfo->name,
- NULL,
- ntbName,
- getPrecisionByString(dbInfo->precision),
- fp);
- if (count > 0) {
- atomic_add_fetch_64(&g_totalDumpOutRows, count);
- }
- fclose(fp);
- return count;
-}
-
-static int64_t dumpNormalTableBelongStb(
- TAOS *taos,
- SDbInfo *dbInfo, char *stbName, char *ntbName)
-{
- int64_t count = 0;
-
- char tmpBuf[MAX_PATH_LEN] = {0};
- FILE *fp = NULL;
-
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.sql",
- g_args.outpath, dbInfo->name, ntbName);
- } else {
- sprintf(tmpBuf, "%s.%s.sql",
- dbInfo->name, ntbName);
- }
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
- }
-
- count = dumpNormalTable(
- taos,
- dbInfo->name,
- stbName,
- ntbName,
- getPrecisionByString(dbInfo->precision),
- fp);
- if (count > 0) {
- atomic_add_fetch_64(&g_totalDumpOutRows, count);
- }
-
- fclose(fp);
- return count;
-}
-
-static void *dumpNtbOfDb(void *arg) {
- threadInfo *pThreadInfo = (threadInfo *)arg;
-
- debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
- debugPrint("dump table count = \t%"PRId64"\n",
- pThreadInfo->count);
-
- FILE *fp = NULL;
- char tmpBuf[MAX_PATH_LEN] = {0};
-
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%d.sql",
- g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.%d.sql",
- pThreadInfo->dbName, pThreadInfo->threadIndex);
- }
-
- fp = fopen(tmpBuf, "w");
-
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return NULL;
- }
-
- int64_t count;
- for (int64_t i = 0; i < pThreadInfo->count; i++) {
- debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
- pThreadInfo->threadIndex, i,
- ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name);
- count = dumpNormalTable(
- pThreadInfo->taos,
- pThreadInfo->dbName,
- ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable,
- ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name,
- pThreadInfo->precision,
- fp);
- if (count < 0) {
- break;
- } else {
- atomic_add_fetch_64(&g_totalDumpOutRows, count);
- }
- }
-
- fclose(fp);
- return NULL;
-}
-
-static int checkParam() {
- if (g_args.all_databases && g_args.databases) {
-        errorPrint("%s", "options --all-databases and --databases conflict\n");
- return -1;
- }
-
- if (g_args.start_time > g_args.end_time) {
- errorPrint("%s", "start time is larger than end time\n");
- return -1;
- }
-
- if (g_args.arg_list_len == 0) {
- if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
- errorPrint("%s", "taosdump requires parameters\n");
- return -1;
- }
- }
- /*
- if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
- fprintf(stderr, "duplicate parameter input and output file path\n");
- return -1;
- }
- */
- if (!g_args.isDumpIn && g_args.encode != NULL) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
- }
-
- if (g_args.table_batch <= 0) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
- }
-
- return 0;
-}
-
-/*
-static bool isEmptyCommand(char *cmd) {
- char *pchar = cmd;
-
- while (*pchar != '\0') {
- if (*pchar != ' ') return false;
- pchar++;
- }
-
- return true;
-}
-
-static void taosReplaceCtrlChar(char *str) {
- bool ctrlOn = false;
- char *pstr = NULL;
-
- for (pstr = str; *str != '\0'; ++str) {
- if (ctrlOn) {
- switch (*str) {
- case 'n':
- *pstr = '\n';
- pstr++;
- break;
- case 'r':
- *pstr = '\r';
- pstr++;
- break;
- case 't':
- *pstr = '\t';
- pstr++;
- break;
- case '\\':
- *pstr = '\\';
- pstr++;
- break;
- case '\'':
- *pstr = '\'';
- pstr++;
- break;
- default:
- break;
- }
- ctrlOn = false;
- } else {
- if (*str == '\\') {
- ctrlOn = true;
- } else {
- *pstr = *str;
- pstr++;
- }
- }
- }
-
- *pstr = '\0';
-}
-*/
-
-char *ascii_literal_list[] = {
- "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
- "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
- "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
- "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
- "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
- "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
- "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
- "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
- "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
- "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
- "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
- "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
- "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
- "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
- "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
- "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
- "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
- "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
- "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
- "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
-
-static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- while (size > 0) {
- if (*pstr == '\0') break;
- pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
- pstr++;
- size--;
- }
- *pbuf = '\0';
- return 0;
-}
-
-static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- wchar_t wc;
- while (size > 0) {
- if (*pstr == '\0') break;
- int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
- if (byte_width < 0) {
- errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
- exit(-1);
- }
-
- if ((int)wc < 256) {
- pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
- } else {
- memcpy(pbuf, pstr, byte_width);
- pbuf += byte_width;
- }
- pstr += byte_width;
- }
-
- *pbuf = '\0';
-
- return 0;
-}
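Both conversion helpers turn raw column bytes into printable SQL literals through ascii_literal_list. A reduced, standalone sketch of the single-byte path; only a few escape entries are populated here, the real table covers all 256 byte values:

```c
/* Standalone sketch of the byte-escaping idea behind converStringToReadable:
 * each byte indexes a literal table, so control characters and quotes come
 * out as escape sequences while plain bytes pass through unchanged. */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *lit[256];
    char plain[256][2];
    for (int i = 0; i < 256; i++) {       /* default: pass byte through */
        plain[i][0] = (char)i;
        plain[i][1] = '\0';
        lit[i] = plain[i];
    }
    lit['\t'] = "\\t"; lit['\n'] = "\\n"; lit['\''] = "\\'"; lit['"'] = "\\\"";

    const char *in = "a'b\tc";
    char out[64] = {0};
    char *p = out;
    for (const char *s = in; *s; s++) {
        p = stpcpy(p, lit[(unsigned char)*s]);
    }
    printf("%s\n", out);   /* prints: a\'b\tc */
    return 0;
}
```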
-
-static void dumpCharset(FILE *fp) {
- char charsetline[256];
-
- (void)fseek(fp, 0, SEEK_SET);
- sprintf(charsetline, "#!%s\n", tsCharset);
- (void)fwrite(charsetline, strlen(charsetline), 1, fp);
-}
-
-static void loadFileCharset(FILE *fp, char *fcharset) {
- char * line = NULL;
- size_t line_size = 0;
-
- (void)fseek(fp, 0, SEEK_SET);
- ssize_t size = getline(&line, &line_size, fp);
- if (size <= 2) {
- goto _exit_no_charset;
- }
-
- if (strncmp(line, "#!", 2) != 0) {
- goto _exit_no_charset;
- }
- if (line[size - 1] == '\n') {
- line[size - 1] = '\0';
- size--;
- }
- strcpy(fcharset, line + 2);
-
- tfree(line);
- return;
-
-_exit_no_charset:
- (void)fseek(fp, 0, SEEK_SET);
- *fcharset = '\0';
- tfree(line);
- return;
-}
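dumpCharset and loadFileCharset agree on a one-line `#!<charset>` header at the top of every dump file. A standalone sketch of that round trip over a temporary file, with a placeholder charset string:

```c
/* Standalone sketch of the "#!<charset>" header round trip used by
 * dumpCharset/loadFileCharset. "UTF-8" is a placeholder value. */
#include <stdio.h>
#include <string.h>

int main(void) {
    char fcharset[64] = {0};
    FILE *fp = tmpfile();
    if (fp == NULL) return 1;

    fprintf(fp, "#!%s\n", "UTF-8");          /* what dumpCharset writes   */

    rewind(fp);
    char line[256] = {0};
    if (fgets(line, sizeof(line), fp) && strncmp(line, "#!", 2) == 0) {
        line[strcspn(line, "\n")] = '\0';    /* strip trailing newline    */
        strcpy(fcharset, line + 2);          /* what loadFileCharset keeps */
    }
    printf("charset: %s\n", fcharset);       /* prints: charset: UTF-8    */
    fclose(fp);
    return 0;
}
```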
-
-// ======== dumpIn support multi threads functions ================================//
-
-static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset,
- char* encode, char* fileName) {
- int read_len = 0;
- char * cmd = NULL;
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
-
- cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
- if (cmd == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n",
- __func__, __LINE__);
- return -1;
- }
-
- int lastRowsPrint = 5000000;
- int lineNo = 0;
- while ((read_len = getline(&line, &line_len, fp)) != -1) {
- ++lineNo;
- if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
- line[--read_len] = '\0';
-
- //if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
- continue;
- }
-
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
-
- memcpy(cmd + cmd_len, line, read_len);
- cmd[read_len + cmd_len]= '\0';
- if (queryDbImpl(taos, cmd)) {
- errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
- __func__, __LINE__, lineNo, fileName);
- fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
- }
-
- memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
-
- if (lineNo >= lastRowsPrint) {
-            printf(" %d lines have already been executed from file %s\n", lineNo, fileName);
- lastRowsPrint += 5000000;
- }
- }
-
- tfree(cmd);
- tfree(line);
- return 0;
-}
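dumpInOneSqlFile accumulates physical lines that end with a trailing backslash into a single SQL command before executing it. The joining rule in isolation, over a hypothetical two-line dump-file fragment; the final printf stands in for the queryDbImpl() call:

```c
/* Standalone sketch of the backslash line-continuation handling in
 * dumpInOneSqlFile: lines ending in '\' are accumulated into one command. */
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *lines[] = {            /* hypothetical dump-file content */
        "INSERT INTO db.tb VALUES \\",
        "(1500000000000, 1);",
    };
    char cmd[1024] = {0};
    size_t cmd_len = 0;

    for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        char line[256];
        strcpy(line, lines[i]);
        size_t len = strlen(line);
        if (len > 0 && line[len - 1] == '\\') {
            line[len - 1] = ' ';       /* continuation: keep accumulating */
            memcpy(cmd + cmd_len, line, len);
            cmd_len += len;
            continue;
        }
        memcpy(cmd + cmd_len, line, len);
        cmd[cmd_len + len] = '\0';
        printf("execute: %s\n", cmd);  /* would be queryDbImpl(taos, cmd) */
        cmd_len = 0;
        memset(cmd, 0, sizeof(cmd));
    }
    return 0;
}
```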
-
-static void* dumpInSqlWorkThreadFp(void *arg)
-{
- threadInfo *pThread = (threadInfo*)arg;
- setThreadName("dumpInSqlWorkThrd");
- fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n",
- pThread->threadIndex, pThread->count, pThread->from);
-
- for (int64_t i = 0; i < pThread->count; i++) {
- char sqlFile[MAX_PATH_LEN];
- sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]);
-
- FILE* fp = openDumpInFile(sqlFile);
- if (NULL == fp) {
- errorPrint("[%d] Failed to open input file: %s\n",
- pThread->threadIndex, sqlFile);
- continue;
- }
-
- if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode,
- sqlFile)) {
- okPrint("[%d] Success dump in file: %s\n",
- pThread->threadIndex, sqlFile);
- }
- fclose(fp);
- }
-
- return NULL;
-}
-
-static int dumpInSqlWorkThreads()
-{
- int32_t threads = g_args.thread_num;
-
- uint64_t sqlFileCount = getFilesNum("sql");
- if (0 == sqlFileCount) {
- debugPrint("No .sql file found in %s\n", g_args.inpath);
- return 0;
- }
-
- createDumpinList("sql", sqlFileCount);
-
- threadInfo *pThread;
-
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- threadInfo *infos = (threadInfo *)calloc(
- threads, sizeof(threadInfo));
- assert(pids);
- assert(infos);
-
- int64_t a = sqlFileCount / threads;
- if (a < 1) {
- threads = sqlFileCount;
- a = 1;
- }
-
- int64_t b = 0;
- if (threads != 0) {
- b = sqlFileCount % threads;
- }
-
- int64_t from = 0;
-
- for (int32_t t = 0; t < threads; ++t) {
- pThread = infos + t;
- pThread->threadIndex = t;
-
-        pThread->from = from;
-        pThread->count = (t < b) ? a + 1 : a;
-        from += pThread->count;
- verbosePrint(
- "Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n",
- t, pThread->count, pThread->from);
-
- pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (pThread->taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- free(infos);
- free(pids);
- return -1;
- }
-
- if (pthread_create(pids + t, NULL,
- dumpInSqlWorkThreadFp, (void*)pThread) != 0) {
- errorPrint("%s() LN%d, thread[%d] failed to start\n",
- __func__, __LINE__, pThread->threadIndex);
- exit(EXIT_FAILURE);
- }
- }
-
- for (int t = 0; t < threads; ++t) {
- pthread_join(pids[t], NULL);
- }
-
- for (int t = 0; t < threads; ++t) {
- taos_close(infos[t].taos);
- }
- free(infos);
- free(pids);
-
- freeFileList(g_tsDumpInSqlFiles, sqlFileCount);
-
- return 0;
-}
-
-static int dumpInDbs()
-{
- TAOS *taos = taos_connect(
- g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
-
- if (taos == NULL) {
- errorPrint("%s() LN%d, failed to connect to TDengine server\n",
- __func__, __LINE__);
- return -1;
- }
-
- char dbsSql[MAX_PATH_LEN];
- sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql");
-
- FILE *fp = openDumpInFile(dbsSql);
- if (NULL == fp) {
- errorPrint("%s() LN%d, failed to open input file %s\n",
- __func__, __LINE__, dbsSql);
- return -1;
- }
- debugPrint("Success Open input file: %s\n", dbsSql);
- loadFileCharset(fp, g_tsCharset);
-
- if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) {
- okPrint("Success dump in file: %s !\n", dbsSql);
- }
-
- fclose(fp);
- taos_close(taos);
-
- return 0;
-}
-
-static int64_t dumpIn() {
- assert(g_args.isDumpIn);
-
- int64_t ret = 0;
- if (dumpInDbs()) {
- errorPrint("%s", "Failed to dump dbs in!\n");
- exit(EXIT_FAILURE);
- }
-
- ret = dumpInSqlWorkThreads();
-
-#ifdef AVRO_SUPPORT
- if (0 == ret) {
- ret = dumpInAvroWorkThreads();
- }
-#endif
-
- return ret;
-}
-
-static void *dumpNormalTablesOfStb(void *arg) {
- threadInfo *pThreadInfo = (threadInfo *)arg;
-
- debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
- debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count);
-
- char command[COMMAND_SIZE];
-
- sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
- pThreadInfo->dbName, pThreadInfo->stbName,
- pThreadInfo->count, pThreadInfo->from);
-
- TAOS_RES *res = taos_query(pThreadInfo->taos, command);
- int32_t code = taos_errno(res);
- if (code) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return NULL;
- }
-
- FILE *fp = NULL;
- char tmpBuf[MAX_PATH_LEN] = {0};
-
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.%d.sql",
- g_args.outpath,
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- pThreadInfo->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.%s.%d.sql",
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- pThreadInfo->threadIndex);
- }
-
- fp = fopen(tmpBuf, "w");
-
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return NULL;
- }
-
- TAOS_ROW row = NULL;
- int64_t i = 0;
- int64_t count;
- while((row = taos_fetch_row(res)) != NULL) {
- debugPrint("[%d] sub table %"PRId64": name: %s\n",
- pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-
- count = dumpNormalTable(
- pThreadInfo->taos,
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- pThreadInfo->precision,
- fp);
- if (count < 0) {
- break;
- } else {
- atomic_add_fetch_64(&g_totalDumpOutRows, count);
- }
- }
-
- fclose(fp);
- return NULL;
-}
-
-static int64_t dumpNtbOfDbByThreads(
- SDbInfo *dbInfo,
- int64_t ntbCount)
-{
- if (ntbCount <= 0) {
- return 0;
- }
-
- int threads = g_args.thread_num;
-
- int64_t a = ntbCount / threads;
- if (a < 1) {
- threads = ntbCount;
- a = 1;
- }
-
- assert(threads);
- int64_t b = ntbCount % threads;
-
- threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- assert(pids);
- assert(infos);
-
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->taos = taos_connect(
- g_args.host,
- g_args.user,
- g_args.password,
- dbInfo->name,
- g_args.port
- );
- if (NULL == pThreadInfo->taos) {
- errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
- __func__,
- __LINE__,
- taos_errstr(NULL));
- free(pids);
- free(infos);
-
- return -1;
- }
-
- pThreadInfo->threadIndex = i;
-        pThreadInfo->count = (i < b) ? a + 1 : a;
-        pThreadInfo->from = (i == 0) ? 0 :
-            ((threadInfo *)(infos + i - 1))->from +
-            ((threadInfo *)(infos + i - 1))->count;
- strcpy(pThreadInfo->dbName, dbInfo->name);
- pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-
- pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
- }
-
- for (int64_t i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
-
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- taos_close(pThreadInfo->taos);
- }
-
- free(pids);
- free(infos);
-
- return 0;
-}
-
-static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
-{
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbInfo->name, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbInfo->name);
- return 0;
- }
-
- char command[COMMAND_SIZE];
- TAOS_RES *result;
- int32_t code;
-
- sprintf(command, "USE %s", dbInfo->name);
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- errorPrint("invalid database %s, reason: %s\n",
- dbInfo->name, taos_errstr(result));
- taos_close(taos);
- return 0;
- }
-
- sprintf(command, "SHOW TABLES");
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- errorPrint("Failed to show %s\'s tables, reason: %s\n",
- dbInfo->name, taos_errstr(result));
- taos_close(taos);
- return 0;
- }
-
- g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
- assert(g_tablesList);
-
- TAOS_ROW row;
- int64_t count = 0;
- while(NULL != (row = taos_fetch_row(result))) {
- debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
- __func__, __LINE__,
- count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- tstrncpy(((TableInfo *)(g_tablesList + count))->name,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
- char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
- if (stbName) {
- tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
- (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
- ((TableInfo *)(g_tablesList + count))->belongStb = true;
- }
- count ++;
- }
- taos_close(taos);
-
- int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
-
- free(g_tablesList);
- g_tablesList = NULL;
-
- return records;
-}
-
-static int64_t dumpNtbOfStbByThreads(
- SDbInfo *dbInfo, char *stbName)
-{
- int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
-
- if (ntbCount <= 0) {
- return 0;
- }
-
- int threads = g_args.thread_num;
-
- int64_t a = ntbCount / threads;
- if (a < 1) {
- threads = ntbCount;
- a = 1;
- }
-
- assert(threads);
- int64_t b = ntbCount % threads;
-
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
- assert(pids);
- assert(infos);
-
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->taos = taos_connect(
- g_args.host,
- g_args.user,
- g_args.password,
- dbInfo->name,
- g_args.port
- );
- if (NULL == pThreadInfo->taos) {
- errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
- __func__,
- __LINE__,
- taos_errstr(NULL));
- free(pids);
- free(infos);
-
- return -1;
- }
-
- pThreadInfo->threadIndex = i;
-        pThreadInfo->count = (i < b) ? a + 1 : a;
-        pThreadInfo->from = (i == 0) ? 0 :
-            ((threadInfo *)(infos + i - 1))->from +
-            ((threadInfo *)(infos + i - 1))->count;
- strcpy(pThreadInfo->dbName, dbInfo->name);
- pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-
- strcpy(pThreadInfo->stbName, stbName);
- pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
- }
-
- for (int64_t i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
-
- int64_t records = 0;
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- records += pThreadInfo->rowsOfDumpOut;
- taos_close(pThreadInfo->taos);
- }
-
- free(pids);
- free(infos);
-
- return records;
-}
-
-static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
-{
- dumpCreateDbClause(dbInfo, g_args.with_property, fp);
-
- fprintf(g_fpOfResult, "\n#### database: %s\n",
- dbInfo->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
-
- dumpCreateSTableClauseOfDb(dbInfo, fp);
-
- return dumpNTablesOfDb(dbInfo);
-}
-
-static int dumpOut() {
- TAOS *taos = NULL;
- TAOS_RES *result = NULL;
-
- TAOS_ROW row;
- FILE *fp = NULL;
- int32_t count = 0;
-
- char tmpBuf[MAX_PATH_LEN] = {0};
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
- } else {
- sprintf(tmpBuf, "dbs.sql");
- }
-
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
- }
-
- g_args.dumpDbCount = getDumpDbCount();
- debugPrint("%s() LN%d, dump db count: %d\n",
- __func__, __LINE__, g_args.dumpDbCount);
-
- if (0 == g_args.dumpDbCount) {
- errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
- fclose(fp);
- return -1;
- }
-
- g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
- if (g_dbInfos == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n",
- __func__, __LINE__);
- goto _exit_failure;
- }
-
- char command[COMMAND_SIZE];
-
- /* Connect to server */
- taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- goto _exit_failure;
- }
-
- /* --------------------------------- Main Code -------------------------------- */
- /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
- /* */
- dumpCharset(fp);
-
- sprintf(command, "show databases");
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
-
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(result));
- goto _exit_failure;
- }
-
- TAOS_FIELD *fields = taos_fetch_fields(result);
-
- while ((row = taos_fetch_row(result)) != NULL) {
-        // skip the system database 'log' unless --allow-sys is specified
- if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- && (!g_args.allow_sys)) {
- continue;
- }
-
- if (g_args.databases) { // input multi dbs
- if (inDatabasesSeq(
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
- continue;
- }
- } else if (!g_args.all_databases) { // only input one db
- if (strncasecmp(g_args.arg_list[0],
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
- continue;
- }
-
- g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (g_dbInfos[count] == NULL) {
- errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
- __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
- goto _exit_failure;
- }
-
- okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
- tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- min(TSDB_DB_NAME_LEN,
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
- if (g_args.with_property) {
- g_dbInfos[count]->ntables =
- *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- g_dbInfos[count]->vgroups =
- *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- g_dbInfos[count]->replica =
- *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- g_dbInfos[count]->quorum =
- *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- g_dbInfos[count]->days =
- *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-
- tstrncpy(g_dbInfos[count]->keeplist,
- (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
- //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
- //g_dbInfos[count]->daysToKeep1;
- //g_dbInfos[count]->daysToKeep2;
- g_dbInfos[count]->cache =
- *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- g_dbInfos[count]->blocks =
- *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- g_dbInfos[count]->minrows =
- *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- g_dbInfos[count]->maxrows =
- *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- g_dbInfos[count]->wallevel =
- *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- g_dbInfos[count]->fsync =
- *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- g_dbInfos[count]->comp =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- g_dbInfos[count]->cachelast =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-
- tstrncpy(g_dbInfos[count]->precision,
- (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- DB_PRECISION_LEN);
- g_dbInfos[count]->update =
- *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- }
- count++;
-
- if (g_args.databases) {
- if (count > g_args.dumpDbCount)
- break;
- } else if (!g_args.all_databases) {
- if (count >= 1)
- break;
- }
- }
-
- if (count == 0) {
- errorPrint("%d databases valid to dump\n", count);
- goto _exit_failure;
- }
-
- if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases
- for (int i = 0; i < count; i++) {
- int64_t records = 0;
- records = dumpWholeDatabase(g_dbInfos[i], fp);
- if (records >= 0) {
- okPrint("Database %s dumped\n", g_dbInfos[i]->name);
- g_totalDumpOutRows += records;
- }
- }
- } else {
- if (1 == g_args.arg_list_len) {
- int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
- if (records >= 0) {
- okPrint("Database %s dumped\n", g_dbInfos[0]->name);
- g_totalDumpOutRows += records;
- }
- } else {
- dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
- }
-
- int superTblCnt = 0 ;
- for (int i = 1; g_args.arg_list[i]; i++) {
- TableRecordInfo tableRecordInfo;
-
- if (getTableRecordInfo(g_dbInfos[0]->name,
- g_args.arg_list[i],
- &tableRecordInfo) < 0) {
-                errorPrint("invalid table %s was specified\n",
- g_args.arg_list[i]);
- continue;
- }
-
- int64_t records = 0;
- if (tableRecordInfo.isStb) { // dump all table of this stable
- int ret = dumpStableClasuse(
- taos,
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- fp);
- if (ret >= 0) {
- superTblCnt++;
- records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
- }
- } else if (tableRecordInfo.belongStb){
- dumpStableClasuse(
- taos,
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- fp);
- records = dumpNormalTableBelongStb(
- taos,
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- g_args.arg_list[i]);
- } else {
- records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]);
- }
-
- if (records >= 0) {
- okPrint("table: %s dumped\n", g_args.arg_list[i]);
- g_totalDumpOutRows += records;
- }
- }
- }
-
- taos_close(taos);
-
- /* Close the handle and return */
- fclose(fp);
- taos_free_result(result);
- freeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
- return 0;
-
-_exit_failure:
- fclose(fp);
- taos_close(taos);
- taos_free_result(result);
- freeDbInfos();
- errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
- return -1;
-}
-
-int main(int argc, char *argv[]) {
- static char verType[32] = {0};
- sprintf(verType, "version: %s\n", version);
- argp_program_version = verType;
-
- int ret = 0;
- /* Parse our arguments; every option seen by parse_opt will be
- reflected in arguments. */
- if (argc > 1) {
-// parse_precision_first(argc, argv, &g_args);
- parse_timestamp(argc, argv, &g_args);
- parse_args(argc, argv, &g_args);
- }
-
- argp_parse(&argp, argc, argv, 0, 0, &g_args);
-
- if (g_args.abort) {
-#ifndef _ALPINE
- error(10, 0, "ABORTED");
-#else
- abort();
-#endif
- }
-
- printf("====== arguments config ======\n");
-
- printf("host: %s\n", g_args.host);
- printf("user: %s\n", g_args.user);
- printf("password: %s\n", g_args.password);
- printf("port: %u\n", g_args.port);
- printf("mysqlFlag: %d\n", g_args.mysqlFlag);
- printf("outpath: %s\n", g_args.outpath);
- printf("inpath: %s\n", g_args.inpath);
- printf("resultFile: %s\n", g_args.resultFile);
- printf("encode: %s\n", g_args.encode);
- printf("all_databases: %s\n", g_args.all_databases?"true":"false");
- printf("databases: %d\n", g_args.databases);
- printf("databasesSeq: %s\n", g_args.databasesSeq);
- printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
- printf("with_property: %s\n", g_args.with_property?"true":"false");
-#ifdef AVRO_SUPPORT
- printf("avro format: %s\n", g_args.avro?"true":"false");
- printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
-#endif
- printf("start_time: %" PRId64 "\n", g_args.start_time);
- printf("human readable start time: %s \n", g_args.humanStartTime);
- printf("end_time: %" PRId64 "\n", g_args.end_time);
- printf("human readable end time: %s \n", g_args.humanEndTime);
- printf("precision: %s\n", g_args.precision);
- printf("data_batch: %d\n", g_args.data_batch);
- printf("max_sql_len: %d\n", g_args.max_sql_len);
- printf("table_batch: %d\n", g_args.table_batch);
- printf("thread_num: %d\n", g_args.thread_num);
- printf("allow_sys: %d\n", g_args.allow_sys);
- printf("abort: %d\n", g_args.abort);
- printf("isDumpIn: %d\n", g_args.isDumpIn);
- printf("arg_list_len: %d\n", g_args.arg_list_len);
- printf("debug_print: %d\n", g_args.debug_print);
-
- for (int32_t i = 0; i < g_args.arg_list_len; i++) {
- if (g_args.databases || g_args.all_databases) {
-            errorPrint("%s is an invalid input if database(s) are already specified.\n",
- g_args.arg_list[i]);
- exit(EXIT_FAILURE);
- } else {
- printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]);
- }
- }
-
- printf("==============================\n");
- if (checkParam(&g_args) < 0) {
- exit(EXIT_FAILURE);
- }
-
- g_fpOfResult = fopen(g_args.resultFile, "a");
- if (NULL == g_fpOfResult) {
- errorPrint("Failed to open %s for save result\n", g_args.resultFile);
- exit(-1);
- };
-
- fprintf(g_fpOfResult, "#############################################################################\n");
- fprintf(g_fpOfResult, "============================== arguments config =============================\n");
-
- fprintf(g_fpOfResult, "host: %s\n", g_args.host);
- fprintf(g_fpOfResult, "user: %s\n", g_args.user);
- fprintf(g_fpOfResult, "password: %s\n", g_args.password);
- fprintf(g_fpOfResult, "port: %u\n", g_args.port);
- fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
- fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
- fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
- fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile);
- fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
- fprintf(g_fpOfResult, "all_databases: %s\n", g_args.all_databases?"true":"false");
- fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
- fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq);
- fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
- fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
-#ifdef AVRO_SUPPORT
- fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
- fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
-#endif
- fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
- fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
- fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
- fprintf(g_fpOfResult, "human readable end time: %s \n", g_args.humanEndTime);
- fprintf(g_fpOfResult, "precision: %s\n", g_args.precision);
- fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
- fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
- fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch);
- fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num);
- fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys);
- fprintf(g_fpOfResult, "abort: %d\n", g_args.abort);
- fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn);
- fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len);
-
- for (int32_t i = 0; i < g_args.arg_list_len; i++) {
- fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]);
- }
-
- g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);
-
- time_t tTime = time(NULL);
- struct tm tm = *localtime(&tTime);
-
- if (g_args.isDumpIn) {
- fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n");
- fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (dumpIn() < 0) {
- errorPrint("%s\n", "dumpIn() failed!");
- ret = -1;
- }
- } else {
- fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n");
- fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1,
- tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
- if (dumpOut() < 0) {
- ret = -1;
- } else {
- fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n");
- fprintf(g_fpOfResult, "# total database count: %d\n",
- g_resultStatistics.totalDatabasesOfDumpOut);
- fprintf(g_fpOfResult, "# total super table count: %d\n",
- g_resultStatistics.totalSuperTblsOfDumpOut);
- fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n",
- g_resultStatistics.totalChildTblsOfDumpOut);
- fprintf(g_fpOfResult, "# total row count: %"PRId64"\n",
- g_resultStatistics.totalRowsOfDumpOut);
- }
- }
-
- fprintf(g_fpOfResult, "\n");
- fclose(g_fpOfResult);
-
- if (g_tablesList) {
- free(g_tablesList);
- }
-
- return ret;
-}
diff --git a/src/kit/taosdump/taosdump.sh b/src/kit/taosdump/taosdump.sh
deleted file mode 100755
index 6d32c090dbb0f538b0fc0abb4a9588ee08037a95..0000000000000000000000000000000000000000
--- a/src/kit/taosdump/taosdump.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-taos1_6="/root/mnt/work/test/td1.6/build/bin/taos"
-taosdump1_6="/root/mnt/work/test/td1.6/build/bin/taosdump"
-taoscfg1_6="/root/mnt/work/test/td1.6/test/cfg"
-
-taos2_0="/root/mnt/work/test/td2.0/build/bin/taos"
-taosdump2_0="/root/mnt/work/test/td2.0/build/bin/taosdump"
-taoscfg2_0="/root/mnt/work/test/td2.0/test/cfg"
-
-data_dir="/root/mnt/work/test/td1.6/output"
-table_list="/root/mnt/work/test/td1.6/tables"
-
-DBNAME="test"
-NTABLES=$(wc -l ${table_list} | awk '{print $1;}')
-NTABLES_PER_DUMP=101
-
-mkdir -p ${data_dir}
-i=0
-round=0
-command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}"
-while IFS= read -r line
-do
- i=$((i+1))
-
- command="${command} ${line}"
-
- if [[ "$i" -eq ${NTABLES_PER_DUMP} ]]; then
- round=$((round+1))
- echo "Starting round ${round} dump out..."
- rm -f ${data_dir}/*
- ${command}
- echo "Starting round ${round} dump in..."
- ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir}
-
- # Reset variables
- # command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 ${DBNAME}"
- command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}"
- i=0
- fi
-done < "${table_list}"
-
-if [[ ${i} -ne "0" ]]; then
- round=$((round+1))
- echo "Starting round ${round} dump out..."
- rm -f ${data_dir}/*
- ${command}
- echo "Starting round ${round} dump in..."
- ${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir}
-fi
diff --git a/src/os/inc/osTime.h b/src/os/inc/osTime.h
index 798a08e3e6e16470a750cbd8bfed429539b44d8d..52e6c376a6c240d8c10b8596effa8b398e1e61c4 100644
--- a/src/os/inc/osTime.h
+++ b/src/os/inc/osTime.h
@@ -103,6 +103,8 @@ int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, char*
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
+int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
+
void deltaToUtcInitOnce();
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision);
diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c
index cc12968c72eef5b3970ca68cf660de502b402e1e..039d688526c4cb1bbcc3ad3163bf3d47437ee625 100644
--- a/src/os/src/detail/osFile.c
+++ b/src/os/src/detail/osFile.c
@@ -28,6 +28,7 @@ void taosClose(FileFd fd) {
void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
const char *tdengineTmpFileNamePrefix = "tdengine-";
char tmpPath[PATH_MAX];
+ static uint64_t seqId = 0;
int32_t len = (int32_t)strlen(tsTempDir);
memcpy(tmpPath, tsTempDir, len);
@@ -43,8 +44,10 @@ void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath) {
strcat(tmpPath, "-%d-%s");
}
- char rand[8] = {0};
- taosRandStr(rand, tListLen(rand) - 1);
+ char rand[32] = {0};
+
+ sprintf(rand, "%" PRIu64, atomic_add_fetch_64(&seqId, 1));
+
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
}
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index 73345426c9a266c57ac286efac716f5c5490b8bf..a76010b37f4dec456d1be1134efbf6153451f911 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -121,6 +121,10 @@ bool checkTzPresent(char *str, int32_t len) {
}
+inline int32_t taos_parse_time(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) {
+ return taosParseTime(timestr, time, len, timePrec, day_light);
+}
+
char* forwardToTimeStringEnd(char* str) {
int32_t i = 0;
int32_t numOfSep = 0;
diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c
index 35ca64d79f8b7a883014fd6ca980300ede22d6e2..84c873202b685e690252890e347632e096a4b39e 100644
--- a/src/os/src/linux/linuxEnv.c
+++ b/src/os/src/linux/linuxEnv.c
@@ -39,6 +39,20 @@ void osInit() {
strcpy(tsDataDir, "/var/lib/ProDB");
strcpy(tsLogDir, "/var/log/ProDB");
strcpy(tsScriptDir, "/etc/ProDB");
+#elif (_TD_KH_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "/etc/kinghistorian");
+ }
+ strcpy(tsDataDir, "/var/lib/kinghistorian");
+ strcpy(tsLogDir, "/var/log/kinghistorian");
+ strcpy(tsScriptDir, "/etc/kinghistorian");
+#elif (_TD_JH_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "/etc/jh_taos");
+ }
+ strcpy(tsDataDir, "/var/lib/jh_taos");
+ strcpy(tsLogDir, "/var/log/jh_taos");
+ strcpy(tsScriptDir, "/etc/jh_taos");
#else
if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos");
diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c
index 6f46bb43c75ff2c9735fc53a11bce585c1c213f6..6e087c9b29d7468b7c5a4e82c0f69b38f2c01223 100644
--- a/src/os/src/windows/wEnv.c
+++ b/src/os/src/windows/wEnv.c
@@ -33,12 +33,12 @@ void osInit() {
strcpy(tsScriptDir, "C:/PowerDB/script");
#elif (_TD_TQ_ == true)
if (configDir[0] == 0) {
- strcpy(configDir, "C:/TQ/cfg");
+ strcpy(configDir, "C:/TQueue/cfg");
}
- strcpy(tsVnodeDir, "C:/TQ/data");
- strcpy(tsDataDir, "C:/TQ/data");
- strcpy(tsLogDir, "C:/TQ/log");
- strcpy(tsScriptDir, "C:/TQ/script");
+ strcpy(tsVnodeDir, "C:/TQueue/data");
+ strcpy(tsDataDir, "C:/TQueue/data");
+ strcpy(tsLogDir, "C:/TQueue/log");
+ strcpy(tsScriptDir, "C:/TQueue/script");
#elif (_TD_PRO_ == true)
if (configDir[0] == 0) {
strcpy(configDir, "C:/ProDB/cfg");
@@ -47,6 +47,22 @@ void osInit() {
strcpy(tsDataDir, "C:/ProDB/data");
strcpy(tsLogDir, "C:/ProDB/log");
strcpy(tsScriptDir, "C:/ProDB/script");
+#elif (_TD_KH_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "C:/KingHistorian/cfg");
+ }
+ strcpy(tsVnodeDir, "C:/KingHistorian/data");
+ strcpy(tsDataDir, "C:/KingHistorian/data");
+ strcpy(tsLogDir, "C:/KingHistorian/log");
+ strcpy(tsScriptDir, "C:/KingHistorian/script");
+#elif (_TD_JH_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "C:/jh_iot/cfg");
+ }
+ strcpy(tsVnodeDir, "C:/jh_iot/data");
+ strcpy(tsDataDir, "C:/jh_iot/data");
+ strcpy(tsLogDir, "C:/jh_iot/log");
+ strcpy(tsScriptDir, "C:/jh_iot/script");
#else
if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg");
diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c
index fc06b920939b1edb0ebfb1ed16da9dcb60edfd3a..ed60ba42caa4a882f21b511aa8009d10d52dad52 100644
--- a/src/plugins/monitor/src/monMain.c
+++ b/src/plugins/monitor/src/monMain.c
@@ -43,6 +43,8 @@
#define QUERY_ID_LEN 24
#define CHECK_INTERVAL 1000
+#define SQL_STR_FMT "\"%s\""
+
static SMonHttpStatus monHttpStatusTable[] = {
{"HTTP_CODE_CONTINUE", 100},
{"HTTP_CODE_SWITCHING_PROTOCOL", 101},
@@ -611,11 +613,11 @@ static int32_t monGetRowElemCharLen(TAOS_FIELD field, char *rowElem) {
}
static int32_t monBuildFirstEpSql(char *sql) {
- return snprintf(sql, SQL_LENGTH, ", \"%s\"", tsFirst);
+ return snprintf(sql, SQL_LENGTH, ", "SQL_STR_FMT, tsFirst);
}
static int32_t monBuildVersionSql(char *sql) {
- return snprintf(sql, SQL_LENGTH, ", \"%s\"", version);
+ return snprintf(sql, SQL_LENGTH, ", "SQL_STR_FMT, version);
}
static int32_t monBuildMasterUptimeSql(char *sql) {
@@ -628,7 +630,8 @@ static int32_t monBuildMasterUptimeSql(char *sql) {
while ((row = taos_fetch_row(result))) {
for (int i = 0; i < num_fields; ++i) {
- if (strcmp(fields[i].name, "role") == 0 && strcmp((char *)row[i], "master") == 0) {
+ int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ if (strcmp(fields[i].name, "role") == 0 && strncmp((char *)row[i], "master", charLen) == 0) {
if (strcmp(fields[i + 1].name, "role_time") == 0) {
int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI);
//master uptime in seconds
@@ -768,11 +771,11 @@ static int32_t monGetVnodesTotalStats(char *ep, int32_t *totalVnodes,
int32_t *totalVnodesAlive) {
char subsql[TSDB_EP_LEN + 15];
memset(subsql, 0, sizeof(subsql));
- snprintf(subsql, TSDB_EP_LEN, "show vnodes \"%s\"", ep);
+ snprintf(subsql, TSDB_EP_LEN, "show vnodes "SQL_STR_FMT, ep);
TAOS_RES *result = taos_query(tsMonitor.conn, subsql);
int32_t code = taos_errno(result);
if (code != TSDB_CODE_SUCCESS) {
- monError("failed to execute cmd: show vnodes \"%s\", reason:%s", ep, tstrerror(code));
+ monError("failed to execute cmd: show vnodes "SQL_STR_FMT", reason:%s", ep, tstrerror(code));
}
TAOS_ROW row;
@@ -931,11 +934,11 @@ static int32_t monBuildDnodeVnodesSql(char *sql) {
int32_t vnodeNum = 0, masterNum = 0;
char sqlStr[TSDB_EP_LEN + 15];
memset(sqlStr, 0, sizeof(sqlStr));
- snprintf(sqlStr, TSDB_EP_LEN + 14, "show vnodes \"%s\"", tsLocalEp);
+ snprintf(sqlStr, TSDB_EP_LEN + 14, "show vnodes "SQL_STR_FMT, tsLocalEp);
TAOS_RES *result = taos_query(tsMonitor.conn, sqlStr);
int32_t code = taos_errno(result);
if (code != TSDB_CODE_SUCCESS) {
- monError("failed to execute cmd: show vnodes \"%s\", reason:%s", tsLocalEp, tstrerror(code));
+ monError("failed to execute cmd: show vnodes "SQL_STR_FMT", reason:%s", tsLocalEp, tstrerror(code));
}
TAOS_ROW row;
@@ -970,17 +973,18 @@ static int32_t monBuildDnodeMnodeSql(char *sql) {
int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
+ int32_t charLen;
while ((row = taos_fetch_row(result))) {
has_mnode_row = false;
for (int i = 0; i < num_fields; ++i) {
if (strcmp(fields[i].name, "end_point") == 0) {
- int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (strncmp((char *)row[i], tsLocalEp, charLen) == 0) {
has_mnode = true;
has_mnode_row = true;
}
} else if (strcmp(fields[i].name, "role") == 0) {
- int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (strncmp((char *)row[i], "master", charLen) == 0) {
if (has_mnode_row) {
monHasMnodeMaster = true;
@@ -1107,7 +1111,7 @@ static int32_t checkCreateVgroupTable(int32_t vgId) {
}
static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
- char v_dnode_ids[256], v_dnode_status[1024];
+ char v_dnode_ids[256] = {0}, v_dnode_status[1024] = {0};
int64_t ts = taosGetTimestampUs();
memset(sql, 0, SQL_LENGTH + 1);
@@ -1122,6 +1126,7 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
+ int32_t charLen;
while ((row = taos_fetch_row(result))) {
int32_t vgId;
int32_t pos = 0;
@@ -1132,25 +1137,26 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
vgId = *(int32_t *)row[i];
if (checkCreateVgroupTable(vgId) == TSDB_CODE_SUCCESS) {
memset(sql, 0, SQL_LENGTH + 1);
- pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", \"%s\"",
+ pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", "SQL_STR_FMT,
tsMonitorDbName, vgId, ts, dbName);
} else {
return TSDB_CODE_SUCCESS;
}
} else if (strcmp(fields[i].name, "tables") == 0) {
- pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]);
-
+ pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]);
} else if (strcmp(fields[i].name, "status") == 0) {
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "onlines") == 0) {
pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]);
} else if (v_dnode_str && strcmp(v_dnode_str, "_dnode") == 0) {
snprintf(v_dnode_ids, sizeof(v_dnode_ids), "%d;", *(int16_t *)row[i]);
} else if (v_dnode_str && strcmp(v_dnode_str, "_status") == 0) {
- snprintf(v_dnode_status, sizeof(v_dnode_status), "%s;", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ snprintf(v_dnode_status, charLen + 1, "%s;", (char *)row[i]);
} else if (strcmp(fields[i].name, "compacting") == 0) {
//flush dnode_ids and dnode_role in to sql
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\", \"%s\")", v_dnode_ids, v_dnode_status);
+ pos += snprintf(sql + pos, SQL_LENGTH, ", "SQL_STR_FMT", "SQL_STR_FMT")", v_dnode_ids, v_dnode_status);
}
}
monDebug("save vgroups, sql:%s", sql);
@@ -1209,15 +1215,19 @@ static void monSaveSlowQueryInfo() {
int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
+ int32_t charLen;
while ((row = taos_fetch_row(result))) {
for (int i = 0; i < num_fields; ++i) {
if (strcmp(fields[i].name, "query_id") == 0) {
has_slowquery = true;
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "user") == 0) {
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "qid") == 0) {
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "created_time") == 0) {
int64_t create_time = *(int64_t *)row[i];
create_time = convertTimePrecision(create_time, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO);
@@ -1225,9 +1235,11 @@ static void monSaveSlowQueryInfo() {
} else if (strcmp(fields[i].name, "time") == 0) {
pos += snprintf(sql + pos, SQL_LENGTH, ", %" PRId64 "", *(int64_t *)row[i]);
} else if (strcmp(fields[i].name, "ep") == 0) {
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\"", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "sql") == 0) {
- pos += snprintf(sql + pos, SQL_LENGTH, ", \"%s\")", (char *)row[i]);
+ charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
+ pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 2, ", "SQL_STR_FMT")", (char *)row[i]);
}
}
}
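These replacements lean on C's adjacent string-literal concatenation: ", " SQL_STR_FMT is merged at compile time into ", \"%s\"", so the format strings stay unchanged while the quoting lives in a single macro. A stand-alone sketch of the same pattern (buffer size and value are illustrative, not from the monitor code):

```c
#include <stdio.h>

#define SQL_STR_FMT "\"%s\""

int main(void) {
  char sql[64] = {0};
  const char *version = "2.4.0.0";  /* illustrative value */
  /* ", " SQL_STR_FMT concatenates at compile time into ", \"%s\"" */
  int written = snprintf(sql, sizeof(sql), ", " SQL_STR_FMT, version);
  printf("%s -> %d chars\n", sql, written);
  return 0;
}
```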
diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt
index a815942fbedb4f3b99e3595c3960d931ddde192a..37bf80ae5dcac8c9ee4d4816cc55ea9de5a81693 100644
--- a/src/query/CMakeLists.txt
+++ b/src/query/CMakeLists.txt
@@ -8,11 +8,11 @@ INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(query ${SRC})
SET_SOURCE_FILES_PROPERTIES(src/sql.c PROPERTIES COMPILE_FLAGS -w)
-TARGET_LINK_LIBRARIES(query tsdb tutil lua)
+TARGET_LINK_LIBRARIES(query tsdb tutil ${LINK_LUA})
IF (TD_LINUX)
IF (TD_BUILD_LUA)
- TARGET_LINK_LIBRARIES(query m rt lua)
+ TARGET_LINK_LIBRARIES(query m rt ${LINK_LUA})
ELSE ()
TARGET_LINK_LIBRARIES(query m rt)
ENDIF ()
@@ -21,7 +21,7 @@ ENDIF ()
IF (TD_DARWIN)
IF (TD_BUILD_LUA)
- TARGET_LINK_LIBRARIES(query m lua)
+ TARGET_LINK_LIBRARIES(query m ${LINK_LUA})
ELSE ()
TARGET_LINK_LIBRARIES(query m)
ENDIF ()
diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c
index 99572f6e9345b933434e3685ecb79750a04388fc..9893533a589af0ca7a87dd05628db5059ecbe8eb 100644
--- a/src/query/src/qTsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -5,7 +5,7 @@
#include "queryLog.h"
static int32_t getDataStartOffset();
-static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);
+static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t qry_index, STSGroupBlockInfo* pBlockInfo);
static STSBuf* allocResForTSBuf(STSBuf* pTSBuf);
static int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader);
@@ -697,8 +697,8 @@ bool tsBufNextPos(STSBuf* pTSBuf) {
int32_t groupIndex = pTSBuf->numOfGroups - 1;
pCur->vgroupIndex = groupIndex;
- int32_t id = pTSBuf->pData[pCur->vgroupIndex].info.id;
- STSGroupBlockInfo* pBlockInfo = tsBufGetGroupBlockInfo(pTSBuf, id);
+ // get current vgroupIndex BlockInfo
+ STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[pCur->vgroupIndex].info;
int32_t blockIndex = pBlockInfo->numOfBlocks - 1;
tsBufGetBlock(pTSBuf, groupIndex, blockIndex);
@@ -718,32 +718,43 @@ bool tsBufNextPos(STSBuf* pTSBuf) {
while (1) {
assert(pTSBuf->tsData.len == pTSBuf->block.numOfElem * TSDB_KEYSIZE);
+ // tsIndex is last
if ((pCur->order == TSDB_ORDER_ASC && pCur->tsIndex >= pTSBuf->block.numOfElem - 1) ||
(pCur->order == TSDB_ORDER_DESC && pCur->tsIndex <= 0)) {
- int32_t id = pTSBuf->pData[pCur->vgroupIndex].info.id;
- STSGroupBlockInfo* pBlockInfo = tsBufGetGroupBlockInfo(pTSBuf, id);
- if (pBlockInfo == NULL || (pCur->blockIndex >= pBlockInfo->numOfBlocks - 1 && pCur->order == TSDB_ORDER_ASC) ||
+ // get current vgroupIndex BlockInfo
+ STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[pCur->vgroupIndex].info;
+ if (pBlockInfo == NULL) {
+ return false;
+ }
+
+ // blockIndex is last
+ if ((pCur->blockIndex >= pBlockInfo->numOfBlocks - 1 && pCur->order == TSDB_ORDER_ASC) ||
(pCur->blockIndex <= 0 && pCur->order == TSDB_ORDER_DESC)) {
+
+ // vgroupIndex is last
if ((pCur->vgroupIndex >= pTSBuf->numOfGroups - 1 && pCur->order == TSDB_ORDER_ASC) ||
(pCur->vgroupIndex <= 0 && pCur->order == TSDB_ORDER_DESC)) {
+          // this is the end: vgroupIndex, blockIndex and tsIndex are all at their last position
pCur->vgroupIndex = -1;
return false;
}
- if (pBlockInfo == NULL) {
- return false;
- }
-
+        // blockIndex must be taken from the next group
+ int32_t nextGroupIdx = pCur->vgroupIndex + step;
+ pBlockInfo = &pTSBuf->pData[nextGroupIdx].info;
int32_t blockIndex = (pCur->order == TSDB_ORDER_ASC) ? 0 : (pBlockInfo->numOfBlocks - 1);
+        // vgroupIndex moves to the next group; the cursor value is set in tsBufGetBlock()
tsBufGetBlock(pTSBuf, pCur->vgroupIndex + step, blockIndex);
break;
} else {
+        // blockIndex moves to the next block; the cursor value is set in tsBufGetBlock()
tsBufGetBlock(pTSBuf, pCur->vgroupIndex, pCur->blockIndex + step);
break;
}
} else {
+      // tsIndex moves to the next position
pCur->tsIndex += step;
break;
}
@@ -767,7 +778,7 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) {
}
STSCursor* pCur = &pTSBuf->cur;
- if (pCur != NULL && pCur->vgroupIndex < 0) {
+ if (pCur->vgroupIndex < 0) {
return elem1;
}
@@ -796,7 +807,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) {
return -1;
}
- // src can only have one vnode index
+ // src can only have one vnode qry_index
assert(pSrcBuf->numOfGroups == 1);
// there are data in buffer, flush to disk first
@@ -819,7 +830,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) {
pDestBuf->pData = tmp;
}
- // directly copy the vnode index information
+ // directly copy the vnode qry_index information
memcpy(&pDestBuf->pData[oldSize], pSrcBuf->pData, (size_t)pSrcBuf->numOfGroups * sizeof(STSGroupBlockInfoEx));
// set the new offset value
@@ -1012,8 +1023,8 @@ static int32_t getDataStartOffset() {
}
// update prev vnode length info in file
-static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo) {
- int32_t offset = sizeof(STSBufFileHeader) + index * sizeof(STSGroupBlockInfo);
+static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t qry_index, STSGroupBlockInfo* pBlockInfo) {
+ int32_t offset = sizeof(STSBufFileHeader) + qry_index * sizeof(STSGroupBlockInfo);
doUpdateGroupInfo(pTSBuf, offset, pBlockInfo);
}
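The reworked tsBufNextPos() advances three nested cursors in a fixed order: the timestamp index inside the current block, then the block index inside the current group, and only then the vgroup index; when one level is exhausted the lower levels restart at the boundary of the next unit. A simplified, self-contained sketch of that advance order (the struct and counts below are illustrative stand-ins, not the real STSBuf types, and only the ascending direction is shown):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative three-level cursor; not the real STSCursor. */
typedef struct { int groupIndex, blockIndex, tsIndex; } Cursor;

static bool nextPos(Cursor *c, int numGroups, int numBlocks, int numTs) {
  if (c->tsIndex < numTs - 1) { c->tsIndex++; return true; }   /* step inside the block */
  if (c->blockIndex < numBlocks - 1) {                         /* move to the next block */
    c->blockIndex++; c->tsIndex = 0; return true;
  }
  if (c->groupIndex < numGroups - 1) {                         /* move to the next group */
    c->groupIndex++; c->blockIndex = 0; c->tsIndex = 0; return true;
  }
  return false;                                                /* all three levels exhausted */
}

int main(void) {
  Cursor c = {0, 0, 0};
  int visited = 1;                               /* (0,0,0) is the starting position */
  while (nextPos(&c, 2, 2, 3)) visited++;
  printf("visited %d positions\n", visited);     /* 2 groups * 2 blocks * 3 timestamps = 12 */
  return 0;
}
```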
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index dfa4b74b7a5720398f9fc748078a0be6d870dda7..5e1c2bc69e6aad67cae979713ea9e3caa3b73584 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -660,9 +660,9 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
int numColumns;
int32_t blockIdx;
SDataStatis* pBlockStatis = NULL;
- SMemRow row = NULL;
+ // SMemRow row = NULL;
// restore last column data with last schema
-
+
int err = 0;
numColumns = schemaNCols(pSchema);
@@ -676,15 +676,15 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
}
}
- row = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
- if (row == NULL) {
- terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- err = -1;
- goto out;
- }
+ // row = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
+ // if (row == NULL) {
+ // terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+ // err = -1;
+ // goto out;
+ // }
- memRowSetType(row, SMEM_ROW_DATA);
- tdInitDataRow(memRowDataBody(row), pSchema);
+ // memRowSetType(row, SMEM_ROW_DATA);
+ // tdInitDataRow(memRowDataBody(row), pSchema);
// first load block index info
if (tsdbLoadBlockInfo(pReadh, NULL, NULL) < 0) {
@@ -743,10 +743,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) {
SDataCol *pDataCol = pReadh->pDCols[0]->cols + i;
const void* pColData = tdGetColDataOfRow(pDataCol, rowId);
- tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset);
- //SDataCol *pDataCol = readh.pDCols[0]->cols + j;
- void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
- if (isNull(value, pCol->type)) {
+ // tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset);
+ // SDataCol *pDataCol = readh.pDCols[0]->cols + j;
+      // void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type,
+      //                                 TD_DATA_ROW_HEAD_SIZE + pCol->offset);
+ if (isNull(pColData, pCol->type)) {
continue;
}
@@ -761,13 +763,14 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
pLastCol->pData = malloc(bytes);
pLastCol->bytes = bytes;
pLastCol->colId = pCol->colId;
- memcpy(pLastCol->pData, value, bytes);
+ memcpy(pLastCol->pData, pColData, bytes);
// save row ts(in column 0)
pDataCol = pReadh->pDCols[0]->cols + 0;
- pCol = schemaColAt(pSchema, 0);
- tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset);
- pLastCol->ts = memRowKey(row);
+ // pCol = schemaColAt(pSchema, 0);
+ // tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset);
+ // pLastCol->ts = memRowKey(row);
+ pLastCol->ts = tdGetKey(*(TKEY *)(tdGetColDataOfRow(pDataCol, rowId)));
pTable->restoreColumnNum += 1;
@@ -779,7 +782,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
}
out:
- taosTZfree(row);
+ // taosTZfree(row);
tfree(pBlockStatis);
if (err == 0 && numColumns <= pTable->restoreColumnNum) {
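The change drops the temporary SMemRow: instead of appending each value into a scratch row and reading it back, the restore path now takes the value pointer straight from the loaded data column and decodes the row's timestamp key directly. A simplified stand-alone sketch of that idea (the Col struct below is an illustrative stand-in, not the real SDataCol):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative fixed-width data column; not the real SDataCol. */
typedef struct {
  const void *pData;  /* column values                */
  size_t      bytes;  /* bytes per value              */
  int         nRows;  /* number of rows in the column */
} Col;

static const void *colDataAt(const Col *c, int row) {
  return (const char *)c->pData + (size_t)row * c->bytes;
}

int main(void) {
  int32_t vals[] = {7, 9, 11};
  int64_t keys[] = {1000, 2000, 3000};
  Col col = {vals, sizeof(int32_t), 3};
  Col ts  = {keys, sizeof(int64_t), 3};

  int     lastRow = col.nRows - 1;
  int32_t lastVal;
  int64_t lastKey;
  memcpy(&lastVal, colDataAt(&col, lastRow), sizeof(lastVal));  /* value straight from the column */
  memcpy(&lastKey, colDataAt(&ts,  lastRow), sizeof(lastKey));  /* timestamp key from column 0    */
  printf("last value %d at ts %lld\n", lastVal, (long long)lastKey);
  return 0;
}
```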
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 4c968a8362766b564230befafb27c226c338c51b..2a49862bac14633f77db921b7d2e17b160019425 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -383,6 +383,12 @@ void taosReadGlobalLogCfg() {
#elif (_TD_PRO_ == true)
printf("configDir:%s not there, use default value: /etc/ProDB", configDir);
strcpy(configDir, "/etc/ProDB");
+ #elif (_TD_KH_ == true)
+ printf("configDir:%s not there, use default value: /etc/kinghistorian", configDir);
+ strcpy(configDir, "/etc/kinghistorian");
+ #elif (_TD_JH_ == true)
+ printf("configDir:%s not there, use default value: /etc/jh_taos", configDir);
+ strcpy(configDir, "/etc/jh_taos");
#else
printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos");
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index a2eea5aa7d99a43f2cf7f0552e843ce9a52034c0..232d10a7d07594c9c62cd13767c320da27af2a73 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -87,6 +87,10 @@ char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq";
#elif (_TD_PRO_ == true)
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB";
+#elif (_TD_KH_ == true)
+char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/kinghistorian";
+#elif (_TD_JH_ == true)
+char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/jh_taos";
#else
char tsLogDir[PATH_MAX] = "/var/log/taos";
#endif
diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt
index e94de3cbca574de71c8bcefc4b52173922c05a98..9d5dfc37b1045cb771cf6bd20da7087d7523e2e2 100644
--- a/tests/examples/c/CMakeLists.txt
+++ b/tests/examples/c/CMakeLists.txt
@@ -10,13 +10,13 @@ IF (TD_LINUX)
ADD_EXECUTABLE(subscribe subscribe.c)
TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread )
ADD_EXECUTABLE(epoll epoll.c)
- TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
+ TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ${LINK_LUA})
ENDIF ()
IF (TD_DARWIN)
INCLUDE_DIRECTORIES(. ${TD_COMMUNITY_DIR}/src/inc ${TD_COMMUNITY_DIR}/src/client/inc ${TD_COMMUNITY_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(demo demo.c)
- TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua)
+ TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ${LINK_LUA})
ADD_EXECUTABLE(epoll epoll.c)
- TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
+ TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ${LINK_LUA})
ENDIF ()
diff --git a/tests/examples/lua/luaconnector.so b/tests/examples/lua/luaconnector.so
deleted file mode 100755
index 08bf6a6156aebe053132545193cd111fb436bc4b..0000000000000000000000000000000000000000
Binary files a/tests/examples/lua/luaconnector.so and /dev/null differ
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..bbc1e5a27a857a8d374e935b41e91a614c48dc4b
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_MixTbRows.jmx
@@ -0,0 +1,209 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/json/test with the same Authorization header and body:
   {"metric": "cpu.usage_user.rows", "timestamp":${ts_counter}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"${rows_counter}","team":"NYC"}}
 - counters: ts_counter starting at 1614530008000 with increment 1; rows_counter counting from 1 to the
   "row_count" placeholder with increment 1]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..11f0ed8f64a51aee50be99cc4ee8f1b7450b34e8
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createStb.jmx
@@ -0,0 +1,191 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/json/test with the same Authorization header and body:
   {"metric": "cpu.usage_user_${__Random(1,100000,)}_${__Random(1,100000,)}_${__Random(1,100000,)}", "timestamp":1626006833640, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"0","team":"NYC"}}]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..053480dc47a9c26cd5faf0b0fb277c3fa060b87d
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_createTb.jmx
@@ -0,0 +1,191 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/json/test with the same Authorization header and body:
   {"metric": "cpu.usage_user", "timestamp":1626006833640, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"${__Random(1,100000,)}","region":"us-west-1","service":"${__Random(1,100000,)}","service_environment":"staging","service_version":"${__Random(1,100000,)}","team":"NYC"}}]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..be8b8bdc2b8491b6f1689ad45b31e026dcae6f23
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_insertRows.jmx
@@ -0,0 +1,200 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (24 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/json/test with the same Authorization header and body:
   {"metric": "cpu.usage_user.rows", "timestamp":${ts_counter}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"0","team":"NYC"}}
 - counter: ts_counter starting at 1614530008000 with increment 1]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..7192421de4ca8f88f86226852bbe7242423efe65
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_json_jmeter_csv_import.jmx
@@ -0,0 +1,203 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/json/test with the same Authorization header and body:
   {"metric": "cpu.usage_user.rows", "timestamp":${ts_csv_count}, "value":32.261068286779754, "tags":{"arch":"x64","datacenter":"us-west-1b","hostname":"host_5","os":"Ubuntu16","rack":"13","region":"us-west-1","service":"10","service_environment":"staging","service_version":"${row_csv_count}","team":"NYC"}}
 - CSV Data Set Config: comma-delimited, UTF-8, file name placeholder "import_file_name", variables
   ts_csv_count,row_csv_count, shared across all threads (shareMode.all)]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..0b001cd57776deaf0fc1268ac13bab72c50faafa
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_MixTbRows.jmx
@@ -0,0 +1,209 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/telnet/test with the same Authorization header and body:
   cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=${rows_counter} team=NYC
 - counters: ts_counter starting at 1614530008000 with increment 1; rows_counter counting from 1 to the
   "row_count" placeholder with increment 1]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..0a7a6aad4790bb0e8dca56f58eb8ef447ebe1f89
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createStb.jmx
@@ -0,0 +1,191 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/telnet/test with the same Authorization header and body:
   cpu.usage_user_${__Random(1,100000,)}_${__Random(1,100000,)}_${__Random(1,100000,)} 1626006833640 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=0 team=NYC]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..6e7a8268f092c13ddf3e1896702aa23fe94221d0
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_createTb.jmx
@@ -0,0 +1,191 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/telnet/test with the same Authorization header and body:
   cpu.usage_user 1626006833640 32.261068286779754 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=${__Random(1,100000,)} region=us-west-1 service=${__Random(1,100000,)} service_environment=staging service_version=${__Random(1,100000,)} team=NYC]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..eb86c0b2fc1b307fae1881f278fde9c5f0ed27b8
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_insertRows.jmx
@@ -0,0 +1,200 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (24 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/telnet/test with the same Authorization header and body:
   cpu.usage_user.rows ${ts_counter} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=0 team=NYC
 - counter: ts_counter starting at 1614530008000 with increment 1]
diff --git a/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..672c377fcad2e988443efa28ae10c0b3c09152be
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/opentsdb_telnet_jmeter_csv_import.jmx
@@ -0,0 +1,203 @@
[JMeter test plan; the XML markup was lost during extraction. Recoverable configuration:
 - setup thread group (1 thread, 1 loop): HTTP POST to http://192.168.1.85:6041/rest/sql with header
   "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" and body: create database if not exists test precision 'ms'
 - load thread group (100 threads, loop count placeholder "looptimes"): HTTP POST to
   http://192.168.1.85:6041/opentsdb/v1/put/telnet/test with the same Authorization header and body:
   cpu.usage_user.rows ${ts_csv_count} 22.345567 arch=x64 datacenter=us-west-1b hostname=host_5 os=Ubuntu16 rack=13 region=us-west-1 service=10 service_environment=staging service_version=${row_csv_count} team=NYC
 - CSV Data Set Config: comma-delimited, UTF-8, file name placeholder "import_file_name", variables
   ts_csv_count,row_csv_count, shared across all threads (shareMode.all)]
diff --git a/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py b/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..480c25206065fc62a53156e899c213f8e2b487db
--- /dev/null
+++ b/tests/perftest-scripts/taosadapter_perftest/taosadapter_perftest.py
@@ -0,0 +1,224 @@
+from fabric import Connection
+from loguru import logger
+import shutil
+import os
+import time
+
+class TaosadapterPerftest():
+ def __init__(self):
+ self.ip = "192.168.1.85"
+ self.port = "22"
+ self.user = "root"
+ self.passwd = "tbase125!"
+ self.telnetCreateStbJmxFile = "opentsdb_telnet_createStb.jmx"
+ self.telnetCreateTbJmxFile = "opentsdb_telnet_createTb.jmx"
+ self.telnetInsertRowsFile = "opentsdb_telnet_insertRows.jmx"
+ # self.telnetMixJmxFile = "opentsdb_telnet_MixTbRows.jmx"
+ self.telnetMixJmxFile = "opentsdb_telnet_jmeter_csv_import.jmx"
+
+ self.jsonCreateStbJmxFile = "opentsdb_json_createStb.jmx"
+ self.jsonCreateTbJmxFile = "opentsdb_json_createTb.jmx"
+ self.jsonInsertRowsFile = "opentsdb_json_insertRows.jmx"
+ # self.jsonMixJmxFile = "opentsdb_json_MixTbRows.jmx"
+ self.jsonMixJmxFile = "opentsdb_json_jmeter_csv_import.jmx"
+
+ self.logfile = "taosadapter_perftest.log"
+ self.createStbThreads = 100
+ self.createTbThreads = 100
+ self.insertRowsThreads = 24
+
+ logger.add(self.logfile)
+
+ def exec_remote_cmd(self, cmd):
+ """
+ remote exec shell cmd
+ """
+ try:
+ c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
+ result = c.run(cmd, pty=False, warn=True, hide=True).stdout
+ c.close()
+ return result
+ except Exception as e:
+            logger.error(f"exec cmd {cmd} failed: {e}")
+
+ def exec_local_cmd(self, shell_cmd):
+ '''
+ exec local shell cmd
+ '''
+ result = os.popen(shell_cmd).read().strip()
+ return result
+
+ def modifyJxmLooptimes(self, filename, looptimes, row_count=None, import_file_name=None):
+ '''
+ modify looptimes
+ '''
+ with open(filename, "r", encoding="utf-8") as f:
+ lines = f.readlines()
+ with open(filename, "w", encoding="utf-8") as f_w:
+ for line in lines:
+ if "looptimes" in line:
+ line = line.replace("looptimes", looptimes)
+ if row_count is not None:
+ if "row_count" in line:
+ line = line.replace("row_count", row_count)
+ if import_file_name is not None:
+ if "import_file_name" in line:
+ line = line.replace("import_file_name", import_file_name)
+ f_w.write(line)
+
+ def cleanAndRestartTaosd(self):
+ '''
+ restart taosd and clean env
+ '''
+ logger.info("---- restarting taosd and taosadapter ----")
+ self.exec_remote_cmd("systemctl stop taosd")
+ self.exec_remote_cmd("rm -rf /var/lib/taos/* /var/log/taos/*")
+ self.exec_remote_cmd("systemctl start taosd")
+ logger.info("---- finish restart ----")
+ time.sleep(60)
+
+ def recreateReportDir(self, path):
+ '''
+ recreate jmeter report path
+ '''
+ if os.path.exists(path):
+ self.exec_local_cmd(f'rm -rf {path}/*')
+ else:
+ os.makedirs(path)
+
+ def cleanLog(self):
+ '''
+ clean log
+ '''
+ with open(self.logfile, 'w') as f:
+ f.seek(0)
+ f.truncate()
+
+ def genMixTbRows(self, filename, table_count, row_count):
+ logger.info('generating import data file')
+ ts_start = 1614530008000
+ with open(filename, "w", encoding="utf-8") as f_w:
+ for i in range(table_count):
+ for j in range(row_count):
+ input_line = str(ts_start) + "," + str(i) + '\n'
+ ts_start += 1
+ f_w.write(input_line)
+
+ def outputParams(self, protocol, create_type):
+ '''
+        protocol is "telnet" or "json"
+ create_type is "stb" or "tb" or "rows"
+ '''
+ if protocol == "telnet":
+ if create_type == "stb":
+ return self.telnetCreateStbJmxFile, self.createStbThreads
+ elif create_type == "tb":
+ return self.telnetCreateTbJmxFile, self.createTbThreads
+ elif create_type == "rows":
+ return self.telnetInsertRowsFile, self.insertRowsThreads
+ else:
+ logger.error("create type error!")
+ else:
+ if create_type == "stb":
+ return self.jsonCreateStbJmxFile, self.createStbThreads
+ elif create_type == "tb":
+ return self.jsonCreateTbJmxFile, self.createTbThreads
+ elif create_type == "rows":
+ return self.jsonInsertRowsFile, self.insertRowsThreads
+ else:
+ logger.error("create type error!")
+
+ def insertTDengine(self, procotol, create_type, count):
+ '''
+ create stb/tb or insert rows
+ '''
+ self.cleanAndRestartTaosd()
+ jmxfile, threads = self.outputParams(procotol, create_type)
+ handle_file = str(count) + jmxfile
+ report_dir = f'testreport/{handle_file}'
+ self.recreateReportDir(report_dir)
+ shutil.copyfile(jmxfile, handle_file)
+ replace_count = int(count/threads)
+ self.modifyJxmLooptimes(handle_file, str(replace_count))
+ logger.info(f'jmeter running ----- jmeter -n -t {handle_file} -l {report_dir}/{handle_file}.txt -e -o {report_dir}')
+ result = self.exec_local_cmd(f"jmeter -n -t {handle_file} -l {report_dir}/{handle_file}.txt -e -o {report_dir}")
+ logger.info(result)
+ logger.info("----- sleep 120s and please record data -----")
+ time.sleep(120)
+
+ def insertMixTbRows(self, procotol, table_count, row_count):
+ self.cleanAndRestartTaosd()
+ local_path = os.getcwd()
+ jmxfile = f"opentsdb_{procotol}_{table_count}Tb{row_count}Rows.jmx"
+ import_file_name = f"import_opentsdb_{procotol}_{table_count}Tb{row_count}Rows.txt"
+ import_file_path = local_path + '/' + import_file_name
+ self.genMixTbRows(import_file_name, table_count, row_count)
+ report_dir = f'testreport/{jmxfile}'
+ self.recreateReportDir(report_dir)
+ if procotol == "telnet":
+ shutil.copyfile(self.telnetMixJmxFile, jmxfile)
+ else:
+ shutil.copyfile(self.jsonMixJmxFile, jmxfile)
+ self.modifyJxmLooptimes(jmxfile, str(int(table_count*row_count/100)), import_file_name=import_file_path)
+ logger.info(f'jmeter running ----- jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}')
+ result = self.exec_local_cmd(f"jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}")
+ logger.info(result)
+ logger.info("----- sleep 120s and please record data -----")
+ time.sleep(120)
+
+ # def insertMixTbRows(self, procotol, looptimes, row_count):
+ # self.cleanAndRestartTaosd()
+ # jmxfile = f"opentsdb_{procotol}_{looptimes}Tb100Rows.jmx"
+ # report_dir = f'testreport/{jmxfile}'
+ # self.recreateReportDir(report_dir)
+ # if procotol == "telnet":
+ # shutil.copyfile(self.telnetMixJmxFile, jmxfile)
+ # else:
+ # shutil.copyfile(self.jsonMixJmxFile, jmxfile)
+
+ # self.modifyJxmLooptimes(jmxfile, str(looptimes), str(row_count))
+ # result = self.exec_local_cmd(f"jmeter -n -t {jmxfile} -l {report_dir}/{jmxfile}.txt -e -o {report_dir}")
+ # logger.info(result)
+ # logger.info("----- sleep 120s and please record data -----")
+ # time.sleep(120)
+
+
+
+if __name__ == '__main__':
+ taosadapterPerftest = TaosadapterPerftest()
+ taosadapterPerftest.cleanLog()
+
+ logger.info('------------ Start testing the scenarios in the report chapter 3.4.1 ------------')
+ for procotol in ["telnet", "json"]:
+ logger.info(f'----- {procotol} protocol ------- Creating 30W stable ------------')
+ taosadapterPerftest.insertTDengine(procotol, "stb", 300000)
+ logger.info(f'----- {procotol} protocol ------- Creating 100W table with stb "cpu.usage_user" ------------')
+ taosadapterPerftest.insertTDengine(procotol, "tb", 1000000)
+ logger.info(f'----- {procotol} protocol ------- inserting 100W rows ------------')
+ taosadapterPerftest.insertTDengine(procotol, "rows", 1000000)
+
+ logger.info(f'----- {procotol} protocol ------- Creating 50W stable ------------')
+ taosadapterPerftest.insertTDengine(procotol, "stb", 500000)
+ logger.info(f'----- {procotol} protocol ------- Creating 500W table with stb "cpu.usage_user" ------------')
+ taosadapterPerftest.insertTDengine(procotol, "tb", 5000000)
+ logger.info(f'----- {procotol} protocol ------- inserting 500W rows ------------')
+ taosadapterPerftest.insertTDengine(procotol, "rows", 5000000)
+
+ logger.info(f'----- {procotol} protocol ------- Creating 100W stable ------------')
+ taosadapterPerftest.insertTDengine(procotol, "stb", 1000000)
+ logger.info(f'----- {procotol} protocol ------- Creating 1000W table with stb "cpu.usage_user" ------------')
+ taosadapterPerftest.insertTDengine(procotol, "tb", 10000000)
+ logger.info(f'----- {procotol} protocol ------- inserting 1000W rows ------------')
+ taosadapterPerftest.insertTDengine(procotol, "rows", 10000000)
+
+    logger.info(f'----- {procotol} protocol ------- Creating 10W tables with 1000 rows each ------------')
+ taosadapterPerftest.insertMixTbRows(procotol, 100000, 1000)
+
+    logger.info(f'----- {procotol} protocol ------- Creating 100W tables with 100 rows each ------------')
+ taosadapterPerftest.insertMixTbRows(procotol, 1000000, 100)
+
+    logger.info(f'----- {procotol} protocol ------- Creating 500W tables with 20 rows each ------------')
+ taosadapterPerftest.insertMixTbRows(procotol, 5000000, 20)
+
+    logger.info(f'----- {procotol} protocol ------- Creating 1000W tables with 10 rows each ------------')
+ taosadapterPerftest.insertMixTbRows(procotol, 10000000, 10)
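For scale, the loop counts written into the copied .jmx files follow directly from the thread counts in the script: insertTDengine() replaces the "looptimes" placeholder with count/threads, so creating 300000 stables with 100 JMeter threads means each thread loops 3000 times, while insertMixTbRows() uses table_count*row_count/100 iterations driven by the generated CSV import file.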
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index e057c5f542cb31570c8b7c810f512634bffc53f6..effe48ab366fcbb700583323f9a0eefbc36bfcc4 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -182,7 +182,7 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoIns
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py
python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py
-python3 test.py -f tools/taosdumpTestNanoSupport.py
+# disabled because taosdump does not support sql format. python3 test.py -f tools/taosdumpTestNanoSupport.py
#
python3 ./test.py -f tsdb/tsdbComp.py
@@ -305,7 +305,7 @@ python3 ./test.py -f client/client.py
python3 ./test.py -f client/version.py
python3 ./test.py -f client/alterDatabase.py
python3 ./test.py -f client/noConnectionErrorTest.py
-python3 ./test.py -f client/taoshellCheckCase.py
+# disabled because taosdump does not support sql format. python3 ./test.py -f client/taoshellCheckCase.py
# python3 test.py -f client/change_time_1_1.py
# python3 test.py -f client/change_time_1_2.py
diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py
index 3ff2b82bf6b326ed4d07a5a51027c9e266c2fd72..b9eadeb3443127c927b29fbb16bda4c12378e71a 100644
--- a/tests/pytest/functions/function_stddev.py
+++ b/tests/pytest/functions/function_stddev.py
@@ -123,8 +123,33 @@ class TDTestCase:
tdSql.execute("insert into t1 values(now, 1, 'abc');")
tdLog.info("select stddev(k) from t1 where b <> 'abc' interval(1s);")
tdSql.query("select stddev(k) from t1 where b <> 'abc' interval(1s);")
-
-
+
+ tdSql.execute("create table stdtable(ts timestamp, col1 int) tags(loc nchar(64))")
+ tdSql.execute("create table std1 using stdtable tags('beijing')")
+ tdSql.execute("create table std2 using stdtable tags('shanghai')")
+ tdSql.execute("create table std3 using stdtable tags('河南')")
+ tdSql.execute("insert into std1 values(now + 1s, 1)")
+ tdSql.execute("insert into std1 values(now + 2s, 2);")
+ tdSql.execute("insert into std2 values(now + 3s, 1);")
+ tdSql.execute("insert into std2 values(now + 4s, 2);")
+ tdSql.execute("insert into std3 values(now + 5s, 4);")
+ tdSql.execute("insert into std3 values(now + 6s, 8);")
+ tdSql.query("select stddev(col1) from stdtable group by loc;")
+ tdSql.checkData(0, 0, 2.0)
+ tdSql.checkData(1, 0, 0.5)
+ tdSql.checkData(2, 0, 0.5)
+
+ tdSql.execute("create table stdtableint(ts timestamp, col1 int) tags(num int)")
+ tdSql.execute("create table stdint1 using stdtableint tags(1)")
+ tdSql.execute("create table stdint2 using stdtableint tags(2)")
+ tdSql.execute("insert into stdint1 values(now + 1s, 1)")
+ tdSql.execute("insert into stdint1 values(now + 2s, 2);")
+ tdSql.execute("insert into stdint2 values(now + 3s, 1);")
+ tdSql.execute("insert into stdint2 values(now + 4s, 2);")
+ tdSql.query("select stddev(col1) from stdtableint group by num")
+ tdSql.checkData(0, 0, 0.5)
+ tdSql.checkData(1, 0, 0.5)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
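The expected values in the new assertions are the population standard deviation per group: for the two groups holding {1, 2} the mean is 1.5 and the variance is ((1-1.5)^2 + (2-1.5)^2)/2 = 0.25, giving a stddev of 0.5; for the group holding {4, 8} the mean is 6 and the variance is ((4-6)^2 + (8-6)^2)/2 = 4, giving a stddev of 2.0.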
diff --git a/tests/pytest/query/queryCnameDisplay.py b/tests/pytest/query/queryCnameDisplay.py
index 66a7f85120fe13293996d1bd3153b6fe9b1d6a72..186b3bfe1d1d06c4210c950fff097cb37a73d5df 100644
--- a/tests/pytest/query/queryCnameDisplay.py
+++ b/tests/pytest/query/queryCnameDisplay.py
@@ -79,16 +79,22 @@ class TDTestCase:
tdSql.execute('insert into st1 values (now, 1, 2, 1.1, 2.2, "a", 1, 1, false, "bb");')
# select as cname with cname_list
- sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check'
- sql_seq_no_as = sql_seq.replace(' as ', ' ')
- res = tdSql.getColNameList(sql_seq)
- res_no_as = tdSql.getColNameList(sql_seq_no_as)
+ sql_seq1 = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from super_table_cname_check'
+ sql_seq2 = f'select count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check'
+ sql_seq_no_as1 = sql_seq1.replace(' as ', ' ')
+ sql_seq_no_as2 = sql_seq2.replace(' as ', ' ')
+ res1 = tdSql.getColNameList(sql_seq1)
+ res2 = tdSql.getColNameList(sql_seq2)
+ res_no_as1 = tdSql.getColNameList(sql_seq_no_as1)
+ res_no_as2 = tdSql.getColNameList(sql_seq_no_as2)
# cname[1] > 64, it is expected to be equal to 64
cname_list_1_expected = cname_list[1][:-1]
cname_list[1] = cname_list_1_expected
- checkColNameList = tdSql.checkColNameList(res, cname_list)
- checkColNameList = tdSql.checkColNameList(res_no_as, cname_list)
+ tdSql.checkColNameList(res1, cname_list[:10])
+ tdSql.checkColNameList(res2, cname_list[10:])
+ tdSql.checkColNameList(res_no_as1, cname_list[:10])
+ tdSql.checkColNameList(res_no_as2, cname_list[10:])
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/query/querySort.py b/tests/pytest/query/querySort.py
index 17022bdc41057bcb67e1530a2cb6d399bada20ff..31d53ea0041b0656c52ebf0afe3954290f8763d4 100644
--- a/tests/pytest/query/querySort.py
+++ b/tests/pytest/query/querySort.py
@@ -97,7 +97,7 @@ class TDTestCase:
self.checkColumnSorted(0, "desc")
print("======= step 2: verify order for special column =========")
-
+
tdSql.query("select tbcol1 from st order by ts desc")
tdSql.query("select tbcol6 from st order by ts desc")
@@ -122,6 +122,44 @@ class TDTestCase:
(i, i))
self.checkColumnSorted(1, "desc")
+ # order by rules: https://jira.taosdata.com:18090/pages/viewpage.action?pageId=123455481
+ tdSql.error("select tbcol1 from st order by 123")
+ tdSql.error("select tbcol1 from st order by tbname")
+ tdSql.error("select tbcol1 from st order by tagcol1")
+ tdSql.error("select tbcol1 from st order by ''")
+ tdSql.error("select top(tbcol1, 12) from st1 order by tbcol1,ts")
+ tdSql.error("select top(tbcol1, 12) from st order by tbcol1,ts,tbcol2")
+ tdSql.error("select top(tbcol1, 12) from st order by ts, tbcol1")
+ tdSql.error("select top(tbcol1, 2) from st1 group by tbcol1 order by tbcol2")
+
+ tdSql.query("select top(tbcol1, 2) from st1 group by tbcol2 order by tbcol2")
+ tdSql.query("select top(tbcol1, 12) from st order by tbcol1, ts")
+
+ tdSql.query("select avg(tbcol1) from st group by tbname order by tbname")
+ tdSql.checkData(1, 0, 5.5)
+ tdSql.checkData(5, 1, "st6")
+
+ tdSql.query("select top(tbcol1, 2) from st group by tbname order by tbname")
+ tdSql.checkData(1, 1, 10)
+ tdSql.checkData(2, 2, "st2")
+
+ tdSql.query("select top(tbcol1, 12) from st order by tbcol1")
+ tdSql.checkData(1, 1, 9)
+
+ tdSql.error("select top(tbcol1, 12) from st1 order by tbcol1,ts")
+ tdSql.error("select top(tbcol1, 12),tbname from st order by tbcol1,tbname")
+
+ tdSql.query("select top(tbcol1, 12) from st group by tbname order by tbname desc")
+ tdSql.checkData(1, 2, "st10")
+ tdSql.checkData(10, 2, "st9")
+
+ tdSql.query("select top(tbcol1, 2) from st group by tbname order by tbname desc,ts")
+ tdSql.checkData(1, 2, "st10")
+ tdSql.checkData(10, 2, "st5")
+ tdSql.checkData(0, 0, "2018-09-17 09:00:00.109")
+ tdSql.checkData(1, 0, "2018-09-17 09:00:00.110")
+ tdSql.checkData(2, 0, "2018-09-17 09:00:00.099")
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/select_last_crash.py b/tests/pytest/query/select_last_crash.py
index 9b580a24acd145e2e90ae6ee513e946d72820f2c..8bfd71ef2788fdcf1fc0f2876b52934633638068 100644
--- a/tests/pytest/query/select_last_crash.py
+++ b/tests/pytest/query/select_last_crash.py
@@ -16,6 +16,8 @@ import taos
from util.log import *
from util.cases import *
from util.sql import *
+from util.dnodes import *
+import random
class TDTestCase:
@@ -41,6 +43,32 @@ class TDTestCase:
tdSql.query("select last(*) from st")
tdSql.checkRows(1)
+
+ # TS-717
+ tdLog.info("case for TS-717")
+ cachelast_values = [0, 1, 2, 3]
+
+ for value in cachelast_values:
+ tdLog.info("case for cachelast value: %d" % value)
+ tdSql.execute("drop database if exists db")
+ tdLog.sleep(1)
+ tdSql.execute("create database db cachelast %d" % value)
+ tdSql.execute("use db")
+ tdSql.execute("create table stb(ts timestamp, c1 int, c2 binary(20), c3 binary(5)) tags(t1 int)")
+
+ sql = "insert into t1 using stb tags(1) (ts, c1, c2) values"
+ for i in range(self.rowNum):
+ sql += "(%d, %d, 'test')" % (self.ts + i, random.randint(1,100))
+ tdSql.execute(sql)
+
+ tdSql.query("select * from stb")
+ tdSql.checkRows(self.rowNum)
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.query("select * from stb")
+ tdSql.checkRows(self.rowNum)
def stop(self):
tdSql.close()
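The TS-717 case cycles through cachelast 0-3, which in TDengine 2.x presumably select no last-value caching, caching the last row, caching the last non-NULL value per column, and both, respectively; restarting the dnode between the two identical queries verifies that the cached last values are rebuilt from disk, the path the tsdbRestoreLastColumns() change above feeds into.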
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py b/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py
index d7926d6e5b5a3db80f3c66df0655266a5c673999..d64bf201f6cd7d9a1ce7870c578e7a80761f3c9c 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py
+++ b/tests/pytest/tools/taosdemoAllTest/TD-10539/create_taosdemo.py
@@ -112,7 +112,7 @@ class TDTestCase:
tdSql.checkRows(11)
tdSql.query("show create table test.`%s1` ; " %self.tsdemo)
tdSql.checkData(0, 0, self.tsdemo+str(1))
- tdSql.checkData(0, 1, "create table `%s1` (ts TIMESTAMP,c0 FLOAT,c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT)" %self.tsdemo)
+ tdSql.checkData(0, 1, "create table `%s1` (ts TIMESTAMP,c0 FLOAT,c1 INT,c2 FLOAT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT)" %self.tsdemo)
print("==============drop table\stable")
try:
|