diff --git a/.gitmodules b/.gitmodules index 5bb7cff2cd9fe465fac3ab932732069127a6a5b7..c890f52ad1c49439a6bee4a5e25bd333b053654e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,6 @@ [submodule "src/connector/go"] path = src/connector/go url = https://github.com/taosdata/driver-go.git -[submodule "src/connector/grafanaplugin"] - path = src/connector/grafanaplugin - url = https://github.com/taosdata/grafanaplugin.git [submodule "src/connector/hivemq-tdengine-extension"] path = src/connector/hivemq-tdengine-extension url = https://github.com/taosdata/hivemq-tdengine-extension.git diff --git a/Jenkinsfile b/Jenkinsfile index 297dde63e196ff3ae4083926f51bd6a88c1c9f3a..85ba42a96edf68aaaf39d5bb51237fef381be5fb 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -66,6 +66,7 @@ def pre_test(){ } sh''' cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -139,6 +140,7 @@ def pre_test_noinstall(){ } sh''' cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -209,6 +211,7 @@ def pre_test_mac(){ } sh''' cd ${WKC} + [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD @@ -247,6 +250,8 @@ def pre_test_mac(){ mkdir debug cd debug cmake .. > /dev/null + go env -w GOPROXY=https://goproxy.cn,direct + go env -w GO111MODULE=on cmake --build . ''' return 1 diff --git a/README.md b/README.md index a14003397bae69aa74a8a1ff7a55db18ae53a149..fb6eed0267c19259b5e931c8a98c19aff90d8deb 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ To build the [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debi sudo apt install libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config ``` -### Centos 7: +### CentOS 7: ```bash sudo yum install epel-release sudo yum update @@ -82,7 +82,7 @@ To install Apache Maven: sudo yum install -y maven ``` -### Centos 8 & Fedora: +### CentOS 8 & Fedora: ```bash sudo dnf install -y gcc gcc-c++ make cmake epel-release git ``` @@ -100,8 +100,9 @@ sudo dnf install -y maven #### Install build dependencies for taos-tools To build the [taos-tools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed. ```bash -sudo yum install xz-devel snappy-devel jansson-devel pkgconfig libatomic +sudo yum install libz-devel xz-devel snappy-devel jansson-devel pkgconfig libatomic ``` +Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it lead a cmake prompt libsnappy not found. But snappy will works well. ### Setup golang environment TDengine includes few components developed by Go language. Please refer to golang.org official documentation for golang environment setup. diff --git a/cmake/install.inc b/cmake/install.inc index e78bba8d8d293f8c9c76e00f22b74efedf9591b3..111efdae2dc3d186db16114ef238ebaddc5e5924 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -41,7 +41,6 @@ ELSEIF (TD_WINDOWS) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/jh_taos.exe DESTINATION .) 
ELSE () INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .) ENDIF () #INSTALL(TARGETS taos RUNTIME DESTINATION driver) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index bfd870bf6412bf19898f9f5d569e6536bc156b1a..d94a58eebb129e84137a8c55b1ca07be37ec15af 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -83,6 +83,10 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它 * [Rust Connector](/connector/rust): Rust语言下通过libtaos客户端或RESTful接口,连接TDengine服务器。 +## [TDengine 组件与工具](/tools/adapter) + +* [taosAdapter用户手册](/tools/adapter) + ## [与其他工具的连接](/connections) * [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据 diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index 3c9a30af995296b4711bfeef1fc00ba03eac38d0..243d8509f069e5073cc15ee69438ff6e2cc28e3d 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -29,7 +29,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, ## 无模式(Schemaless)写入 **前言** -
在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 +
在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 @@ -74,21 +74,19 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ### 无模式写入的主要处理逻辑 无模式写入按照如下原则来处理行数据: -1. 当 tag_set 中有 ID 字段时,该字段的值将作为子表的表名。 -2. 没有 ID 字段时,将使用如下规则来生成子表名: -首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串 +
1. 将使用如下规则来生成子表名:首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串 ```json "measurement,tag_key1=tag_value1,tag_key2=tag_value2" ``` 需要注意的是,这里的tag_key1, tag_key2并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。 排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。 -
3. 如果解析行协议获得的超级表不存在,则会创建这个超级表。 -
4. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 -
5. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 -
6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 -
7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 -
8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 -
9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 +
2. 如果解析行协议获得的超级表不存在,则会创建这个超级表。 +
3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 +
4. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。 +
5. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 +
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 +
7. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 +
8. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 **备注:**
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。 @@ -116,6 +114,17 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。 +**数据模式映射规则** +
本节将说明行协议的数据如何映射成为具有模式的数据。每个行协议中数据 measurement 映射为 超级表名称。tag_set 中的 标签名称为 数据模式中的标签名,field_set 中的名称为列名称。以如下数据为例,说明映射规则: + +```json +st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 +``` +该行数据映射生成一个超级表: st, 其包含了 3 个类型为 nchar 的标签,分别是:t1, t2, t3。五个数据列,分别是ts(timestamp),c1 (bigint),c3(binary),c2 (bool), c4 (bigint)。映射成为如下 SQL 语句: +```json +create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2)) +``` + **数据模式变更处理**
本节将说明不同行数据写入情况下,对于数据模式的影响。 diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index a788e9fa372207bc9085511fe0b16c925800627d..8ab9c6703f6aec0fda9f9cf5720d39b4fe90ff69 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -1037,43 +1037,62 @@ HTTP 请求 URL 采用 `sqlutc` 时,返回结果集的时间戳将采用 UTC ## CSharp Connector -C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 +* C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 +* C#连接器现在也支持从[Nuget下载引用](https://www.nuget.org/packages/TDengine.Connector/) + +* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 ### 安装准备 * 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 -* 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 -* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 +* 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 +* 安装[.NET SDK](https://dotnet.microsoft.com/download) ### 示例程序 -示例程序源码位于install_directory/examples/C#,有: +示例程序源码位于 +* {client_install_directory}/examples/C# +* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523) -TDengineTest.cs C#示例源程序 +**注意:** TDengineTest.cs C#示例源程序,包含了数据库连接参数,以及如何执行数据插入、查询等操作。 ### 安装验证 -运行install_directory/examples/C#/C#Checker/C#Checker.exe - +需要先安装 .Net SDK ```cmd -cd {install_directory}/examples/C#/C#Checker -csc /optimize *.cs -C#Checker.exe -h +cd {client_install_directory}/examples/C#/C#Checker +//运行测试 +dotnet run -- -h . // 此步骤会先build,然后再运行。 ``` ### C#连接器的使用 在Windows系统上,C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示: -1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。 -2. 用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作。 +需要 .NET SDK +* 创建一个c# project. +``` cmd +mkdir test +cd test +dotnet new console +``` +* 通过Nuget引用TDengineDriver包 +``` cmd +dotnet add package TDengine.Connector +``` +* 在项目中需要用到TDengineConnector的地方引用TDengineDriver namespace。 +```c# +using TDengineDriver; +``` +* 用户可以参考[TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523/TDengineTest)来定义数据库连接参数,以及如何执行数据插入、查询等操作。 -此接口需要用到taos.dll文件,所以在执行应用程序前,拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。 **注意:** -1. TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。 -2. 
此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。 +* TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。 +* 此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。 +* 此连接器需要用到taos.dll文件,所以在未安装客户端时需要在执行应用程序前,拷贝Windows{client_install_directory}/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。 + ### 第三方驱动 diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md index 04765602dab18fbacf7d92d44ca324db660c0ac4..485e7038f0e8aa122b20ba6608a629de66d7dc8c 100644 --- a/documentation20/cn/14.devops/01.telegraf/docs.md +++ b/documentation20/cn/14.devops/01.telegraf/docs.md @@ -25,8 +25,8 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ### Grafana 请参考[官方文档](https://grafana.com/grafana/download)。 -### 安装 TDengine -从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.3.0.0 或以上版本安装。 +### TDengine +从涛思数据官网[下载](http://taosdata.com/cn/all-downloads/)页面下载最新 TDengine-server 2.3.0.0 或以上版本安装。 ## 数据链路设置 diff --git a/documentation20/cn/14.devops/03.immigrate/docs.md b/documentation20/cn/14.devops/03.immigrate/docs.md index 980dc0c0f40632de40b54aec4de719eea4d8bc59..4acfecd0cfe903cd993e8c548e9c6b9032dde48a 100644 --- a/documentation20/cn/14.devops/03.immigrate/docs.md +++ b/documentation20/cn/14.devops/03.immigrate/docs.md @@ -8,7 +8,7 @@ - 数据写入和查询的性能远超 OpenTSDB; - 针对时序数据的高效压缩机制,压缩后在磁盘上的存储空间不到 1/5; -- 安装部署非常简单,单一安装包完成安装部署,除了 taosAdapter 需要依赖 Go 运行环境外,不依赖其他的第三方软件,整个安装部署过程秒级搞定; +- 安装部署非常简单,单一安装包完成安装部署,不依赖其他的第三方软件,整个安装部署过程秒级搞定; - 提供的内建函数覆盖 OpenTSDB 支持的全部查询函数,还支持更多的时序数据查询函数、标量函数及聚合函数,支持多种时间窗口聚合、连接查询、表达式运算、多种分组聚合、用户定义排序、以及用户定义函数等高级查询功能。采用类 SQL 的语法规则,更加简单易学,基本上没有学习成本。 - 支持多达 128 个标签,标签总长度可达到 16 KB; - 除 HTTP 之外,还提供 Java、Python、C、Rust、Go 等多种语言的接口,支持 JDBC 等多种企业级标准连接器协议。 @@ -40,7 +40,11 @@ - **调整数据收集器配置** -在 TDengine 2.3 版本中,后台服务 taosd 启动后一个 HTTP 的服务 taosAdapter 也会自动启用*。*利用 taosAdapter 能够兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议,可以将 collectd 和 StatsD 收集的数据直接推送到TDengine。 +在TDengine 2.3版本中,我们发布了taosAdapter ,taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。 + +用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。 + +通过taosAdapter,用户可以将 collectd 和 StatsD 收集的数据直接推送到TDengine ,实现应用场景的无缝迁移,非常的轻松便捷。taosAdapter还支持Telegraf、Icinga、TCollector 、node_exporter的数据接入,使用详情参考[taosAdapter](https://www.taosdata.com/cn/documentation/tools/adapter)。 如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为192.168.1.130,端口为 6046,配置如下: @@ -61,24 +65,9 @@ LoadPlugin write_tsdb - **调整看板(Dashborad)系统** -在数据能够正常写入TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。Grafana 暂时还不能够直接连接 TDengine,在 TDengine 的安装目录下 connector/grafanaplugin 有为 Grafana 提供的连接插件。使用该插件的方式很简单: - -首先将grafanaplugin目录下的dist目录整体拷贝到Grafana的插件目录(默认地址为 `/var/lib/grafana/plugins/`),然后重启 Grafana 即可在 **Add Data Source** 菜单下看见 TDengine 数据源。 - -```shell -sudo cp -r . 
/var/lib/grafana/plugins/tdengine -sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine -echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini - -# start grafana service -sudo service grafana-server restart -# or with systemd -sudo systemctl start grafana-server -``` - +在数据能够正常写入TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用TDengine提供的Grafana插件请参考[与其他工具的连接](https://www.taosdata.com/cn/documentation/connections#grafana)。 - -此外,TDengine 还提供了默认的两套Dashboard 模板,供用户快速查看保存到TDengine库里的信息。你只需要将 Grafana 目录下的模板导入到Grafana中即可激活使用。 +TDengine 提供了默认的两套Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到Grafana中即可激活使用。 ![](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg) @@ -129,8 +118,8 @@ TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的 | 序号 | 测量(metric) | 值名称 | 类型 | tag1 | tag2 | tag3 | tag4 | tag5 | | ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ | -| 1 | memory | value | double | host | memory_type | memory_type_instance | source | | -| 2 | swap | value | double | host | swap_type | swap_type_instance | source | | +| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a | +| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | | 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | @@ -181,7 +170,7 @@ select count(*) from memory 完成查询后,如果写入的数据与预期的相比没有差别,同时写入程序本身没有异常的报错信息,那么可用确认数据写入是完整有效的。 -TDengine不支持采用OpenTSDB的查询语法进行查询或数据获取处理,但是针对OpenTSDB的每种查询都提供对应的支持。你可以用检查附件2获取对应的查询处理的调整和应用使用的方式,如果需要全面了解TDengine支持的查询类型,请参阅TDengine的用户手册。 +TDengine不支持采用OpenTSDB的查询语法进行查询或数据获取处理,但是针对OpenTSDB的每种查询都提供对应的支持。可以用检查附录1获取对应的查询处理的调整和应用使用的方式,如果需要全面了解TDengine支持的查询类型,请参阅TDengine的用户手册。 TDengine支持标准的JDBC 3.0接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。 @@ -191,7 +180,21 @@ TDengine支持标准的JDBC 3.0接口操纵数据库,你也可以使用其他 为了方便历史数据的迁移工作,我们为数据同步工具DataX提供了插件,能够将数据自动写入到TDengine中,需要注意的是DataX的自动化数据迁移只能够支持单值模型的数据迁移过程。 -DataX 具体的使用方式及如何使用DataX将数据写入TDengine请参见其使用帮助手册 [github.com/taosdata/datax](http://github.com/taosdata/datax)。 +DataX 具体的使用方式及如何使用DataX将数据写入TDengine请参见[基于DataX的TDeninge数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 + +在对DataX进行迁移实践后,我们发现通过启动多个进程,同时迁移多个metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 + +| datax实例个数 (并发进程个数) | 迁移记录速度 (条/秒) | +| ---- | -------------- | +| 1 | 约13.9万 | +| 2 | 约21.8万 | +| 3 | 约24.9万 | +| 5 | 约29.5万 | +| 10 | 约33万 | + + +
(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16核64G硬件设备,channel和batchSize 分别为8和1000,每条记录包含10个tag)
+
### 2、手动迁移数据
diff --git a/documentation20/en/00.index/docs.md
index 2042a9c963664e65f960f8b7109511d63dd398f6..15f742e8b2935be70926341499dd357ee957b992 100644
--- a/documentation20/en/00.index/docs.md
+++ b/documentation20/en/00.index/docs.md
@@ -79,6 +79,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
- [Rust Connector](/connector/rust): A taosc/RESTful API based TDengine client for Rust
+## [Components and Tools](/tools/adapter)
+
+* [taosAdapter](/tools/adapter)
+
## [Connections with Other Tools](/connections)
- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
@@ -128,4 +132,4 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## FAQ
-- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
+- [FAQ: Common questions and answers](/faq)
diff --git a/documentation20/en/08.connector/docs.md
index 4f471f024ef0294badbfe7d02b97daae670c1cfa..da3c455a6dc83f7ee513f23a25793c31ee6c11b0 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -841,37 +841,60 @@ Only some configuration parameters related to RESTful interface are listed below
## CSharp Connector
-The C # connector supports: Linux 64/Windows x64/Windows x86.
+
+* The C# connector supports: Linux 64/Windows x64/Windows x86.
+* The C# connector can also be downloaded and referenced as a NuGet package from [Nuget.org](https://www.nuget.org/packages/TDengine.Connector/).
+* On Windows, C# applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver.
### Installation preparation
-- For application driver installation, please refer to the[ steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver).
-- . NET interface file TDengineDrivercs.cs and reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory.
-- On Windows, C # applications can use the native C interface of TDengine to perform all database operations, and future versions will provide the ORM (Dapper) framework driver.
+* For application driver installation, please refer to the [steps of installing the connector driver](https://www.taosdata.com/en/documentation/connector#driver).
+* The .NET interface file TDengineDrivercs.cs and the reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory.
+* Install the [.NET SDK](https://dotnet.microsoft.com/download)
-### Installation verification
+### Example Source Code
+You can find the sample code in the following locations:
+* {client_install_directory}/examples/C#
+* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523)
-Run install_directory/examples/C#/C#Checker/C#Checker.exe
+**Tips:** TDengineTest.cs is one of the C# connector's sample programs; it covers basic operations such as connecting to the database and executing SQL, as illustrated by the sketch below.
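As a quick orientation, here is a minimal sketch of such a program. It assumes the TDengineDriver API surface used by TDengineTest.cs (TDengine.Init/Connect/Query/ErrorNo/FreeResult/Close/Cleanup); check the sample itself for the exact signatures and for richer result handling.

```csharp
using System;
using TDengineDriver;

class ConnectSketch
{
    static void Main(string[] args)
    {
        // Hypothetical connection parameters; adjust host, user, password and port to your deployment.
        TDengine.Init();
        IntPtr conn = TDengine.Connect("127.0.0.1", "root", "taosdata", "", 0);
        if (conn == IntPtr.Zero)
        {
            Console.WriteLine("Failed to connect to TDengine");
            return;
        }

        // Run a simple statement and check the result code before freeing the result set.
        IntPtr res = TDengine.Query(conn, "SHOW DATABASES");
        if (res != IntPtr.Zero && TDengine.ErrorNo(res) == 0)
        {
            Console.WriteLine("Query succeeded");
        }
        else
        {
            Console.WriteLine("Query failed");
        }
        if (res != IntPtr.Zero)
        {
            TDengine.FreeResult(res);
        }

        TDengine.Close(conn);
        TDengine.Cleanup();
    }
}
```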
+
+### Installation verification
+Run {client_install_directory}/examples/C#/C#Checker/C#Checker.cs
+The .NET SDK needs to be installed first
```cmd
-cd {install_directory}/examples/C#/C#Checker
-csc /optimize *.cs
-C#Checker.exe -h
+cd {client_install_directory}/examples/C#/C#Checker
+//run C#Checker.cs
+dotnet run -- -h //dotnet run builds the project first by default.
```
### How to use C# connector
- On Windows system, .NET applications can use the .NET interface of TDengine to perform all database operations. The steps to use it are as follows:
-1. Add the. NET interface file TDengineDrivercs.cs to the .NET project where the application is located.
-2. Users can refer to TDengineTest.cs to define database connection parameters and how to perform data insert, query and other operations;
+The .NET SDK needs to be installed first
+* Create a C# project.
+``` cmd
+mkdir test
+cd test
+dotnet new console
+```
+* Add TDengine.Connector as a package through NuGet
-This. NET interface requires the taos.dll file, so before executing the application, copy the taos.dll file in the Windows client install_directory/driver directory to the folder where the. NET project finally generated the .exe executable file. After running the exe file, you can access the TDengine database and do operations such as insert and query.
+``` cmd
+dotnet add package TDengine.Connector
+```
+* Include the TDengineDriver namespace in your application
+```C#
+using TDengineDriver;
+```
+* Refer to [TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523/TDengineTest) to learn how to define database connection parameters and perform inserts, queries and other basic data manipulations.
**Note:**
-1. TDengine V2.0. 3.0 supports both 32-bit and 64-bit Windows systems, so when. NET project generates a .exe file, please select the corresponding "X86" or "x64" for the "Platform" under "Solution"/"Project".
-2. This. NET interface has been verified in Visual Studio 2015/2017, and other VS versions have yet to be verified.
+* TDengine V2.0.3.0 supports both 32-bit and 64-bit Windows systems, so when a .NET project generates a .exe file, please select the corresponding "X86" or "x64" for the "Platform" under "Solution"/"Project".
+* This .NET interface has been verified in Visual Studio 2015/2017; other VS versions have yet to be verified.
+* Since this .NET connector requires the taos.dll file, before executing the application copy the taos.dll file from the Windows {client_install_directory}/driver directory to the folder where the .NET project generates its .exe executable. After running the exe file, you can access the TDengine database and perform operations such as insert and query (this step can be skipped if the client has already been installed on your machine).
### Third-party Driver
diff --git a/documentation20/en/14.devops/01.telegraf/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..a8b5db08ccc1131611c12fb53970115a89368376
--- /dev/null
+++ b/documentation20/en/14.devops/01.telegraf/docs.md
@@ -0,0 +1,75 @@
+# Rapidly build an IT DevOps system with TDengine + Telegraf + Grafana
+
+## Background
+TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
+
+There is a lot of time-series data in the IT DevOps scenario, for example:
+- Metrics of system resources: CPU, memory, IO and network status, etc.
+- Metrics of software systems: service status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other metrics related to the specific business.
+
+A mainstream IT DevOps system generally includes a data-collection module, a data persistence module, and a visualization module. Telegraf and Grafana are among the most popular data-collection and visualization modules, while the data persistence module can vary; OpenTSDB and InfluxDB are among the most prominent choices. In recent times, TDengine, as an emerging time-series data platform, provides further advantages including high performance, high reliability, easier management and easier maintenance.
+
+Here we introduce a way to build an IT DevOps system with TDengine, Telegraf, and Grafana without writing a single line of code, just by modifying a few lines of configuration files.
+
+![IT-DevOps-Solutions-Telegraf.png](../../images/IT-DevOps-Solutions-Telegraf.png)
+
+
+## Installation steps
+
+### Install Telegraf, Grafana and TDengine
+Please refer to each component's official documentation for Telegraf, Grafana, and TDengine installation.
+
+### Telegraf
+Please refer to the [official document](https://portal.influxdata.com/downloads/).
+
+### Grafana
+Please refer to the [official document](https://grafana.com/grafana/download).
+
+### TDengine
+Please download TDengine 2.3.0.0 or a later version from TAOS Data's [official website](http://taosdata.com/en/all-downloads/).
+
+
+## Set up the data chain
+### Download the TDengine plugin to Grafana's plugin directory
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
+```
+
+### Modify /etc/telegraf/telegraf.conf
+Please add the following lines to /etc/telegraf/telegraf.conf. Fill in the name of the database in which you want to save Telegraf's data in TDengine, and specify the correct values for the hostname of the TDengine server/cluster, the username, and the password:
+```
+[[outputs.http]]
+  url = "http://:6041/influxdb/v1/write?db="
+  method = "POST"
+  timeout = "5s"
+  username = ""
+  password = ""
+  data_format = "influx"
+  influx_max_line_bytes = 250
+```
+
+Then restart Telegraf:
+```
+sudo systemctl start telegraf
+```
+
+
+### Import dashboard
+
+Use your web browser to access IP:3000 to log in to the Grafana management interface. The default username and password are admin/admin.
+
+Click the 'gear' icon on the left bar and select 'Plugins'. You should find the icon of the TDengine data source plugin.
+
+Click the 'plus' icon on the left bar and select 'Import'. You can download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json and then import it into Grafana. After that, you should see an interface like:
+
+![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png)
+
+
+## Summary
+
+We demonstrated how to build a fully functional IT DevOps system with TDengine, Telegraf, and Grafana.
TDengine supports schemaless protocol data insertion from 2.3.0.0. Based on TDengine's powerful ecosystem integration capability, users can build a highly efficient and easy-to-maintain IT DevOps system in a few minutes. Please find more detailed documentation about TDengine's high-performance data insertion/query functions and more use cases on TAOS Data's official website.
diff --git a/documentation20/en/14.devops/02.collectd/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..15a83d7f0c78f9e36122d4c7a0c125daddfa1c6a
--- /dev/null
+++ b/documentation20/en/14.devops/02.collectd/docs.md
@@ -0,0 +1,84 @@
+# Rapidly build an IT DevOps system with TDengine + collectd/StatsD + Grafana
+
+## Background
+TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
+
+There is a lot of time-series data in the IT DevOps scenario, for example:
+- Metrics of system resources: CPU, memory, IO and network status, etc.
+- Metrics of software systems: service status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other metrics related to the specific business.
+
+A mainstream IT DevOps system generally includes a data-collection module, a data persistence module, and a visualization module. Telegraf and Grafana are among the most popular data-collection and visualization modules, while the data persistence module can vary; OpenTSDB and InfluxDB are among the most prominent choices. In recent times, TDengine, as an emerging time-series data platform, provides further advantages including high performance, high reliability, easier management and easier maintenance.
+
+Here we introduce a way to build an IT DevOps system with TDengine, collectd/StatsD, and Grafana without writing a single line of code, just by modifying a few lines of configuration files.
+
+![IT-DevOps-Solutions-Collectd-StatsD.png](../../images/IT-DevOps-Solutions-Collectd-StatsD.png)
+
+## Installation steps
+Please refer to each component's official documentation for collectd, StatsD, Grafana, and TDengine installation.
+
+### collectd
+Please refer to the [official document](https://collectd.org/documentation.shtml).
+
+### StatsD
+Please refer to the [official document](https://github.com/statsd/statsd).
+
+### Grafana
+Please refer to the [official document](https://grafana.com/grafana/download).
+
+### TDengine
+Please download TDengine 2.3.0.0 or a later version from TAOS Data's [official website](http://taosdata.com/cn/all-downloads/).
+
+## Set up the data chain
+### Download the TDengine plugin to Grafana's plugin directory
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
+```
+
+### To configure collectd
+Please add a few lines to /etc/collectd/collectd.conf as below.
Please specify the correct value for hostname and the port number: +``` +LoadPlugin network + + Server "" "" + + +sudo systemctl start collectd +``` + +### To configure StatsD +Please add a few lines in the config.js file then restart StatsD. Please use the correct hostname and port number of TDengine and taosAdapter: +``` +fill backends section with "./backends/repeater" +fill repeater section with { host:'', port: } +``` + +### Import dashboard + +Use your Web browser to access IP:3000 to log in to the Grafana management interface. The default username and password are admin/admin。 + +Click the gear icon from the left bar to select 'Plugins'. You could find the icon of the TDengine data source plugin. + +#### Import collectd dashboard + +Please download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json. + +Click the 'plus' icon from the left bar to select 'Import'. Then you should see the interface like: + +![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png) + +#### Import StatsD dashboard + +Please download dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json. + +Click the 'plus' icon from the left bar to select 'Import'. Then you should see the interface like: + +![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png) + +## Summary + +We demonstrated how to build a full-function IT DevOps system with TDengine, collectd, StatsD, and Grafana. TDengine supports schemaless protocol data insertion capability from 2.3.0.0. Based on TDengine's powerful ecosystem software integration capability, the user can build a high efficient and easy-to-maintain IT DevOps system in few minutes. Please find more detailed documentation about TDengine high-performance data insertion/query functions and more use cases from TAOS Data's official website. diff --git a/documentation20/en/14.devops/03.immigrate/docs.md b/documentation20/en/14.devops/03.immigrate/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..ebba18710f6218ce7043b563a99246ccf62035f9 --- /dev/null +++ b/documentation20/en/14.devops/03.immigrate/docs.md @@ -0,0 +1,436 @@ +# Best practice of immigration from OpenTSDB to TDengine + +As a distributed, scalable, HBase-based distributed temporal database system, OpenTSDB has been introduced and widely used in the field of operation and monitoring by people in DevOps due to its first-mover advantage. However, in recent years, with the rapid development of new technologies such as cloud computing, microservices, and containerization, enterprise-level services have become more and more diverse, and the architecture has become more and more complex, and the application operation infrastructure environment has become more and more diverse, which brings more and more pressure on system and operation monitoring. From this status quo, the use of OpenTSDB as the monitoring backend storage for DevOps is increasingly plagued by performance issues and slow feature upgrades, as well as the resulting increase in application deployment costs and reduced operational efficiency, which are becoming more and more serious as the system scales up. 
+
+In this context, to meet the fast-growing IoT big data market and technical demands, TAOS Data has independently developed an innovative big data processing product, TDengine, after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, etc. TDengine has unique advantages in time-series big data processing and can effectively solve the problems currently encountered by OpenTSDB.
+
+Compared with OpenTSDB, TDengine has the following distinctive features:
+
+- Performance of data writing and querying far exceeds that of OpenTSDB.
+- Efficient compression mechanism for time-series data, which takes less than 1/5 of the storage space on disk after compression.
+- The installation and deployment is very simple: a single installation package completes the installation and deployment, no other third-party software is required, and the entire installation and deployment process takes only seconds;
+- The built-in functions cover all the query functions supported by OpenTSDB, and also support more time-series data query functions, scalar functions and aggregation functions, as well as advanced query features such as multiple time-window aggregation, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. Adopting SQL-like syntax rules, it is easier to learn and basically has no learning cost.
+- Supports up to 128 tags with a total tag length of up to 16 KB.
+- In addition to HTTP, it also provides interfaces to Java, Python, C, Rust, Go, and other languages, and supports a variety of enterprise-class standard connector protocols such as JDBC.
+
+If we migrate applications originally running on OpenTSDB to TDengine, we can not only effectively reduce the consumption of computing and storage resources and the scale of deployed servers, but also greatly reduce operation and maintenance costs, making operation and maintenance management simpler and easier, and significantly reducing the total cost of ownership. Like OpenTSDB, TDengine has also been open sourced, but the difference is that, in addition to the stand-alone version, the latter has also open sourced its cluster version, so the concern of being locked in by a vendor is swept away.
+
+In the following section we will explain how to migrate OpenTSDB applications to TDengine quickly, securely and reliably without coding, using the most typical and widely used DevOps scenarios. Subsequent chapters will provide more in-depth coverage to facilitate migration for non-DevOps scenarios.
+
+## Rapid migration of DevOps applications
+
+### 1. Typical Application Scenarios
+
+The overall system architecture of a typical DevOps application scenario is shown in the figure below (Figure 1).
+
+![IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg)
Figure 1. Typical architecture in a DevOps scenario
+ +In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics, data collectors to aggregate information collected by agents, systems for data persistence storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.). + +Among them, Agents deployed in application nodes are responsible for providing operational metrics from different sources to collectd/Statsd, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization kanban board Grafana. + +### 2. Migration Service + +- **TDengine installation and deployment** + +First of all, TDengine should be installed. Download the latest stable version of TDengine from the official website, unzip it and run install.sh to install it. For help on using various installation packages, please refer to the blog ["Installation and uninstallation of various TDengine installation packages"](https://www.taosdata.com/blog/2019/08/09/566.html). + +Note that after the installation, do not start the taosd service immediately, but start it after the parameters are correctly configured. + +- **Adjusting the data collector configuration** + +In TDengine version 2.3, an HTTP service taosAdapter is automatically enabled after the backend service taosd is started. The taosAdapter is compatible with Influxdb's Line Protocol and OpenTSDB's telnet/JSON write protocol, allowing data collected by collectd and StatsD to be pushed directly to TDengine. + +If you use collectd, modify the configuration file in its default location /etc/collectd/collectd.conf to point to the IP address and port of the node where taosAdapter is deployed. Assuming the taosAdapter IP address is 192.168.1.130 and the port is 6046, configure it as follows + +```html +LoadPlugin write_tsdb + + + Host "192.168.1.130" + Port "6046" + HostTags "status=production" + StoreRates false + AlwaysAppendDS false + +``` + +This allows collectd to push the data to taosAdapter using the push to OpenTSDB plugin. taosAdapter will call the API to write the data to taosd, thus completing the writing of the data. If you are using StatsD adjust the profile information accordingly. + +- **Adjusting the Dashboard system** + +After the data has been written to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. There is a connection plugin for Grafana in the TDengine installation directory connector/grafanaplugin. The way to use this plugin is simple. + +First copy the entire dist directory under the grafanaplugin directory to Grafana's plugins directory (the default address is /var/lib/grafana/plugins/), and then restart Grafana to see the TDengine data source under the Add Data Source menu. + +```shell +sudo cp -r . /var/lib/grafana/plugins/tdengine +sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine +echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini + +# start grafana service +sudo service grafana-server restart +# or with systemd +sudo systemctl start grafana-server +``` + + + +In addition, TDengine provides two default Dashboard templates for users to quickly view the information saved to the TDengine repository. You can simply import the templates from the Grafana directory into Grafana to activate their use. 
+ +![](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg) + +
Figure 2. Importing Grafana Templates
+
+After the above steps, you have completed the migration of OpenTSDB to TDengine. You can see that the whole process is very simple: no code needs to be written, and only some configuration files need to be adjusted to complete the migration work.
+
+### 3. Post-migration architecture
+
+After the migration is completed, the overall architecture of the system at this time is shown in the figure below (Figure 3). The acquisition side, data writing side, and monitoring presentation side all remain stable during the whole process, which does not involve any important changes or alterations except for very few configuration adjustments. The only real change is the move from OpenTSDB to TDengine, which gains TDengine's more powerful processing capability and query performance.
+
+In most DevOps scenarios, if you have a small OpenTSDB cluster (3 nodes or fewer) as the storage side of DevOps, relying on OpenTSDB to provide data storage and query functions for the system persistence layer, then you can safely replace it with TDengine and save more compute and storage resources. With the same configuration of computing resources, a single TDengine instance can meet the service capacity provided by 3~5 OpenTSDB nodes. If the scale is relatively large, then a TDengine cluster is required.
+
+If your application is particularly complex, or the application domain is not a DevOps scenario, you can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating OpenTSDB applications to TDengine.
+
+![IT-DevOps-Solutions-Immigrate-TDengine-Arch](../../images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg)
Figure 3. System architecture after the migration is complete
+ +## Migration evaluation and strategy for other scenarios + +### 1. Differences between TDengine and OpenTSDB + +This chapter describes in detail the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can thoroughly evaluate whether you can migrate certain complex OpenTSDB-based applications to TDengine, and what you should pay attention to after the migration. + +TDengine currently only supports Grafana visual kanban rendering, so if your application uses a front-end kanban other than Grafana (e.g. [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.), then the front-end kanban cannot be migrated directly to TDengine and will need to be re-adapted to Grafana before it can function properly. + +As of version 2.3.0.x, TDengine can only support collectd and StatsD as data collection aggregation software, but more data collection aggregation software will be provided in the future. If you use other types of data aggregators on the collection side, your application needs to be adapted to these two data aggregation systems to be able to write data properly. In addition to the two data aggregation end software protocols mentioned above, TDengine also supports writing data directly via InfluxDB's row protocol and OpenTSDB's data writing protocol, JSON format, and you can rewrite the logic on the data push side to write data using the row protocols supported by TDengine. + +In addition, if you use the following features of OpenTSDB in your application, you need to understand the following considerations before migrating your application to TDengine. + +1. `/api/stats`: TDengine provides a new mechanism for handling cluster state monitoring to meet your application's monitoring and maintenance needs of your application. +2. `/api/tree`: TDengine uses a hierarchy of database -> supertable -> sub-table to organize and maintain timelines, with all timelines belonging to the same supertable at the same level in the system. However, it is possible to simulate a logical multi-level structure of the application through the special construction of different tag values. +3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and in some scenarios to access the original results, the opaqueness of this structure makes the application processing logic extremely complex and completely non-portable. TDengine does not support automatic downsampling of multiple timelines and preaggregates (for a range of periods) for the time being, but due to its high-performance query processing logic, it can provide high performance even without relying on Rollup and preaggregates. +4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely Derivative (whose calculation results are consistent with InfluxDB's Derivative behavior) and IRate (whose calculation results are consistent with the IRate function in Prometheus). However, the results of these two functions differ slightly from Rate, but are more powerful overall. In addition,** all the calculation functions provided by OpenTSDB are supported by TDengine with corresponding query functions, and the query functions of TDengine far exceed the query functions supported by OpenTSDB,** which can greatly simplify your application processing logic. 
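As an illustration of the last point, an OpenTSDB Rate-style query over a hypothetical metric could be re-expressed with DERIVATIVE or IRATE and issued through any connector. The sketch below reuses the hedged TDengineDriver calls shown earlier for the C# connector; the table name `cpu`, the column `val`, and the exact function syntax are assumptions that should be checked against the TDengine user manual.

```csharp
using System;
using TDengineDriver;

class RateQuerySketch
{
    static void Main(string[] args)
    {
        // Hypothetical deployment and schema: database "devops", table "cpu", value column "val".
        TDengine.Init();
        IntPtr conn = TDengine.Connect("127.0.0.1", "root", "taosdata", "devops", 0);
        if (conn == IntPtr.Zero) { Console.WriteLine("connect failed"); return; }

        string[] rateLikeQueries =
        {
            // Change rate per unit time between adjacent samples (InfluxDB Derivative semantics).
            "SELECT DERIVATIVE(val, 1s, 0) FROM cpu WHERE ts > now - 1h",
            // Instantaneous rate based on the last two samples (Prometheus IRate semantics).
            "SELECT IRATE(val) FROM cpu WHERE ts > now - 1h"
        };

        foreach (string sql in rateLikeQueries)
        {
            IntPtr res = TDengine.Query(conn, sql);
            Console.WriteLine(sql + " => " + (res != IntPtr.Zero && TDengine.ErrorNo(res) == 0 ? "ok" : "error"));
            if (res != IntPtr.Zero) TDengine.FreeResult(res);
        }

        TDengine.Close(conn);
        TDengine.Cleanup();
    }
}
```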
+ +Through the above introduction, I believe you should be able to understand the changes brought by the migration of OpenTSDB to TDengine, and this information will also help you correctly judge whether it is acceptable to migrate your application to TDengine, and experience the powerful timing data processing capability and convenient user experience provided by TDengine. + +### 2. Migration strategy + +First of all, the OpenTSDB-based system will be migrated involving data schema design, system scale estimation, data write end transformation, data streaming, and application adaptation; after that, the two systems will run in parallel for a period of time, and then the historical data will be migrated to TDengine. Of course, if your application has some functions that strongly depend on the above OpenTSDB features, and at the same time, You can consider keeping the original OpenTSDB system running while starting TDengine to provide the main services. + +## Data model design + +On the one hand, TDengine requires a strict schema definition for its incoming data. On the other hand, the data model of TDengine is richer than that of OpenTSDB, and the multi-valued model is compatible with all single-valued model building requirements. + +Now let's assume a DevOps scenario where we use collectd to collect base metrics of devices, including memory, swap, disk, etc. The schema in OpenTSDB is as follows: + +| No. | metric | value | type | tag1 | tag2 | tag3 | tag4 | tag5 | +| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ | +| 1 | memory | value | double | host | memory_type | memory_type_instance | source | | +| 2 | swap | value | double | host | swap_type | swap_type_instance | source | | +| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | + + + +TDengine requires data stored to have a data schema, i.e., you need to create a supertable and specify the schema of the supertable before writing the data. For data schema creation, you have two ways to do this: 1) Take full advantage of TDengine's native data writing support for OpenTSDB by calling the API provided by TDengine to write the data (in text line or JSON format) to the super table and automate the creation of the single-value model. And automate the creation of single-value models. This approach does not require major adjustments to the data writing application, nor does it require conversion of the written data format. + +At the C level, TDengine provides taos_insert_lines to write data in OpenTSDB format directly (in version 2.3.x this function corresponds to taos_schemaless_insert). For the code reference example, please refer to the sample code schemaless.c in the installation package directory. + + (2) Based on the full understanding of TDengine's data model, establish the mapping relationship between OpenTSDB and TDengine's data model adjustment manually, taking into account that OpenTSDB is a single-value mapping model, it is recommended to use the single-value model in TDengine. TDengine supports both multi-value and single-value models. + +- **Single-valued model**. + +The steps are as follows: the name of the metrics is used as the name of the TDengine super table, which is built with two basic data columns - timestamp and value, and the labels of the super table are equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. 
The sub-tables are named using a fixed rule row naming: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ... ` as sub-table names. + +Create 3 super tables in TDengine. + +```sql +create stable memory(ts timestamp, val float) tags(host binary(12),memory_type binary(20), memory_type_instance binary(20), source binary(20)); +create stable swap(ts timestamp, val double) tags(host binary(12), swap_type binary(20), swap_type_binary binary(20), source binary(20)); +create stable disk(ts timestamp, val double) tags(host binary(12), disk_point binary(20), disk_instance binary(20), disk_type binary(20), source binary(20)); +``` + + + +For sub-tables use dynamic table creation as shown below: + +```sql +insert into memory_vm130_memory_bufferred_collectd using memory tags(‘vm130’, ‘memory’, 'buffer', 'collectd') values(1632979445, 3.0656); +``` + +Eventually about 340 sub-tables and 3 super-tables will be created in the system. Note that if the use of concatenated tagged values causes the sub-table names to exceed the system limit (191 bytes), then some encoding (e.g. MD5) needs to be used to convert them to an acceptable length. + +- **Multi-value model** + +If you want to take advantage of TDengine's multi-value modeling capabilities, you need to first meet the requirements that different collection quantities have the same collection frequency and can reach the **data writing side simultaneously via a message queue**, thus ensuring that multiple metrics are written at once using SQL statements. The name of the metric is used as the name of the super table to create a multi-column model of data with the same collection frequency and capable of arriving at the same. The data can be collected with the same frequency and arrive in multiple columns. The names of the sub-tables are named using a fixed rule. Each metric above contains only one measurement value, so it cannot be transformed into a multi-value model. + + + +## Data triage and application adaptation + +Data is subscribed from the message queue and an adapted writer is started to write the data. + +After the data starts to be written for a sustained period, SQL statements can be used to check whether the amount of data written meets the expected write requirements. The following SQL statement is used to count the amount of data. + +```sql +select count(*) from memory +``` + +After completing the query, if the written data does not differ from the expected one, and there are no abnormal error messages from the writing program itself, then you can confirm that the data writing is complete and valid. + +TDengine does not support query or data fetch processing using OpenTSDB query syntax, but it does provide support for each type of OpenTSDB query. You can check Annex 2 for the corresponding query processing adjustments and application usage, or refer to the TDengine user manual for a full understanding of the types of queries supported by TDengine. + +TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language connectors for querying and reading data to suit your application. See also the user manual for the specific operation and usage help. + +## Historical data migration + +### 1. 
Use the tool to migrate data automatically
+
+To facilitate the migration of historical data, we provide a plug-in for the data synchronization tool DataX, which can automatically write data to TDengine. It should be noted that DataX's automated data migration only supports the migration of single-value models.
+
+Please refer to DataX's help manual [github.com/taosdata/datax](http://github.com/taosdata/datax) for details on how to use DataX and how to use it to write data to TDengine.
+
+### 2. Migrate data manually
+
+If you need to use a multi-value model for data writing, you need to develop your own tool to export data from OpenTSDB, then confirm which timelines can be merged and imported into the same timeline, and then write the timelines that can be imported together into the database using SQL statements.
+
+The manual migration of data requires attention to two issues:
+
+1) When storing the exported data on disk, the disk needs to have enough storage space to adequately accommodate the exported data files. To avoid straining the disk file storage after exporting the full amount of data, a partial import mode can be adopted, with the timelines belonging to the same super table being exported first, and the exported part of the data files then being imported into the TDengine system.
+
+2) Under full-load operation of the system, if there are enough remaining computing and IO resources, a multi-threaded import mechanism can be established to maximize the efficiency of data migration. Considering the heavy load that data parsing puts on the CPU, the maximum number of parallel tasks needs to be controlled to avoid overloading the system while importing historical data.
+
+Due to the ease of operation of TDengine itself, there is no need to perform index maintenance, data format change processing, etc. throughout the process, and the whole process only needs to be executed sequentially.
+
+Once the historical data is fully imported into TDengine, the two systems run simultaneously, after which the query requests can be switched to TDengine, thus achieving a seamless application switchover.
+
+## Appendix 1: Correspondence table of OpenTSDB query functions
+
+**Avg**
+
+Equivalent function: avg
+
+Example:
+
+SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s)
+
+Notes:
+
+1. The value within the Interval needs to be the same as the interval value of the outer query.
+2. As the interpolation of values in OpenTSDB uses linear interpolation, use fill(linear) to declare the interpolation type in the interpolation clause. The following functions with the same interpolation requirements are all handled in this way.
+3. The 20s parameter in Interval means that the inner query will generate results in a 20-second window. In a real query, it needs to be adjusted to the time interval between different records. This ensures that the interpolation results are generated equivalently to the original data.
+4. Due to the special interpolation strategy and mechanism of OpenTSDB, the way of interpolating before computation in an Aggregate query makes it impossible for the computation result to be the same as TDengine's. However, in the case of Downsample, TDengine and OpenTSDB can obtain the same result (because OpenTSDB uses a completely different interpolation strategy for Aggregate and Downsample queries).
+
+**Count**
+
+Equivalent function: count
+
+Example:
+
+select count(*) from super_table_name;
+
+**Dev**
+
+Equivalent function: stddev
+
+Example:
+
+Select stddev(val) from table_name
+
+**Estimated percentiles**
+
+Equivalent function: apercentile
+
+Example:
+
+Select apercentile(col1, 50, "t-digest") from table_name
+
+Remark:
+
+1. The t-digest algorithm is used by default in OpenTSDB during approximate query processing, so to get the same calculation result, you need to specify the algorithm used in the apercentile function. TDengine supports two different approximation algorithms, declared by "default" and "t-digest".
+
+**First**
+
+Equivalent function: first
+
+Example:
+
+Select first(col1) from table_name
+
+**Last**
+
+Equivalent function: last
+
+Example:
+
+Select last(col1) from table_name
+
+**Max**
+
+Equivalent function: max
+
+Example:
+
+Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
+
+Note: The Max function requires interpolation, for the reasons given above.
+
+**Min**
+
+Equivalent function: min
+
+Example:
+
+Select min(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s);
+
+**MinMax**
+
+Equivalent function: max
+
+Select max(val) from table_name
+
+Note: This function does not require interpolation, so it can be calculated directly.
+
+**MimMin**
+
+Equivalent function: min
+
+Select min(val) from table_name
+
+Note: This function does not require interpolation, so it can be calculated directly.
+
+**Percentile**
+
+Equivalent function: percentile
+
+Remark:
+
+**Sum**
+
+Equivalent function: sum
+
+Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
+
+Note: The Sum function requires interpolation, for the reasons given above.
+
+**Zimsum**
+
+Equivalent function: sum
+
+Select sum(val) from table_name
+
+Note: This function does not require interpolation, so it can be calculated directly.
+
+Complete example:
+
+```json
+//OpenTSDB query JSON
+query = {
+"start":1510560000,
+"end": 1515000009,
+"queries":[{
+"aggregator": "count",
+"metric":"cpu.usage_user",
+}]
+}
+
+// Equivalent SQL:
+SELECT count(*)
+FROM `cpu.usage_user`
+WHERE ts>=1510560000 AND ts<=1515000009
+```
+
+## Appendix 2: Resource Estimation Methodology
+
+### Data generation environment
+
+We still use the hypothetical environment from Chapter 4 with 3 measurements. Temperature and humidity are written at a rate of one record every 5 seconds, with 100,000 timelines. Air quality is written at a rate of one record every 10 seconds, with 10,000 timelines, and the query request frequency is 500 QPS.
+
+### Storage resource estimation
+
+Assuming that the number of sensor devices that generate data and require storage is `n`, the frequency of data generation is `t` records/second, and the length of each record is `L` bytes, the amount of data generated per second is `n×t×L` bytes. Assuming a compression ratio of C, the amount actually stored is `(n×t×L)/C` bytes. The storage resources are estimated to be able to accommodate 1.5 years of data.
+
+## Appendix 2: Resource Estimation Methodology
+
+### Data generation environment
+
+We still use the hypothetical environment from Chapter 4, with three measurements. Temperature and humidity are written at a rate of one record every 5 seconds, with 100,000 timelines; air quality is written at a rate of one record every 10 seconds, with 10,000 timelines; and the query request frequency is 500 QPS.
+
+### Storage resource estimation
+
+Assuming that the number of sensor devices that generate data and require storage is `n`, the frequency of data generation is `t` records/second, and the length of each record is `L` bytes, the size of data generated per day is `86400×n×t×L` bytes. Assuming a compression ratio of `C`, the size of data stored per day is `(86400×n×t×L)/C` bytes. Storage resources are estimated so that they can accommodate 1.5 years of data. In a production environment, the compression ratio C of TDengine is generally between 5 and 7; adding 20% redundancy to the final result, we can calculate the required storage resources:
+
+```matlab
+(86400×n×t×L)×(365×1.5)×(1+20%)/C
+```
+
+Bringing the parameters into the formula above, the size of the raw data generated per year, without considering tag information, is 11.8 TB. Note that since tag information is associated with each timeline in TDengine rather than with every record, the volume of data actually recorded is somewhat smaller than the data generated, and this part of the tag data can be neglected as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
+
+### Storage device selection considerations
+
+A hard disk with good random read performance should be used; if SSDs are available, use them whenever possible. Good random read performance is extremely helpful in improving the system's query performance and the overall query response of the system. To obtain good query performance, the single-threaded random read IOPS of the hard disk device should not be lower than 1,000, and preferably above 5,000. To evaluate the random read IO performance of the current device, it is recommended to use fio to measure it (see Appendix 1 for details on how to use it) and confirm whether it meets the requirements for random reads of large files.
+
+Hard disk write performance has little impact on TDengine. TDengine writes in append-only mode, so as long as the device has good sequential write performance, both SAS hard disks and SSDs can, in general, meet TDengine's requirements for disk write performance.
+
+### Computational resource estimation
+
+Due to the specific characteristics of IoT data, once the frequency of data generation is fixed, the TDengine write process consumes a relatively fixed amount of resources (both computation and storage). As described in [TDengine Operation and Maintenance](https://www.taosdata.com/cn/documentation/administrator), the 22,000 writes per second in this system consume less than 1 CPU core.
+
+To estimate the CPU resources required for queries: assuming the application requires 10,000 QPS from the database and each query consumes about 1 ms of CPU time, each core provides 1,000 QPS, so at least 10 cores are required to satisfy 10,000 QPS. To keep the overall CPU load of the system below 50%, the cluster needs twice that number, i.e., 20 cores.
+
+### Memory resource estimation
+
+By default, the database allocates 16 MB × 3 buffers of memory for each Vnode. Since the cluster contains 22 CPU cores, 22 Vnodes will be created by default; with 1,000 tables per Vnode, this can accommodate all the tables. It then takes about an hour and a half to fill a block and trigger a flush to disk, so this setting can be left unchanged. The 22 Vnodes require a total memory cache of about 1 GB. Considering the memory required for queries, and assuming a memory overhead of about 50 MB per query, 500 concurrent queries require about 25 GB of memory.
+
+In summary, a single 16-core 32 GB machine can be used, or a cluster of two 8-core 16 GB machines.
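+
+The arithmetic behind the estimates in this appendix can be captured in a few lines. The sketch below implements the storage formula and the memory rule of thumb described above; the record length and the exact default values are assumptions, so the printed figures are illustrations rather than sizing recommendations.
+
+```python
+# Rough sizing helper based on the formulas in this appendix.
+# The record length L is an assumption; other defaults mirror the scenario above.
+def storage_bytes(records_per_second, record_len=20, compression=5.0,
+                  years=1.5, redundancy=0.20):
+    """(86400 x n x t x L) x (365 x years) x (1 + redundancy) / C"""
+    per_day = 86400 * records_per_second * record_len
+    return per_day * 365 * years * (1 + redundancy) / compression
+
+def memory_bytes(vnodes=22, buffer_mb=16 * 3, concurrent_queries=500, per_query_mb=50):
+    """Vnode write buffers plus per-query overhead, in bytes."""
+    return (vnodes * buffer_mb + concurrent_queries * per_query_mb) * 1024 ** 2
+
+# 100,000 timelines every 5 s plus 10,000 timelines every 10 s ~ 21,000 records/s
+rps = 100_000 / 5 + 10_000 / 10
+print(f"storage ~ {storage_bytes(rps) / 1024 ** 4:.1f} TiB over 1.5 years")
+print(f"memory  ~ {memory_bytes() / 1024 ** 3:.1f} GiB")
+```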
+
+## Appendix 3: Cluster Deployment and Startup
+
+TDengine provides a wealth of help documentation on cluster installation and deployment; below is an index of the relevant documents for your reference.
+
+### Cluster Deployment
+
+The first step is to install TDengine. Download the latest stable version of TDengine from the official website, unzip it and run install.sh to install it. Please refer to the blog ["Installing and uninstalling TDengine packages"](https://www.taosdata.com/blog/2019/08/09/566.html) for help on using the various installation packages.
+
+Be careful not to start the taosd service immediately after the installation is complete; start it only after the parameters have been properly configured.
+
+### Set the running parameters and start the service
+
+To ensure that the system can obtain the information it needs to run properly, set the following key parameters correctly on the server side:
+
+FQDN, firstEp, secondEp, dataDir, logDir, tmpDir, serverPort. The specific meaning of each parameter and the requirements for setting it can be found in the documentation ["TDengine Cluster Installation, Management"](https://www.taosdata.com/cn/documentation/cluster).
+
+Follow the same steps on every node that needs to run taosd: set its parameters, start the taosd service, and then add the dnode to the cluster.
+
+Finally, start the taos shell and execute the command `show dnodes`; if you can see all the nodes that have joined the cluster, the cluster has been built successfully. For the specific operation procedure and notes, please refer to the document ["TDengine Cluster Installation, Management"](https://www.taosdata.com/cn/documentation/cluster).
+
+## Appendix 4: Super table names
+
+Since OpenTSDB metric names contain a dot ("."), and the dot has a special meaning in TDengine as the separator between database and table names, TDengine provides escape characters that allow users to use keywords or special separators (e.g., the dot) in (super) table names. To use a special character, the table name needs to be enclosed in escape characters, e.g. `cpu.usage_user` would then be a legal (super) table name.
+
+## Appendix 5: Reference Articles
+
+1. [Quickly build an IT Ops monitoring system using TDengine + collectd/StatsD + Grafana](https://www.taosdata.com/cn/documentation20/devops/collectd) (Chinese)
+2. 
[Writing collection data directly to TDengine via collectd](https://www.taosdata.com/cn/documentation20/insert#collectd) (Chinese) + diff --git a/documentation20/en/images/IT-DevOps-Solutions-Collectd-StatsD.png b/documentation20/en/images/IT-DevOps-Solutions-Collectd-StatsD.png new file mode 100644 index 0000000000000000000000000000000000000000..b34aec45bdbe30bebbce532d6150c40f80399c25 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Collectd-StatsD.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3de5fb7a10a1cb22693468029bc26ad63a96d71 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be3704cb72d6c2614614852bfef17147ce49d061 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd406a140beea43fbfe2c417c85b872cfd6a2219 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-Telegraf.png b/documentation20/en/images/IT-DevOps-Solutions-Telegraf.png new file mode 100644 index 0000000000000000000000000000000000000000..e1334bb937febd395eca0b0c44c8a2f315910606 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-Telegraf.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-collectd-dashboard.png b/documentation20/en/images/IT-DevOps-Solutions-collectd-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..17d0fd31b9424b071783696668d5706b90274867 Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-collectd-dashboard.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-statsd-dashboard.png b/documentation20/en/images/IT-DevOps-Solutions-statsd-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..f122cbc5dc0bb5b7faccdbc7c4c8bcca59b6c9ed Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-statsd-dashboard.png differ diff --git a/documentation20/en/images/IT-DevOps-Solutions-telegraf-dashboard.png b/documentation20/en/images/IT-DevOps-Solutions-telegraf-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..d695a3af30154d2fc2217996f3ff4878abab097c Binary files /dev/null and b/documentation20/en/images/IT-DevOps-Solutions-telegraf-dashboard.png differ diff --git a/packaging/cfg/nginxd.service b/packaging/cfg/nginxd.service new file mode 100644 index 0000000000000000000000000000000000000000..50bbc1a21de5e6645404ec1d4e9bcd6f177f69d2 --- /dev/null +++ b/packaging/cfg/nginxd.service @@ -0,0 +1,22 @@ +[Unit] +Description=Nginx For TDengine Service +After=network-online.target +Wants=network-online.target + +[Service] +Type=forking +PIDFile=/usr/local/nginxd/logs/nginx.pid 
+ExecStart=/usr/local/nginxd/sbin/nginx +ExecStop=/usr/local/nginxd/sbin/nginx -s stop +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/taosd.service b/packaging/cfg/taosd.service new file mode 100644 index 0000000000000000000000000000000000000000..fff4b74e62a6da8f2bda9a6306a79132d7585e42 --- /dev/null +++ b/packaging/cfg/taosd.service @@ -0,0 +1,21 @@ +[Unit] +Description=TDengine server service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/taosd +ExecStartPre=/usr/local/taos/bin/startPre.sh +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/cfg/tarbitratord.service b/packaging/cfg/tarbitratord.service new file mode 100644 index 0000000000000000000000000000000000000000..d60cb536b094fe6b6c472d55076dc4d1db669d68 --- /dev/null +++ b/packaging/cfg/tarbitratord.service @@ -0,0 +1,20 @@ +[Unit] +Description=TDengine arbitrator service +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/tarbitrator +TimeoutStopSec=1000000s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TimeoutStartSec=0 +StandardOutput=null +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/release.sh b/packaging/release.sh index 8049e974b807363c856f63eebe026c74c6972c0a..19a0a7702bb25bf60efd9bff4af166a1336d721f 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -14,6 +14,7 @@ set -e # -d [taos | power | tq | pro | kh | jh] # -n [2.0.0.3] # -m [2.0.0.0] +# -H [ false | true] # set parameters by default value verMode=edge # [cluster, edge] @@ -26,8 +27,9 @@ dbName=taos # [taos | power | tq | pro | kh | jh] allocator=glibc # [glibc | jemalloc] verNumber="" verNumberComp="1.0.0.0" +httpdBuild=false -while getopts "hv:V:c:o:l:s:d:a:n:m:" arg +while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg do case $arg in v) @@ -70,6 +72,10 @@ do #echo "osType=$OPTARG" osType=$(echo $OPTARG) ;; + H) + #echo "httpdBuild=$OPTARG" + httpdBuild=$(echo $OPTARG) + ;; h) echo "Usage: `basename $0` -v [cluster | edge] " echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] " @@ -81,6 +87,7 @@ do echo " -d [taos | power | tq | pro | kh | jh] " echo " -n [version number] " echo " -m [compatible version number] " + echo " -H [false | true] " exit 0 ;; ?) #unknow option @@ -90,7 +97,7 @@ do esac done -echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}" +echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}" curr_dir=$(pwd) @@ -405,8 +412,7 @@ if [[ "$dbName" == "jh" ]]; then # TODO: src/dnode/CMakeLists.txt fi -echo "build ${pagMode} package ..." 
-if [[ "$pagMode" == "lite" ]]; then +if [[ "$httpdBuild" == "true" ]]; then BUILD_HTTP=true BUILD_TOOLS=false else diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index c0377cf8345b66ed8d588a6b36a4b7cdcdba028d..95bf3e7b74f6e0f782a8cd5caefd196510358f87 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -700,78 +700,93 @@ function clean_service_on_systemd() { function install_service_on_systemd() { clean_service_on_systemd - taosd_service_config="${service_config_dir}/taosd.service" - ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" - ${csudo} bash -c "echo >> ${taosd_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" - #${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" - ${csudo} bash -c "echo >> ${taosd_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" + [ -f ${script_dir}/cfg/taosd.service ] &&\ + ${csudo} cp ${script_dir}/cfg/taosd.service \ + ${service_config_dir}/ || : + ${csudo} systemctl daemon-reload + + #taosd_service_config="${service_config_dir}/taosd.service" + #${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" + #${csudo} bash -c "echo >> ${taosd_service_config}" + #${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" + 
#${csudo} bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + ##${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" + #${csudo} bash -c "echo >> ${taosd_service_config}" + #${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd - tarbitratord_service_config="${service_config_dir}/tarbitratord.service" - ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" + [ -f ${script_dir}/cfg/tarbitratord.service ] &&\ + ${csudo} cp ${script_dir}/cfg/tarbitratord.service \ + ${service_config_dir}/ || : + ${csudo} systemctl daemon-reload + + #tarbitratord_service_config="${service_config_dir}/tarbitratord.service" + #${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}" + #${csudo} 
bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" + #${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" #${csudo} systemctl enable tarbitratord if [ "$verMode" == "cluster" ]; then - nginx_service_config="${service_config_dir}/nginxd.service" - ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" - ${csudo} bash -c "echo >> ${nginx_service_config}" - ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" - ${csudo} bash -c "echo >> ${nginx_service_config}" - ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" - ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + [ -f ${script_dir}/cfg/nginxd.service ] &&\ + ${csudo} cp ${script_dir}/cfg/nginxd.service \ + ${service_config_dir}/ || : + ${csudo} systemctl daemon-reload + + #nginx_service_config="${service_config_dir}/nginxd.service" + #${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + #${csudo} bash -c "echo >> ${nginx_service_config}" + #${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}" + 
#${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + #${csudo} bash -c "echo >> ${nginx_service_config}" + #${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + #${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" if ! ${csudo} systemctl enable nginxd &> /dev/null; then ${csudo} systemctl daemon-reexec ${csudo} systemctl enable nginxd @@ -831,7 +846,7 @@ vercomp () { function is_version_compatible() { - curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6` + curr_version=`ls ${script_dir}/driver/libtaos.so* | awk -F 'libtaos.so.' '{print $2}'` if [ -f ${script_dir}/driver/vercomp.txt ]; then min_compatible_version=`cat ${script_dir}/driver/vercomp.txt` diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 0f226dbb21232047ba0c19d2141958c2111f8c57..c9555291bc77828da336c6bfe9c6215f208ff178 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -86,6 +86,16 @@ if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then cp ${compile_dir}/test/cfg/taosadapter.service ${install_dir}/cfg || : fi +if [ -f "${cfg_dir}/taosd.service" ]; then + cp ${cfg_dir}/taosd.service ${install_dir}/cfg || : +fi +if [ -f "${cfg_dir}/tarbitratord.service" ]; then + cp ${cfg_dir}/tarbitratord.service ${install_dir}/cfg || : +fi +if [ -f "${cfg_dir}/nginxd.service" ]; then + cp ${cfg_dir}/nginxd.service ${install_dir}/cfg || : +fi + mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index 68677423d429d494de6595e47e05c396560c2e54..2808221080d0313f73a91243898692022f6ca56b 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -364,7 +364,9 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j); int32_t functionId = pExprInfo->base.functionId; + if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) { + orderColIndexList[i] = j; break; } diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 2c590ef74ef8946b37765d78ddb3731d0a8d4029..15d3e58daeec37643980815665879770f77e4c63 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -152,7 +152,9 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen, SSmlLinesInfo* info) { tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id); - qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), 
compareSmlColKv); + if (point->tagNum) { + qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv); + } SStringBuilder sb; memset(&sb, 0, sizeof(sb)); char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; @@ -185,6 +187,18 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa return 0; } +static int32_t buildSmlChildTableName(TAOS_SML_DATA_POINT* point, SSmlLinesInfo* info) { + tscDebug("SML:0x%"PRIx64" taos_sml_insert build child table name", info->id); + char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE]; + int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE; + getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info); + point->childTableName = calloc(1, tableNameLen+1); + strncpy(point->childTableName, childTableName, tableNameLen); + point->childTableName[tableNameLen] = '\0'; + return 0; +} + + static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) { int32_t code = 0; SHashObj* sname2shema = taosHashInit(32, @@ -216,12 +230,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, for (int j = 0; j < point->tagNum; ++j) { TAOS_SML_KV* tagKv = point->tags + j; if (!point->childTableName) { - char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE]; - int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE; - getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info); - point->childTableName = calloc(1, tableNameLen+1); - strncpy(point->childTableName, childTableName, tableNameLen); - point->childTableName[tableNameLen] = '\0'; + buildSmlChildTableName(point, info); } code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info); @@ -231,6 +240,27 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, } } + //for Line Protocol tags may be omitted, add a tag with NULL value + if (point->tagNum == 0) { + if (!point->childTableName) { + buildSmlChildTableName(point, info); + } + char tagNullName[TSDB_COL_NAME_LEN] = {0}; + size_t nameLen = strlen(tsSmlTagNullName); + strncpy(tagNullName, tsSmlTagNullName, nameLen); + addEscapeCharToString(tagNullName, (int32_t)nameLen); + size_t* pTagNullIdx = taosHashGet(pStableSchema->tagHash, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE); + if (!pTagNullIdx) { + SSchema tagNull = {0}; + tagNull.type = TSDB_DATA_TYPE_NCHAR; + tagNull.bytes = TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; + strncpy(tagNull.name, tagNullName, nameLen + TS_ESCAPE_CHAR_SIZE); + taosArrayPush(pStableSchema->tags, &tagNull); + size_t tagNullIdx = taosArrayGetSize(pStableSchema->tags) - 1; + taosHashPut(pStableSchema->tagHash, tagNull.name, nameLen + TS_ESCAPE_CHAR_SIZE, &tagNullIdx, sizeof(tagNullIdx)); + } + } + for (int j = 0; j < point->fieldNum; ++j) { TAOS_SML_KV* fieldKv = point->fields + j; code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields, info); @@ -952,7 +982,7 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam tagKVs[kv->fieldSchemaIdx] = kv; } } - + SArray* tagBinds = taosArrayInit(numTags, sizeof(TAOS_BIND)); taosArraySetSize(tagBinds, numTags); int isNullColBind = TSDB_TRUE; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 198dd3c7f2ebd1296f8f33e075ec5198a15b005a..4c65e8e45a5eae445d48dd48ffe26af613cca37c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2576,6 +2576,7 @@ int32_t 
addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* msg13 = "parameter list required"; const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'"; const char* msg15 = "parameter is out of range [1, 1000]"; + const char* msg16 = "elapsed duration should be greater than or equal to database precision"; switch (functionId) { case TSDB_FUNC_COUNT: { @@ -2667,19 +2668,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_DERIVATIVE: case TSDB_FUNC_CSUM: case TSDB_FUNC_STDDEV: - case TSDB_FUNC_LEASTSQR: { + case TSDB_FUNC_LEASTSQR: + case TSDB_FUNC_ELAPSED: { // 1. valid the number of parameters int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->Expr.paramList); // no parameters or more than one parameter for function if (pItem->pNode->Expr.paramList == NULL || - (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) || - ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) { + (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && numOfParams != 1) || + ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) || + (functionId == TSDB_FUNC_ELAPSED && numOfParams > 2)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0); - if (pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) { + if ((pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) || 0 == pParamElem->pNode->columnName.n) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2688,6 +2691,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } + // elapsed only can be applied to primary key + if (functionId == TSDB_FUNC_ELAPSED && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key"); + } + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -2699,7 +2707,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // 2. 
check if sql function can be applied on this column data type SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); - if (!IS_NUMERIC_TYPE(pSchema->type)) { + if (!IS_NUMERIC_TYPE(pSchema->type) && (functionId != TSDB_FUNC_ELAPSED)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); @@ -2744,11 +2752,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } else if (functionId == TSDB_FUNC_IRATE) { int64_t prec = info.precision; tscExprAddParams(&pExpr->base, (char*)&prec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); - } else if (functionId == TSDB_FUNC_DERIVATIVE) { + } else if (functionId == TSDB_FUNC_DERIVATIVE || (functionId == TSDB_FUNC_ELAPSED && 2 == numOfParams)) { char val[8] = {0}; int64_t tickPerSec = 0; - if (tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) { + if ((TSDB_DATA_TYPE_NULL == pParamElem[1].pNode->value.nType) || tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -2758,23 +2766,27 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); } - if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { + if ((tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) && (functionId == TSDB_FUNC_DERIVATIVE)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); - } + } else if (tickPerSec <= 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16); + } tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); - memset(val, 0, tListLen(val)); + if (functionId == TSDB_FUNC_DERIVATIVE) { + memset(val, 0, tListLen(val)); - if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } + if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } - int64_t v = *(int64_t*) val; - if (v != 0 && v != 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); - } + int64_t v = *(int64_t*) val; + if (v != 0 && v != 1) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); + } - tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); + tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); + } } SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); @@ -3193,7 +3205,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_SUCCESS; } - default: { assert(!TSDB_FUNC_IS_SCALAR(functionId)); pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); @@ -3569,7 +3580,7 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) || (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) || - (functionId == TSDB_FUNC_SAMPLE)) { + (functionId == TSDB_FUNC_SAMPLE) || (functionId == TSDB_FUNC_ELAPSED)) { if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, 
(int32_t)pExpr->base.param[0].i64, &type, &bytes, &interBytes, 0, true, NULL) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -3624,8 +3635,8 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) { } bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { - const char* msg1 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP are not allowed to apply to super table directly"; - const char* msg2 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP only support group by tbname for super table query"; + const char* msg1 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP/Elapsed are not allowed to apply to super table directly"; + const char* msg2 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP/Elapsed only support group by tbname for super table query"; const char* msg3 = "functions not support for super table query"; // filter sql function not supported by metric query yet. @@ -3647,7 +3658,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) } if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivLikeQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo) || - tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_SAMPLE) || tscGetPointInterpQuery(pQueryInfo)) { + tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_SAMPLE) || tscGetPointInterpQuery(pQueryInfo) || tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_ELAPSED)) { if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); return true; @@ -7675,7 +7686,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* const char* msg3 = "group by/session/state_window not allowed on projection query"; const char* msg4 = "retrieve tags not compatible with group by or interval query"; const char* msg5 = "functions can not be mixed up"; - const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg only support group by tbname"; + const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed only support group by tbname"; // only retrieve tags, group by is not supportted if (tscQueryTags(pQueryInfo)) { @@ -7742,7 +7753,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* } if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || - f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG)) { + f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED)) { for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j); if (j == 0) { @@ -7788,7 +7799,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) { - const char* msg1 = "TWA/Diff/Derivative/Irate are not allowed to apply to super table without group by tbname"; + const char* msg1 = "TWA/Diff/Derivative/Irate/elapsed are not allowed to apply to super table without group by tbname"; const char* msg2 = "group by not supported in nested interp query"; const char* msg3 = "order by not supported in nested interp query"; const char* msg4 = "first column should be timestamp for interp query"; @@ -7801,7 +7812,7 @@ int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); int32_t f = pExpr->base.functionId; - if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == 
TSDB_FUNC_DIFF) { + if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED) { for (int32_t j = 0; j < upNum; ++j) { SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, j); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pUp, 0); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 98693c94f1d68c194946fbf8b4c00e92c410c9ea..361f73945533b03017b5e156fff975fa1106925f 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -943,6 +943,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tsCompQuery = query.tsCompQuery; pQueryMsg->simpleAgg = query.simpleAgg; pQueryMsg->pointInterpQuery = query.pointInterpQuery; + pQueryMsg->needTableSeqScan = query.needTableSeqScan; pQueryMsg->needReverseScan = query.needReverseScan; pQueryMsg->stateWindow = query.stateWindow; pQueryMsg->numOfTags = htonl(numOfTags); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 62167d9fedbdc67e3422374f4c1e1fc55684681d..df556b850a93118efc0d1e0a29a30628878a4b8e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -373,6 +373,10 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { return true; } +bool tscNeedTableSeqScan(SQueryInfo* pQueryInfo) { + return pQueryInfo->stableQuery && (tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_TWA) || tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_ELAPSED)); +} + bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) { size_t size = tscNumOfExprs(pQueryInfo); for (int32_t i = 0; i < size; ++i) { @@ -389,7 +393,6 @@ bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) { return false; } - bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo) { if (tscIsProjectionQuery(pQueryInfo)) { return false; @@ -522,7 +525,7 @@ bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo) { } int32_t functionId = pExpr->base.functionId; - if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP) { + if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_ELAPSED) { return true; } } @@ -5058,6 +5061,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo); pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo); pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo); + pQueryAttr->needTableSeqScan = tscNeedTableSeqScan(pQueryInfo); pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo); pQueryAttr->distinct = pQueryInfo->distinct; pQueryAttr->sw = pQueryInfo->sessionWindow; diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 317d48ea5987935c5d53af6ad578834071643f26..49ca1e31c5f4fc21beeed169b431f21e8985a642 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -237,6 +237,7 @@ extern int8_t tsDeadLockKillQuery; // schemaless extern char tsDefaultJSONStrType[]; extern char tsSmlChildTableName[]; +extern char tsSmlTagNullName[]; typedef struct { diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 62baaadbac2596bc66bf5955262a3d5ff35fcfc1..83026a3774ab77aa4b5d4998225a4dcc5d10c121 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -291,7 +291,11 @@ int8_t tsDeadLockKillQuery = 0; // default JSON string type char tsDefaultJSONStrType[7] = "nchar"; -char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. 
If set to empty system will generate table name using MD5 hash. +char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. + //If set to empty system will generate table name using MD5 hash. +char tsSmlTagNullName[TSDB_COL_NAME_LEN] = "_tag_null"; //for line protocol if tag is omitted, add a tag with NULL value + //to make sure inserted records belongs to the same measurement + //default name is _tag_null and can be user configurable int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -1701,6 +1705,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // name for a NULL value tag added for Line Protocol when tag fields are omitted + cfg.option = "smlTagNullName"; + cfg.ptr = tsSmlTagNullName; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = tListLen(tsSmlTagNullName); + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks cfg.option = "walFlushSize"; cfg.ptr = &tsdbWalFlushSize; diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs index f9a5890eedb8714616cb4d624f9036ffdeef35fb..14fb240d0c860790b29f957774ee65016aeb5de8 100644 --- a/src/connector/C#/TDengineDriver.cs +++ b/src/connector/C#/TDengineDriver.cs @@ -19,7 +19,7 @@ using System.Runtime.InteropServices; namespace TDengineDriver { - enum TDengineDataType + public enum TDengineDataType { TSDB_DATA_TYPE_NULL = 0, // 1 bytes TSDB_DATA_TYPE_BOOL = 1, // 1 bytes @@ -33,12 +33,12 @@ namespace TDengineDriver TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes TSDB_DATA_TYPE_NCHAR = 10, // unicode string TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_USMALLINT = 12,// 2 bytes TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes + TSDB_DATA_TYPE_UBIGINT = 14 // 8 bytes } - enum TDengineInitOption + public enum TDengineInitOption { TSDB_OPTION_LOCALE = 0, TSDB_OPTION_CHARSET = 1, @@ -47,7 +47,7 @@ namespace TDengineDriver TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 } - class TDengineMeta + public class TDengineMeta { public string name; public short size; @@ -90,7 +90,7 @@ namespace TDengineDriver } } - class TDengine + public class TDengine { public const int TSDB_CODE_SUCCESS = 0; diff --git a/src/connector/grafana-pluain-is-removed-from-TDengine.md b/src/connector/grafana-pluain-is-removed-from-TDengine.md new file mode 100644 index 0000000000000000000000000000000000000000..6e7833d5ac6bf24af353eb8fc709875e27d2297e --- /dev/null +++ b/src/connector/grafana-pluain-is-removed-from-TDengine.md @@ -0,0 +1 @@ +TDengine Grafana plugin is no more part of the TDengine repo. Please check it out from https://github.com/taosdata/grafanaplugin. 
diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin deleted file mode 160000 index 792ef7c3036f15068796e09883d3f4d47a038fe2..0000000000000000000000000000000000000000 --- a/src/connector/grafanaplugin +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 792ef7c3036f15068796e09883d3f4d47a038fe2 diff --git a/src/connector/grafanaplugin/README.md b/src/connector/grafanaplugin/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6e7833d5ac6bf24af353eb8fc709875e27d2297e --- /dev/null +++ b/src/connector/grafanaplugin/README.md @@ -0,0 +1 @@ +TDengine Grafana plugin is no more part of the TDengine repo. Please check it out from https://github.com/taosdata/grafanaplugin. diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index c992cf58ba43eb0e052d9bc80824d94e98b725ca..15695ae9204c40db16c9f4d367c80a285335cbef 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -107,16 +107,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti public void setCatalog(String catalog) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - /* - try (Statement stmt = createStatement()) { - boolean execute = stmt.execute("use " + catalog); - if (execute) - this.catalog = catalog; - } catch (SQLException e) { - // do nothing - } - */ - this.catalog = catalog; } @@ -416,7 +406,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti } catch (InterruptedException | ExecutionException ignored) { } catch (TimeoutException e) { future.cancel(true); - status = false; } finally { executor.shutdownNow(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 8cd8da6de4f7d5324afbc6d5a5d54d6b8dcc7a8d..77a97d644ca3da3a51bce021ab7904883ed885f4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -1,17 +1,3 @@ -/*************************************************************************** - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- *****************************************************************************/ package com.taosdata.jdbc; import java.sql.*; @@ -66,7 +52,7 @@ public class TSDBConnection extends AbstractConnection { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } - long id = this.connector.subscribe(topic, sql, restart, 0); + long id = this.connector.subscribe(topic, sql, restart); if (id == 0) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 0fef64a6f82706e30677ad4e74604924c5cc2e60..00eff99f45cb6aa8cc0fbc7bce40e0d82f401e05 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -1,23 +1,8 @@ -/*************************************************************************** - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - *****************************************************************************/ package com.taosdata.jdbc; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; import java.sql.*; -import java.util.*; +import java.util.Properties; +import java.util.StringTokenizer; import java.util.logging.Logger; /** @@ -139,7 +124,7 @@ public class TSDBDriver extends AbstractDriver { } catch (SQLException sqlEx) { throw sqlEx; } catch (Exception ex) { - throw new SQLException("SQLException:" + ex.toString(), ex); + throw new SQLException("SQLException:" + ex, ex); } } @@ -152,7 +137,7 @@ public class TSDBDriver extends AbstractDriver { public boolean acceptsURL(String url) throws SQLException { if (url == null) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); - return url.length() > 0 && url.trim().length() > 0 && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1)); + return url.trim().length() > 0 && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1)); } public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index ee2c8141a81bb9dc2aa51ba14247dfbb834ec746..247ae929dabc9aba4d50309433a9b1866125909d 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -1,19 +1,3 @@ -/** - * ************************************************************************* - * Copyright (c) 2019 TAOS Data, Inc. - *

- * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - *

- * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - *

- * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * *************************************************************************** - */ package com.taosdata.jdbc; import com.alibaba.fastjson.JSONObject; @@ -261,8 +245,8 @@ public class TSDBJNIConnector { /** * Create a subscription */ - long subscribe(String topic, String sql, boolean restart, int period) { - return subscribeImp(this.taos, restart, topic, sql, period); + long subscribe(String topic, String sql, boolean restart) { + return subscribeImp(this.taos, restart, topic, sql, 0); } private native long subscribeImp(long connection, boolean restart, String topic, String sql, int period); @@ -285,16 +269,6 @@ public class TSDBJNIConnector { private native void unsubscribeImp(long subscription, boolean isKeep); - /** - * Validate if a create table SQL statement is correct without actually creating that table - */ - public boolean validateCreateTableSql(String sql) { - int res = validateCreateTableSqlImp(taos, sql.getBytes()); - return res == 0; - } - - private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); - public long prepareStmt(String sql) throws SQLException { long stmt = prepareStmtImp(sql.getBytes(), this.taos); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index 36714893e3ca519dea07910a95d5ee1c1b6fb731..fdd034a641d7fd829059c73061305bdf38eae1bf 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -39,7 +39,7 @@ public class RestfulDriver extends AbstractDriver { String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041"); String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? 
props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null; - String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + ""; + String loginUrl; try { if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED); @@ -53,8 +53,8 @@ public class RestfulDriver extends AbstractDriver { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 concoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); } - int poolSize = Integer.valueOf(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); - boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); + int poolSize = Integer.parseInt(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); + boolean keepAlive = Boolean.parseBoolean(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); HttpClientPoolUtil.init(poolSize, keepAlive); String result = HttpClientPoolUtil.execute(loginUrl); @@ -79,7 +79,7 @@ public class RestfulDriver extends AbstractDriver { public boolean acceptsURL(String url) throws SQLException { if (url == null) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); - return url.length() > 0 && url.trim().length() > 0 && url.startsWith(URL_PREFIX); + return url.trim().length() > 0 && url.startsWith(URL_PREFIX); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index fb8b82271b02b70b348b43a7c88a0084adaa5ab5..cdcd2eec482cc39e940bf20f6ae636568257faf2 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -65,7 +65,11 @@ public class RestfulStatement extends AbstractStatement { boolean result = true; if (SqlSyntaxValidator.isUseSql(sql)) { - HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); + String ret = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); + JSONObject resultJson = JSON.parseObject(ret); + if (resultJson.getString("status").equals("error")) { + throw TSDBError.createSQLException(resultJson.getInteger("code"), "sql: " + sql + ", desc: " + resultJson.getString("desc")); + } this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; @@ -114,7 +118,7 @@ public class RestfulStatement extends AbstractStatement { String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { - throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); + throw TSDBError.createSQLException(resultJson.getInteger("code"), "sql: " + sql + ", desc: " + resultJson.getString("desc")); } this.resultSet = new RestfulResultSet(database, this, resultJson); this.affectedRows = -1; @@ -125,7 +129,7 @@ public class RestfulStatement extends AbstractStatement { String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken()); JSONObject jsonObject = JSON.parseObject(result); if (jsonObject.getString("status").equals("error")) { 
- throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc")); + throw TSDBError.createSQLException(jsonObject.getInteger("code"), "sql: " + sql + ", desc: " + jsonObject.getString("desc")); } this.resultSet = null; this.affectedRows = getAffectedRows(jsonObject); @@ -133,16 +137,14 @@ public class RestfulStatement extends AbstractStatement { } private int getAffectedRows(JSONObject jsonObject) throws SQLException { - // create ... SQLs should return 0 , and Restful result like this: - // {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1} JSONArray head = jsonObject.getJSONArray("head"); if (head.size() != 1 || !"affected_rows".equals(head.getString(0))) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variable: [" + head.toJSONString() + "]"); JSONArray data = jsonObject.getJSONArray("data"); if (data != null) { return data.getJSONArray(0).getInteger(0); } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variable: [" + jsonObject.toJSONString() + "]"); } @Override diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java index 5b38f9b0640bb6eec6d1c9749db0abf0388c04ce..d2f5b915ee1b39146ccc91131fae801c291d08cc 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java @@ -2,7 +2,6 @@ package com.taosdata.jdbc.cases; import com.taosdata.jdbc.TSDBErrorNumbers; import org.junit.Assert; -import org.junit.Before; import org.junit.Ignore; import org.junit.Test; @@ -59,38 +58,31 @@ public class AuthenticationTest { @Test public void test() throws SQLException { // change password - String url = "jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=taosdata"; - try (Connection conn = DriverManager.getConnection(url); - Statement stmt = conn.createStatement();) { - stmt.execute("alter user " + user + " pass '" + password + "'"); - } + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("alter user " + user + " pass '" + password + "'"); + stmt.close(); + conn.close(); // use new to login and execute query - url = "jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=" + password; - try (Connection conn = DriverManager.getConnection(url); - Statement stmt = conn.createStatement()) { - stmt.execute("show databases"); - ResultSet rs = stmt.getResultSet(); - ResultSetMetaData meta = rs.getMetaData(); - while (rs.next()) { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password); + stmt = conn.createStatement(); + stmt.execute("show databases"); + ResultSet rs = stmt.getResultSet(); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + System.out.print(meta.getColumnLabel(i) + ":" + rs.getString(i) + "\t"); } + System.out.println(); } // change password back - url = "jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=" + password; - try (Connection conn = 
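The getAffectedRows hunk above removes the inline comment documenting the RESTful payload for DDL statements ({"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}) and now includes the offending JSON in the ERROR_INVALID_VARIABLE message. A standalone sketch of that parsing step follows; it assumes the fastjson classes (JSON, JSONObject, JSONArray) already used by RestfulStatement and substitutes a plain IllegalStateException for TSDBError, so it is illustrative only.

```java
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

public class AffectedRowsSketch {
    // Mirrors the shape of getAffectedRows: expect head == ["affected_rows"], read data[0][0].
    static int affectedRows(String restResult) {
        JSONObject obj = JSON.parseObject(restResult);
        JSONArray head = obj.getJSONArray("head");
        if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
            throw new IllegalStateException("invalid variable: [" + head.toJSONString() + "]");
        JSONArray data = obj.getJSONArray("data");
        if (data != null)
            return data.getJSONArray(0).getInteger(0);
        throw new IllegalStateException("invalid variable: [" + obj.toJSONString() + "]");
    }

    public static void main(String[] args) {
        String payload = "{\"status\":\"succ\",\"head\":[\"affected_rows\"],\"data\":[[0]],\"rows\":1}";
        System.out.println(affectedRows(payload)); // prints 0
    }
}
```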
DriverManager.getConnection(url); - Statement stmt = conn.createStatement()) { - stmt.execute("alter user " + user + " pass 'taosdata'"); - } - } - - @Before - public void before() { - try { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); - } catch (ClassNotFoundException e) { - e.printStackTrace(); - } + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password); + stmt = conn.createStatement(); + stmt.execute("alter user " + user + " pass 'taosdata'"); + stmt.close(); + conn.close(); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java index d60ee14fbc87ba5d2bd2e851b5195b513fc4e028..5f821c5cc34dde0050d8e62afb6fc8ab17534a17 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java @@ -8,8 +8,13 @@ import java.sql.SQLException; public class ConnectWrongDatabaseTest { @Test(expected = SQLException.class) - public void connect() throws SQLException { + public void connectByJni() throws SQLException { DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata"); } + @Test(expected = SQLException.class) + public void connectByRestful() throws SQLException { + DriverManager.getConnection("jdbc:TAOS-RS://localhost:6041/wrong_db?user=root&password=taosdata"); + } + } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java index beea990456ec98c2ab51fc2086034e0b31b570b6..05c7b0feca21f3f5b9062f9cbc26921aa607732a 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java @@ -18,9 +18,8 @@ public class InsertDbwithoutUseDbTest { private static final Random random = new Random(System.currentTimeMillis()); @Test - public void case001() throws ClassNotFoundException, SQLException { + public void case001() throws SQLException { // prepare schema - Class.forName("com.taosdata.jdbc.TSDBDriver"); String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); try (Statement stmt = conn.createStatement()) { @@ -51,9 +50,8 @@ public class InsertDbwithoutUseDbTest { } @Test - public void case002() throws ClassNotFoundException, SQLException { + public void case002() throws SQLException { // prepare the schema - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); final String url = "jdbc:TAOS-RS://" + host + ":6041/inWithoutDb?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); try (Statement stmt = conn.createStatement()) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java index 2ae03b4e5cd92056ce0ea995c8edcd21e51e24bb..cfd6a066acc2c2abd94e525fb69d4027a317134c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java @@ -25,7 +25,7 @@ public class TimestampPrecisonInNanoRestTest 
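The test changes above drop the explicit Class.forName("com.taosdata.jdbc.TSDBDriver") / Class.forName("com.taosdata.jdbc.rs.RestfulDriver") calls. With a JDBC 4.0+ driver that ships a META-INF/services/java.sql.Driver entry, DriverManager locates the driver through ServiceLoader, so the manual registration is redundant. A small sketch under that assumption, reusing the host, credentials, and "show databases" statement seen in these tests:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class NoClassForNameSketch {
    public static void main(String[] args) throws Exception {
        // No Class.forName: the driver on the classpath is auto-registered.
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("show databases")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
```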
{ private static final String date4 = format.format(new Date(timestamp1 + 10L)); private static final String date2 = date1 + "123455"; private static final String date3 = date4 + "123456"; - + private static Connection conn; @@ -43,7 +43,7 @@ public class TimestampPrecisonInNanoRestTest { stmt.execute("drop database if exists " + ns_timestamp_db); stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); - stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); stmt.close(); } @@ -54,7 +54,7 @@ public class TimestampPrecisonInNanoRestTest { stmt.execute("drop database if exists " + ns_timestamp_db); stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); - stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); stmt.close(); } @@ -105,7 +105,7 @@ public class TimestampPrecisonInNanoRestTest { @Test public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { try (Statement stmt = conn.createStatement()) { - stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); checkCount(1l, rs); rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); @@ -139,7 +139,7 @@ public class TimestampPrecisonInNanoRestTest { public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { try (Statement stmt = conn.createStatement()) { long timestamp4 = timestamp1 * 1000_000 + 123123; - stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); checkCount(1l, rs); rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); @@ -215,7 +215,7 @@ public class TimestampPrecisonInNanoRestTest { } catch (SQLException e) { e.printStackTrace(); } - } + } @Test public void canQueryLargerThanInNumberTypeForFirstCol() { @@ -279,7 +279,7 @@ public class TimestampPrecisonInNanoRestTest { } catch (SQLException e) { e.printStackTrace(); } - } + } @Test public void canQueryLessThanInDateTypeForFirstCol() { 
@@ -347,7 +347,7 @@ public class TimestampPrecisonInNanoRestTest { } catch (SQLException e) { e.printStackTrace(); } - } + } @Test public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() { @@ -466,7 +466,7 @@ public class TimestampPrecisonInNanoRestTest { } @Test - public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){ + public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol() { try (Statement stmt = conn.createStatement()) { stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)"); ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather"); @@ -477,7 +477,7 @@ public class TimestampPrecisonInNanoRestTest { } @Test - public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){ + public void canIntervalAndSlidingAcceptNsUnitForFirstCol() { try (Statement stmt = conn.createStatement()) { ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); rs.next(); @@ -492,7 +492,7 @@ public class TimestampPrecisonInNanoRestTest { } @Test - public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){ + public void canIntervalAndSlidingAcceptNsUnitForSecondCol() { try (Statement stmt = conn.createStatement()) { ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); rs.next(); @@ -506,21 +506,17 @@ public class TimestampPrecisonInNanoRestTest { } } - @Test - public void testDataOutOfRangeExceptionForFirstCol() { + @Test(expected = SQLException.class) + public void testDataOutOfRangeExceptionForFirstCol() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)"); - } catch (SQLException e) { - Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage()); } } - @Test - public void testDataOutOfRangeExceptionForSecondCol() { + @Test(expected = SQLException.class) + public void testDataOutOfRangeExceptionForSecondCol() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)"); - } catch (SQLException e) { - Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage()); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java index b08f8ff227dc16e1b413391e58a9de8fd0182c42..e7ce1d76f123a043d49eb64931c0d537d09664df 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java @@ -373,11 +373,12 @@ public class RestfulConnectionTest { properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/log?user=root&password=taosdata", properties); + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", 
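Switching testDataOutOfRangeExceptionForFirstCol/SecondCol to @Test(expected = SQLException.class) asserts only the exception type; the previous message check ("TDengine ERROR (60b): Timestamp data out of range") is dropped. If that assertion is still wanted, Assert.assertThrows can keep it. The sketch below assumes JUnit 4.13+ is available and uses an illustrative database name in place of the class's ns_timestamp_db fixture.

```java
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

import org.junit.Test;

public class OutOfRangeMessageSketch {
    private static final String ns_timestamp_db = "ns_timestamp_sketch"; // illustrative name

    @Test
    public void keepsMessageAssertion() throws SQLException {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata");
             Statement stmt = conn.createStatement()) {
            // assertThrows (JUnit 4.13+) returns the exception, so the message can still be checked.
            SQLException e = assertThrows(SQLException.class, () -> stmt.executeUpdate(
                    "insert into " + ns_timestamp_db
                    + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)"));
            assertTrue(e.getMessage().contains("Timestamp data out of range"));
        }
    }
}
```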
properties); // create test database for test cases try (Statement stmt = conn.createStatement()) { stmt.execute("create database if not exists test"); } + } @AfterClass diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java index 858f7b32f0d8a72be5b6cfa68aa120b08909df6c..5de1655ee48776b6798619814fe2729625282764 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java @@ -9,9 +9,10 @@ import java.util.Random; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class RestfulJDBCTest { - private static final String host = "127.0.0.1"; - private final Random random = new Random(System.currentTimeMillis()); - private Connection connection; + // private static final String host = "127.0.0.1"; + private static final String host = "master"; + private static final Random random = new Random(System.currentTimeMillis()); + private static Connection connection; @Test public void testCase001() throws SQLException { @@ -129,15 +130,23 @@ public class RestfulJDBCTest { } } - @Before - public void before() throws SQLException { - connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata&httpKeepAlive=false"); + @BeforeClass + public static void beforeClass() { + try { + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + } catch (SQLException e) { + e.printStackTrace(); + } } - @After - public void after() throws SQLException { - if (connection != null) + @AfterClass + public static void afterClass() throws SQLException { + if (connection != null) { + Statement stmt = connection.createStatement(); + stmt.execute("drop database if exists restful_test"); + stmt.close(); connection.close(); + } } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java index c7fc81297264f3cf38795d9d5a3b7eccc51574c9..f3011af799c987ed399920875ae512fd8533ec77 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java @@ -186,22 +186,17 @@ public class RestfulResultSetMetaDataTest { } @BeforeClass - public static void beforeClass() { - try { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); - stmt = conn.createStatement(); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - stmt.execute("drop table if exists weather"); - stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); - stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); - rs = stmt.executeQuery("select * from restful_test.weather"); - rs.next(); - meta = rs.getMetaData(); - } catch (ClassNotFoundException | SQLException e) { - e.printStackTrace(); - } + public static void beforeClass() throws SQLException { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); 
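Several of these test classes (RestfulJDBCTest, RestfulResultSetMetaDataTest, RestfulResultSetTest, SQLTest) move connection handling from per-test @Before/@After to class-level @BeforeClass/@AfterClass, sharing one connection and dropping the restful_test database once the class finishes. A condensed sketch of that lifecycle; the class name and the smoke-test body are illustrative, the SQL statements mirror the ones used in the diff.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class SharedConnectionLifecycleSketch {
    private static Connection connection;

    @BeforeClass
    public static void beforeClass() throws SQLException {
        // One connection per test class instead of one per test method.
        connection = DriverManager.getConnection(
                "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata");
    }

    @Test
    public void smoke() throws SQLException {
        try (Statement stmt = connection.createStatement()) {
            stmt.execute("create database if not exists restful_test");
        }
    }

    @AfterClass
    public static void afterClass() throws SQLException {
        if (connection != null) {
            // Clean up the shared database before releasing the connection.
            try (Statement stmt = connection.createStatement()) {
                stmt.execute("drop database if exists restful_test");
            }
            connection.close();
        }
    }
}
```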
+ stmt = conn.createStatement(); + stmt.execute("create database if not exists restful_test"); + stmt.execute("use restful_test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); + stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); + rs = stmt.executeQuery("select * from restful_test.weather"); + rs.next(); + meta = rs.getMetaData(); } @AfterClass diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index 86b0f1be9e7ee99f50201dc98f197c07f5bb9aef..4058dd8b550b6e9ac5553144de92d908d804dce1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -17,7 +17,8 @@ import java.text.SimpleDateFormat; public class RestfulResultSetTest { - private static final String host = "127.0.0.1"; + // private static final String host = "127.0.0.1"; + private static final String host = "master"; private static Connection conn; private static Statement stmt; private static ResultSet rs; @@ -658,35 +659,29 @@ public class RestfulResultSetTest { } @BeforeClass - public static void beforeClass() { - try { - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); - stmt = conn.createStatement(); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - stmt.execute("drop table if exists weather"); - stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); - stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); - rs = stmt.executeQuery("select * from restful_test.weather"); - rs.next(); - } catch (SQLException e) { - e.printStackTrace(); - } - + public static void beforeClass() throws SQLException { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + stmt = conn.createStatement(); + stmt.execute("drop database if exists restful_test"); + stmt.execute("create database if not exists restful_test"); + stmt.execute("use restful_test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); + stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); + rs = stmt.executeQuery("select * from restful_test.weather"); + rs.next(); } @AfterClass - public static void afterClass() { - try { - if (rs != null) - rs.close(); - if (stmt != null) - stmt.close(); - if (conn != null) - conn.close(); - } catch (SQLException e) { - e.printStackTrace(); + public static void afterClass() throws SQLException { + if (rs != null) + rs.close(); + if (stmt != null) { + stmt.execute("drop database if exists restful_test"); + stmt.close(); } + if (conn != null) + conn.close(); } } \ No newline at end of file diff --git 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java index a28bdbe2e5f6e0d545241a80071d85b0964a4102..4893e6062f8719152539d80a6da21730d47dfa92 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java @@ -572,11 +572,14 @@ public class SQLTest { @BeforeClass public static void before() throws SQLException { - connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); } @AfterClass public static void after() throws SQLException { + Statement stmt = connection.createStatement(); + stmt.execute("drop database if exists restful_test"); + stmt.close(); connection.close(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java index a78284b7a2ecf1b43b96180fa9d819e89ecdc595..f0cd200e04bc66bb0571534c99a348c3a823fcb3 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java @@ -1,6 +1,9 @@ package com.taosdata.jdbc.rs; -import org.junit.*; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; import java.sql.*; @@ -9,9 +12,8 @@ public class WasNullTest { private static final String host = "127.0.0.1"; private Connection conn; - @Test - public void testGetTimestamp() { + public void testGetTimestamp() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.execute("drop table if exists weather"); stmt.execute("create table if not exists weather(f1 timestamp, f2 timestamp, f3 int)"); @@ -34,14 +36,11 @@ public class WasNullTest { } } } - - } catch (SQLException e) { - e.printStackTrace(); } } @Test - public void testGetObject() { + public void testGetObject() throws SQLException { try (Statement stmt = conn.createStatement()) { stmt.execute("drop table if exists weather"); stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); @@ -63,32 +62,25 @@ public class WasNullTest { } } - } catch (SQLException e) { - e.printStackTrace(); } } @Before - public void before() { - try { - conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); - Statement stmt = conn.createStatement(); + public void before() throws SQLException { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + try (Statement stmt = conn.createStatement()) { stmt.execute("drop database if exists restful_test"); stmt.execute("create database if not exists restful_test"); - } catch (SQLException e) { - e.printStackTrace(); + stmt.execute("use restful_test"); } } @After - public void after() { - try { + public void after() throws SQLException { + if (conn != null) { Statement statement = conn.createStatement(); statement.execute("drop database if exists restful_test"); - if (conn != null) - conn.close(); - } catch (SQLException e) { - e.printStackTrace(); + conn.close(); } } } diff --git a/src/connector/python/README.md b/src/connector/python/README.md index 
679735131105739ae59940c29b51f57496a2057d..1bde964828f1c52bf65e62ef67f2fdb7fc90c355 100644 --- a/src/connector/python/README.md +++ b/src/connector/python/README.md @@ -51,7 +51,7 @@ conn.close() import taos conn = taos.connect() -conn.exec("create database if not exists pytest") +conn.execute("create database if not exists pytest") result = conn.query("show databases") num_of_fields = result.field_count @@ -60,7 +60,7 @@ for field in result.fields: for row in result: print(row) result.close() -conn.exec("drop database pytest") +conn.execute("drop database pytest") conn.close() ``` @@ -136,11 +136,11 @@ from taos import * conn = connect() dbname = "pytest_taos_stmt" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) -conn.exec( +conn.execute( "create table if not exists log(ts timestamp, bo bool, nil tinyint, \ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ su smallint unsigned, iu int unsigned, bu bigint unsigned, \ @@ -196,11 +196,11 @@ from taos import * conn = connect() dbname = "pytest_taos_stmt" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) -conn.exec( +conn.execute( "create table if not exists log(ts timestamp, bo bool, nil tinyint, \ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ su smallint unsigned, iu int unsigned, bu bigint unsigned, \ @@ -249,12 +249,12 @@ import taos conn = taos.connect() dbname = "pytest_taos_subscribe_callback" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) -conn.exec("create table if not exists log(ts timestamp, n int)") +conn.execute("create table if not exists log(ts timestamp, n int)") for i in range(10): - conn.exec("insert into log values(now, %d)" % i) + conn.execute("insert into log values(now, %d)" % i) sub = conn.subscribe(True, "test", "select * from log", 1000) print("# consume from begin") @@ -263,14 +263,14 @@ for ts, n in sub.consume(): print("# consume new data") for i in range(5): - conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i)) + conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i)) result = sub.consume() for ts, n in result: print(ts, n) print("# consume with a stop condition") for i in range(10): - conn.exec("insert into log values(now, %d)" % int(random() * 10)) + conn.execute("insert into log values(now, %d)" % int(random() * 10)) result = sub.consume() try: ts, n = next(result) @@ -284,7 +284,7 @@ for i in range(10): sub.close() -conn.exec("drop database if exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) conn.close() ``` @@ -311,23 +311,23 @@ def test_subscribe_callback(conn): # type: (TaosConnection) -> None dbname = "pytest_taos_subscribe_callback" try: - conn.exec("drop database if exists %s" % dbname) - conn.exec("create database if not exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) - conn.exec("create table if not exists log(ts timestamp, n int)") + 
conn.execute("create table if not exists log(ts timestamp, n int)") print("# subscribe with callback") sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback) for i in range(10): - conn.exec("insert into log values(now, %d)" % i) + conn.execute("insert into log values(now, %d)" % i) time.sleep(0.7) sub.close() - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() except Exception as err: - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() raise err @@ -374,10 +374,10 @@ def test_stream(conn): # type: (TaosConnection) -> None dbname = "pytest_taos_stream" try: - conn.exec("drop database if exists %s" % dbname) - conn.exec("create database if not exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) + conn.execute("create database if not exists %s" % dbname) conn.select_db(dbname) - conn.exec("create table if not exists log(ts timestamp, n int)") + conn.execute("create table if not exists log(ts timestamp, n int)") result = conn.query("select count(*) from log interval(5s)") assert result.field_count == 2 @@ -386,13 +386,13 @@ def test_stream(conn): stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter)) for _ in range(0, 20): - conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") + conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") time.sleep(2) stream.close() - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() except Exception as err: - conn.exec("drop database if exists %s" % dbname) + conn.execute("drop database if exists %s" % dbname) conn.close() raise err @@ -408,8 +408,8 @@ import taos conn = taos.connect() dbname = "pytest_line" -conn.exec("drop database if exists %s" % dbname) -conn.exec("create database if not exists %s precision 'us'" % dbname) +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s precision 'us'" % dbname) conn.select_db(dbname) lines = [ @@ -431,7 +431,7 @@ for row in result: result.close() -conn.exec("drop database if exists %s" % dbname) +conn.execute("drop database if exists %s" % dbname) conn.close() ``` diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 87128521730c9c58f3c3dd9b35ab3f919f6921ec..bc6631284593c74e12842b4c6ea9994d099c3dd9 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -78,14 +78,14 @@ static SStep tsDnodeSteps[] = { {"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt}, {"dnode-mread", dnodeInitMRead, NULL}, {"dnode-mwrite", dnodeInitMWrite, NULL}, - {"dnode-mpeer", dnodeInitMPeer, NULL}, + {"dnode-mpeer", dnodeInitMPeer, NULL}, {"dnode-client", dnodeInitClient, dnodeCleanupClient}, {"dnode-server", dnodeInitServer, dnodeCleanupServer}, {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, {"dnode-mread", NULL, dnodeCleanupMRead}, {"dnode-mwrite", NULL, dnodeCleanupMWrite}, - {"dnode-mpeer", NULL, dnodeCleanupMPeer}, + {"dnode-mpeer", NULL, dnodeCleanupMPeer}, {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, {"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, @@ -121,7 +121,7 @@ static int dnodeCreateDir(const char *dir) { if (mkdir(dir, 0755) != 0 && errno != EEXIST) { return -1; } - + return 0; } 
@@ -263,8 +263,8 @@ static int32_t dnodeInitStorage() { if (tsDiskCfgNum == 1 && dnodeCreateDir(tsDataDir) < 0) { dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno)); return -1; - } - + } + if (tfsInit(tsDiskCfg, tsDiskCfgNum) < 0) { dError("failed to init TFS since %s", tstrerror(terrno)); return -1; @@ -296,7 +296,7 @@ static int32_t dnodeInitStorage() { if (dnodeCreateDir(tsMnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno)); return -1; - } + } if (dnodeCreateDir(tsDnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno)); diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index a5b0e9fe30e88f89af2e79af16602dac9500a305..93d218581cfb7d1738fa5ba3f3afd8c8f0d70dd7 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -127,9 +127,20 @@ int32_t dnodeInitVnodes() { int32_t threadNum = tsNumOfCores; int32_t vnodesPerThread = numOfVnodes / threadNum + 1; SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread)); + + if (threads == NULL) { + return TSDB_CODE_DND_OUT_OF_MEMORY; + } + for (int32_t t = 0; t < threadNum; ++t) { threads[t].threadIndex = t; threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t)); + + if (threads[t].vnodeList == NULL) { + dError("vnodeList allocation failed"); + status = TSDB_CODE_DND_OUT_OF_MEMORY; + goto DNODE_INIT_VNODES_OUT; + } } for (int32_t v = 0; v < numOfVnodes; ++v) { @@ -163,18 +174,24 @@ int32_t dnodeInitVnodes() { } openVnodes += pThread->opened; failedVnodes += pThread->failed; - free(pThread->vnodeList); } - free(threads); dInfo("there are total vnodes:%d, opened:%d", numOfVnodes, openVnodes); if (failedVnodes != 0) { dError("there are total vnodes:%d, failed:%d", numOfVnodes, failedVnodes); - return -1; + status = TSDB_CODE_DND_VNODE_OPEN_FAILED; } - return TSDB_CODE_SUCCESS; +DNODE_INIT_VNODES_OUT: + + for (int32_t t = 0; t < threadNum; ++t) { + SOpenVnodeThread *pThread = &threads[t]; + free(pThread->vnodeList); + } + free(threads); + + return status; } void dnodeCleanupVnodes() { diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 53c99f05bc44951202e2b673a40aced68c90eda5..72c725ed198538f34f6b317cffadd5a3682a3d5d 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -24,7 +24,7 @@ extern "C" { #include #define TAOS_DEF_ERROR_CODE(mod, code) ((int32_t)((0x80000000 | ((mod)<<16) | (code)))) - + #define TAOS_SYSTEM_ERROR(code) (0x80ff0000 | (code)) #define TAOS_SUCCEEDED(err) ((err) >= 0) #define TAOS_FAILED(err) ((err) < 0) @@ -33,183 +33,183 @@ const char* tstrerror(int32_t err); int32_t* taosGetErrno(); #define terrno (*taosGetErrno()) - + #define TSDB_CODE_SUCCESS 0 #define TSDB_CODE_FAILED -1 // unknown or needn't tell detail error // rpc -#define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress") -#define TSDB_CODE_RPC_AUTH_REQUIRED TAOS_DEF_ERROR_CODE(0, 0x0002) //"Authentication required") -#define TSDB_CODE_RPC_AUTH_FAILURE TAOS_DEF_ERROR_CODE(0, 0x0003) //"Authentication failure") -#define TSDB_CODE_RPC_REDIRECT TAOS_DEF_ERROR_CODE(0, 0x0004) //"Redirect") -#define TSDB_CODE_RPC_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0005) //"System not ready") // peer is not ready to process data -#define TSDB_CODE_RPC_ALREADY_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0006) //"Message already processed") -#define TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x0007) //"Last session not finished") -#define TSDB_CODE_RPC_MISMATCHED_LINK_ID 
TAOS_DEF_ERROR_CODE(0, 0x0008) //"Mismatched meter id") -#define TSDB_CODE_RPC_TOO_SLOW TAOS_DEF_ERROR_CODE(0, 0x0009) //"Processing of request timed out") -#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x000A) //"Number of sessions reached limit") // too many sessions -#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x000B) //"Unable to establish connection") -#define TSDB_CODE_RPC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x000C) //"Unexpected generic error in RPC") -#define TSDB_CODE_RPC_UNEXPECTED_RESPONSE TAOS_DEF_ERROR_CODE(0, 0x000D) //"Unexpected response") -#define TSDB_CODE_RPC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x000E) //"Invalid value") -#define TSDB_CODE_RPC_INVALID_TRAN_ID TAOS_DEF_ERROR_CODE(0, 0x000F) //"Invalid transaction id") -#define TSDB_CODE_RPC_INVALID_SESSION_ID TAOS_DEF_ERROR_CODE(0, 0x0010) //"Invalid session id") -#define TSDB_CODE_RPC_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0011) //"Invalid message type") -#define TSDB_CODE_RPC_INVALID_RESPONSE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0012) //"Invalid response type") -#define TSDB_CODE_RPC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0013) //"Client and server's time is not synchronized") -#define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) //"Database not ready") -#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) //"Unable to resolve FQDN") -#define TSDB_CODE_RPC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0016) //"Invalid app version") +#define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress" +#define TSDB_CODE_RPC_AUTH_REQUIRED TAOS_DEF_ERROR_CODE(0, 0x0002) //"Authentication required" +#define TSDB_CODE_RPC_AUTH_FAILURE TAOS_DEF_ERROR_CODE(0, 0x0003) //"Authentication failure" +#define TSDB_CODE_RPC_REDIRECT TAOS_DEF_ERROR_CODE(0, 0x0004) //"Redirect" +#define TSDB_CODE_RPC_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0005) //"System not ready" // peer is not ready to process data +#define TSDB_CODE_RPC_ALREADY_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0006) //"Message already processed" +#define TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x0007) //"Last session not finished" +#define TSDB_CODE_RPC_MISMATCHED_LINK_ID TAOS_DEF_ERROR_CODE(0, 0x0008) //"Mismatched meter id" +#define TSDB_CODE_RPC_TOO_SLOW TAOS_DEF_ERROR_CODE(0, 0x0009) //"Processing of request timed out" +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x000A) //"Number of sessions reached limit" // too many sessions +#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x000B) //"Unable to establish connection" +#define TSDB_CODE_RPC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x000C) //"Unexpected generic error in RPC" +#define TSDB_CODE_RPC_UNEXPECTED_RESPONSE TAOS_DEF_ERROR_CODE(0, 0x000D) //"Unexpected response" +#define TSDB_CODE_RPC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x000E) //"Invalid value" +#define TSDB_CODE_RPC_INVALID_TRAN_ID TAOS_DEF_ERROR_CODE(0, 0x000F) //"Invalid transaction id" +#define TSDB_CODE_RPC_INVALID_SESSION_ID TAOS_DEF_ERROR_CODE(0, 0x0010) //"Invalid session id" +#define TSDB_CODE_RPC_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0011) //"Invalid message type" +#define TSDB_CODE_RPC_INVALID_RESPONSE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0012) //"Invalid response type" +#define TSDB_CODE_RPC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0013) //"Client and server's time is not synchronized" +#define TSDB_CODE_APP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0014) //"Database not ready" +#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0015) //"Unable to 
resolve FQDN" +#define TSDB_CODE_RPC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0016) //"Invalid app version" //common & util -#define TSDB_CODE_COM_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //"Operation not supported") -#define TSDB_CODE_COM_MEMORY_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0101) //"Memory corrupted") -#define TSDB_CODE_COM_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0102) //"Out of memory") -#define TSDB_CODE_COM_INVALID_CFG_MSG TAOS_DEF_ERROR_CODE(0, 0x0103) //"Invalid config message") -#define TSDB_CODE_COM_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0104) //"Data file corrupted") -#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0105) //"Ref out of memory") -#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0106) //"too many Ref Objs") -#define TSDB_CODE_REF_ID_REMOVED TAOS_DEF_ERROR_CODE(0, 0x0107) //"Ref ID is removed") -#define TSDB_CODE_REF_INVALID_ID TAOS_DEF_ERROR_CODE(0, 0x0108) //"Invalid Ref ID") -#define TSDB_CODE_REF_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0109) //"Ref is already there") -#define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) //"Ref is not there") +#define TSDB_CODE_COM_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //"Operation not supported" +#define TSDB_CODE_COM_MEMORY_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0101) //"Memory corrupted" +#define TSDB_CODE_COM_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0102) //"Out of memory" +#define TSDB_CODE_COM_INVALID_CFG_MSG TAOS_DEF_ERROR_CODE(0, 0x0103) //"Invalid config message" +#define TSDB_CODE_COM_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0104) //"Data file corrupted" +#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0105) //"Ref out of memory" +#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0106) //"too many Ref Objs" +#define TSDB_CODE_REF_ID_REMOVED TAOS_DEF_ERROR_CODE(0, 0x0107) //"Ref ID is removed" +#define TSDB_CODE_REF_INVALID_ID TAOS_DEF_ERROR_CODE(0, 0x0108) //"Invalid Ref ID" +#define TSDB_CODE_REF_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0109) //"Ref is already there" +#define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) //"Ref is not there" //client -#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) //"Invalid Operation") -#define TSDB_CODE_TSC_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0201) //"Invalid qhandle") -#define TSDB_CODE_TSC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0202) //"Invalid combination of client/service time") -#define TSDB_CODE_TSC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x0203) //"Invalid value in client") -#define TSDB_CODE_TSC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0204) //"Invalid client version") -#define TSDB_CODE_TSC_INVALID_IE TAOS_DEF_ERROR_CODE(0, 0x0205) //"Invalid client ie") -#define TSDB_CODE_TSC_INVALID_FQDN TAOS_DEF_ERROR_CODE(0, 0x0206) //"Invalid host name") -#define TSDB_CODE_TSC_INVALID_USER_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0207) //"Invalid user name") -#define TSDB_CODE_TSC_INVALID_PASS_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0208) //"Invalid password") -#define TSDB_CODE_TSC_INVALID_DB_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0209) //"Database name too long") -#define TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH TAOS_DEF_ERROR_CODE(0, 0x020A) //"Table name too long") -#define TSDB_CODE_TSC_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x020B) //"Invalid connection") -#define TSDB_CODE_TSC_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x020C) //"System out of memory") -#define TSDB_CODE_TSC_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x020D) //"System out of disk space") -#define TSDB_CODE_TSC_QUERY_CACHE_ERASED TAOS_DEF_ERROR_CODE(0, 0x020E) //"Query 
cache erased") -#define TSDB_CODE_TSC_QUERY_CANCELLED TAOS_DEF_ERROR_CODE(0, 0x020F) //"Query terminated") -#define TSDB_CODE_TSC_SORTED_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0210) //"Result set too large to be sorted") // too many result for ordered super table projection query -#define TSDB_CODE_TSC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0211) //"Application error") -#define TSDB_CODE_TSC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0212) //"Action in progress") -#define TSDB_CODE_TSC_DISCONNECTED TAOS_DEF_ERROR_CODE(0, 0x0213) //"Disconnected from service") -#define TSDB_CODE_TSC_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0214) //"No write permission") -#define TSDB_CODE_TSC_CONN_KILLED TAOS_DEF_ERROR_CODE(0, 0x0215) //"Connection killed") -#define TSDB_CODE_TSC_SQL_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x0216) //"Syntax error in SQL") -#define TSDB_CODE_TSC_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0217) //"Database not specified or available") -#define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218) //"Table does not exist") -#define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219) //"SQL statement too long check maxSQLLength config") -#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A) //"File is empty") -#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B) //"Syntax error in Line") -#define TSDB_CODE_TSC_NO_META_CACHED TAOS_DEF_ERROR_CODE(0, 0x021C) //"No table meta cached") -#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"duplicated column names") -#define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length") -#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length") -#define TSDB_CODE_TSC_DUP_TAG_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220) //"duplicated tag names") -#define TSDB_CODE_TSC_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x0221) //"Invalid JSON format") -#define TSDB_CODE_TSC_INVALID_JSON_TYPE TAOS_DEF_ERROR_CODE(0, 0x0222) //"Invalid JSON data type") -#define TSDB_CODE_TSC_INVALID_JSON_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0223) //"Invalid JSON configuration") -#define TSDB_CODE_TSC_VALUE_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x0224) //"Value out of range") -#define TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x0225) //"Invalid line protocol type") -#define TSDB_CODE_TSC_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x0226) //"Invalid timestamp precision type") +#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) //"Invalid Operation" +#define TSDB_CODE_TSC_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0201) //"Invalid qhandle" +#define TSDB_CODE_TSC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0202) //"Invalid combination of client/service time" +#define TSDB_CODE_TSC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x0203) //"Invalid value in client" +#define TSDB_CODE_TSC_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0204) //"Invalid client version" +#define TSDB_CODE_TSC_INVALID_IE TAOS_DEF_ERROR_CODE(0, 0x0205) //"Invalid client ie" +#define TSDB_CODE_TSC_INVALID_FQDN TAOS_DEF_ERROR_CODE(0, 0x0206) //"Invalid host name" +#define TSDB_CODE_TSC_INVALID_USER_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0207) //"Invalid user name" +#define TSDB_CODE_TSC_INVALID_PASS_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0208) //"Invalid password" +#define TSDB_CODE_TSC_INVALID_DB_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0209) //"Database name too long" +#define TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH TAOS_DEF_ERROR_CODE(0, 0x020A) //"Table name too long" +#define 
TSDB_CODE_TSC_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x020B) //"Invalid connection" +#define TSDB_CODE_TSC_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x020C) //"System out of memory" +#define TSDB_CODE_TSC_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x020D) //"System out of disk space" +#define TSDB_CODE_TSC_QUERY_CACHE_ERASED TAOS_DEF_ERROR_CODE(0, 0x020E) //"Query cache erased" +#define TSDB_CODE_TSC_QUERY_CANCELLED TAOS_DEF_ERROR_CODE(0, 0x020F) //"Query terminated" +#define TSDB_CODE_TSC_SORTED_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0210) //"Result set too large to be sorted" // too many result for ordered super table projection query +#define TSDB_CODE_TSC_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0211) //"Application error" +#define TSDB_CODE_TSC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0212) //"Action in progress" +#define TSDB_CODE_TSC_DISCONNECTED TAOS_DEF_ERROR_CODE(0, 0x0213) //"Disconnected from service" +#define TSDB_CODE_TSC_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0214) //"No write permission" +#define TSDB_CODE_TSC_CONN_KILLED TAOS_DEF_ERROR_CODE(0, 0x0215) //"Connection killed" +#define TSDB_CODE_TSC_SQL_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x0216) //"Syntax error in SQL" +#define TSDB_CODE_TSC_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0217) //"Database not specified or available" +#define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218) //"Table does not exist" +#define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219) //"SQL statement too long check maxSQLLength config" +#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A) //"File is empty" +#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B) //"Syntax error in Line" +#define TSDB_CODE_TSC_NO_META_CACHED TAOS_DEF_ERROR_CODE(0, 0x021C) //"No table meta cached" +#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"duplicated column names" +#define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length" +#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length" +#define TSDB_CODE_TSC_DUP_TAG_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220) //"duplicated tag names" +#define TSDB_CODE_TSC_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x0221) //"Invalid JSON format" +#define TSDB_CODE_TSC_INVALID_JSON_TYPE TAOS_DEF_ERROR_CODE(0, 0x0222) //"Invalid JSON data type" +#define TSDB_CODE_TSC_INVALID_JSON_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0223) //"Invalid JSON configuration" +#define TSDB_CODE_TSC_VALUE_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x0224) //"Value out of range" +#define TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x0225) //"Invalid line protocol type" +#define TSDB_CODE_TSC_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x0226) //"Invalid timestamp precision type" // mnode -#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed") -#define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) //"Message is progressing") -#define TSDB_CODE_MND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0302) //"Messag need to be reprocessed") -#define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303) //"Insufficient privilege for operation") -#define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0304) //"Unexpected generic error in mnode") -#define TSDB_CODE_MND_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x0305) //"Invalid message connection") -#define TSDB_CODE_MND_INVALID_MSG_VERSION TAOS_DEF_ERROR_CODE(0, 0x0306) //"Incompatible protocol version") -#define 
TSDB_CODE_MND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0307) //"Invalid message length") -#define TSDB_CODE_MND_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0308) //"Invalid message type") -#define TSDB_CODE_MND_TOO_MANY_SHELL_CONNS TAOS_DEF_ERROR_CODE(0, 0x0309) //"Too many connections") -#define TSDB_CODE_MND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x030A) //"Out of memory in mnode") -#define TSDB_CODE_MND_INVALID_SHOWOBJ TAOS_DEF_ERROR_CODE(0, 0x030B) //"Data expired") -#define TSDB_CODE_MND_INVALID_QUERY_ID TAOS_DEF_ERROR_CODE(0, 0x030C) //"Invalid query id") -#define TSDB_CODE_MND_INVALID_STREAM_ID TAOS_DEF_ERROR_CODE(0, 0x030D) //"Invalid stream id") -#define TSDB_CODE_MND_INVALID_CONN_ID TAOS_DEF_ERROR_CODE(0, 0x030E) //"Invalid connection id") -#define TSDB_CODE_MND_MNODE_IS_RUNNING TAOS_DEF_ERROR_CODE(0, 0x0310) //"mnode is already running") -#define TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC TAOS_DEF_ERROR_CODE(0, 0x0311) //"failed to config sync") -#define TSDB_CODE_MND_FAILED_TO_START_SYNC TAOS_DEF_ERROR_CODE(0, 0x0312) //"failed to start sync") -#define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) //"failed to create mnode dir") -#define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) //"failed to init components") - -#define TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) //"Object already there") -#define TSDB_CODE_MND_SDB_ERROR TAOS_DEF_ERROR_CODE(0, 0x0321) //"Unexpected generic error in sdb") -#define TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0322) //"Invalid table type") -#define TSDB_CODE_MND_SDB_OBJ_NOT_THERE TAOS_DEF_ERROR_CODE(0, 0x0323) //"Object not there") -#define TSDB_CODE_MND_SDB_INVAID_META_ROW TAOS_DEF_ERROR_CODE(0, 0x0324) //"Invalid meta row") -#define TSDB_CODE_MND_SDB_INVAID_KEY_TYPE TAOS_DEF_ERROR_CODE(0, 0x0325) //"Invalid key type") - -#define TSDB_CODE_MND_DNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0330) //"DNode already exists") -#define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0331) //"DNode does not exist") -#define TSDB_CODE_MND_VGROUP_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0332) //"VGroup does not exist") -#define TSDB_CODE_MND_NO_REMOVE_MASTER TAOS_DEF_ERROR_CODE(0, 0x0333) //"Master DNode cannot be removed") -#define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0334) //"Out of DNodes") -#define TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT TAOS_DEF_ERROR_CODE(0, 0x0335) //"Cluster cfg inconsistent") -#define TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION TAOS_DEF_ERROR_CODE(0, 0x0336) //"Invalid dnode cfg option") -#define TSDB_CODE_MND_BALANCE_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0337) //"Balance already enabled") -#define TSDB_CODE_MND_VGROUP_NOT_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0338) //"Vgroup not in dnode") -#define TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0339) //"Vgroup already in dnode") -#define TSDB_CODE_MND_DNODE_NOT_FREE TAOS_DEF_ERROR_CODE(0, 0x033A) //"Dnode not avaliable") -#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x033B) //"Cluster id not match") -#define TSDB_CODE_MND_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x033C) //"Cluster not ready") -#define TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033D) //"Dnode Id not configured") -#define TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033E) //"Dnode Ep not configured") - -#define TSDB_CODE_MND_ACCT_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0340) //"Account already exists") -#define TSDB_CODE_MND_INVALID_ACCT TAOS_DEF_ERROR_CODE(0, 0x0341) 
//"Invalid account") -#define TSDB_CODE_MND_INVALID_ACCT_OPTION TAOS_DEF_ERROR_CODE(0, 0x0342) //"Invalid account options") -#define TSDB_CODE_MND_ACCT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0343) //"Account authorization has expired") - -#define TSDB_CODE_MND_USER_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) //"User already exists") -#define TSDB_CODE_MND_INVALID_USER TAOS_DEF_ERROR_CODE(0, 0x0351) //"Invalid user") -#define TSDB_CODE_MND_INVALID_USER_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0352) //"Invalid user format") -#define TSDB_CODE_MND_INVALID_PASS_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0353) //"Invalid password format") -#define TSDB_CODE_MND_NO_USER_FROM_CONN TAOS_DEF_ERROR_CODE(0, 0x0354) //"Can not get user from conn") -#define TSDB_CODE_MND_TOO_MANY_USERS TAOS_DEF_ERROR_CODE(0, 0x0355) //"Too many users") - -#define TSDB_CODE_MND_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0360) //"Table already exists") -#define TSDB_CODE_MND_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0361) //"Table name too long") -#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist") -#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb") -#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags") -#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns") -#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series") -#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table -#define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long") -#define TSDB_CODE_MND_TAG_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0369) //"Tag already exists") -#define TSDB_CODE_MND_TAG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036A) //"Tag does not exist") -#define TSDB_CODE_MND_FIELD_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x036B) //"Field already exists") -#define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist") -#define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist") -#define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message") -#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes") - -#define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name") -#define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length") -#define TSDB_CODE_MND_INVALID_FUNC_CODE TAOS_DEF_ERROR_CODE(0, 0x0372) //"Invalid func code") -#define TSDB_CODE_MND_FUNC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0373) //"Func already exists") -#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func") -#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize") - -#define TSDB_CODE_MND_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0376) //"invalid tag length") -#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0377) //"invalid column length") - -#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available") -#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists") -#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) //"Invalid database options") -#define TSDB_CODE_MND_INVALID_DB 
TAOS_DEF_ERROR_CODE(0, 0x0383) //"Invalid database name") -#define TSDB_CODE_MND_MONITOR_DB_FORBIDDEN TAOS_DEF_ERROR_CODE(0, 0x0384) //"Cannot delete monitor database") -#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) //"Too many databases for account") -#define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) //"Database not available") -#define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) //"Database unsynced") - -#define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) //"Invalid database option: days out of range") -#define TSDB_CODE_MND_INVALID_DB_OPTION_KEEP TAOS_DEF_ERROR_CODE(0, 0x0391) //"Invalid database option: keep >= keep1 >= keep0 >= days") +#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed" +#define TSDB_CODE_MND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0301) //"Message is progressing" +#define TSDB_CODE_MND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0302) //"Messag need to be reprocessed" +#define TSDB_CODE_MND_NO_RIGHTS TAOS_DEF_ERROR_CODE(0, 0x0303) //"Insufficient privilege for operation" +#define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0304) //"Unexpected generic error in mnode" +#define TSDB_CODE_MND_INVALID_CONNECTION TAOS_DEF_ERROR_CODE(0, 0x0305) //"Invalid message connection" +#define TSDB_CODE_MND_INVALID_MSG_VERSION TAOS_DEF_ERROR_CODE(0, 0x0306) //"Incompatible protocol version" +#define TSDB_CODE_MND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0307) //"Invalid message length" +#define TSDB_CODE_MND_INVALID_MSG_TYPE TAOS_DEF_ERROR_CODE(0, 0x0308) //"Invalid message type" +#define TSDB_CODE_MND_TOO_MANY_SHELL_CONNS TAOS_DEF_ERROR_CODE(0, 0x0309) //"Too many connections" +#define TSDB_CODE_MND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x030A) //"Out of memory in mnode" +#define TSDB_CODE_MND_INVALID_SHOWOBJ TAOS_DEF_ERROR_CODE(0, 0x030B) //"Data expired" +#define TSDB_CODE_MND_INVALID_QUERY_ID TAOS_DEF_ERROR_CODE(0, 0x030C) //"Invalid query id" +#define TSDB_CODE_MND_INVALID_STREAM_ID TAOS_DEF_ERROR_CODE(0, 0x030D) //"Invalid stream id" +#define TSDB_CODE_MND_INVALID_CONN_ID TAOS_DEF_ERROR_CODE(0, 0x030E) //"Invalid connection id" +#define TSDB_CODE_MND_MNODE_IS_RUNNING TAOS_DEF_ERROR_CODE(0, 0x0310) //"mnode is already running" +#define TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC TAOS_DEF_ERROR_CODE(0, 0x0311) //"failed to config sync" +#define TSDB_CODE_MND_FAILED_TO_START_SYNC TAOS_DEF_ERROR_CODE(0, 0x0312) //"failed to start sync" +#define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) //"failed to create mnode dir" +#define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) //"failed to init components" + +#define TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) //"Object already there" +#define TSDB_CODE_MND_SDB_ERROR TAOS_DEF_ERROR_CODE(0, 0x0321) //"Unexpected generic error in sdb" +#define TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0322) //"Invalid table type" +#define TSDB_CODE_MND_SDB_OBJ_NOT_THERE TAOS_DEF_ERROR_CODE(0, 0x0323) //"Object not there" +#define TSDB_CODE_MND_SDB_INVAID_META_ROW TAOS_DEF_ERROR_CODE(0, 0x0324) //"Invalid meta row" +#define TSDB_CODE_MND_SDB_INVAID_KEY_TYPE TAOS_DEF_ERROR_CODE(0, 0x0325) //"Invalid key type" + +#define TSDB_CODE_MND_DNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0330) //"DNode already exists" +#define TSDB_CODE_MND_DNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0331) //"DNode does not exist" +#define TSDB_CODE_MND_VGROUP_NOT_EXIST 
TAOS_DEF_ERROR_CODE(0, 0x0332) //"VGroup does not exist" +#define TSDB_CODE_MND_NO_REMOVE_MASTER TAOS_DEF_ERROR_CODE(0, 0x0333) //"Master DNode cannot be removed" +#define TSDB_CODE_MND_NO_ENOUGH_DNODES TAOS_DEF_ERROR_CODE(0, 0x0334) //"Out of DNodes" +#define TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT TAOS_DEF_ERROR_CODE(0, 0x0335) //"Cluster cfg inconsistent" +#define TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION TAOS_DEF_ERROR_CODE(0, 0x0336) //"Invalid dnode cfg option" +#define TSDB_CODE_MND_BALANCE_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0337) //"Balance already enabled" +#define TSDB_CODE_MND_VGROUP_NOT_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0338) //"Vgroup not in dnode" +#define TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE TAOS_DEF_ERROR_CODE(0, 0x0339) //"Vgroup already in dnode" +#define TSDB_CODE_MND_DNODE_NOT_FREE TAOS_DEF_ERROR_CODE(0, 0x033A) //"Dnode not avaliable" +#define TSDB_CODE_MND_INVALID_CLUSTER_ID TAOS_DEF_ERROR_CODE(0, 0x033B) //"Cluster id not match" +#define TSDB_CODE_MND_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x033C) //"Cluster not ready" +#define TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033D) //"Dnode Id not configured" +#define TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED TAOS_DEF_ERROR_CODE(0, 0x033E) //"Dnode Ep not configured" + +#define TSDB_CODE_MND_ACCT_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0340) //"Account already exists" +#define TSDB_CODE_MND_INVALID_ACCT TAOS_DEF_ERROR_CODE(0, 0x0341) //"Invalid account" +#define TSDB_CODE_MND_INVALID_ACCT_OPTION TAOS_DEF_ERROR_CODE(0, 0x0342) //"Invalid account options" +#define TSDB_CODE_MND_ACCT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0343) //"Account authorization has expired" + +#define TSDB_CODE_MND_USER_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0350) //"User already exists" +#define TSDB_CODE_MND_INVALID_USER TAOS_DEF_ERROR_CODE(0, 0x0351) //"Invalid user" +#define TSDB_CODE_MND_INVALID_USER_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0352) //"Invalid user format" +#define TSDB_CODE_MND_INVALID_PASS_FORMAT TAOS_DEF_ERROR_CODE(0, 0x0353) //"Invalid password format" +#define TSDB_CODE_MND_NO_USER_FROM_CONN TAOS_DEF_ERROR_CODE(0, 0x0354) //"Can not get user from conn" +#define TSDB_CODE_MND_TOO_MANY_USERS TAOS_DEF_ERROR_CODE(0, 0x0355) //"Too many users" + +#define TSDB_CODE_MND_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0360) //"Table already exists" +#define TSDB_CODE_MND_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0361) //"Table name too long" +#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist" +#define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb" +#define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags" +#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns" +#define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series" +#define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table" // operation only available for super table +#define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long" +#define TSDB_CODE_MND_TAG_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0369) //"Tag already exists" +#define TSDB_CODE_MND_TAG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036A) //"Tag does not exist" +#define TSDB_CODE_MND_FIELD_ALREAY_EXIST TAOS_DEF_ERROR_CODE(0, 0x036B) //"Field already exists" +#define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist" +#define TSDB_CODE_MND_INVALID_STABLE_NAME 
TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist" +#define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message" +#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes" + +#define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name" +#define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length" +#define TSDB_CODE_MND_INVALID_FUNC_CODE TAOS_DEF_ERROR_CODE(0, 0x0372) //"Invalid func code" +#define TSDB_CODE_MND_FUNC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0373) //"Func already exists" +#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func" +#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize" + +#define TSDB_CODE_MND_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0376) //"invalid tag length" +#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0377) //"invalid column length" + +#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available" +#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists" +#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) //"Invalid database options" +#define TSDB_CODE_MND_INVALID_DB TAOS_DEF_ERROR_CODE(0, 0x0383) //"Invalid database name" +#define TSDB_CODE_MND_MONITOR_DB_FORBIDDEN TAOS_DEF_ERROR_CODE(0, 0x0384) //"Cannot delete monitor database" +#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) //"Too many databases for account" +#define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) //"Database not available" +#define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) //"Database unsynced" + +#define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) //"Invalid database option: days out of range" +#define TSDB_CODE_MND_INVALID_DB_OPTION_KEEP TAOS_DEF_ERROR_CODE(0, 0x0391) //"Invalid database option: keep >= keep1 >= keep0 >= days" #define TSDB_CODE_MND_INVALID_TOPIC TAOS_DEF_ERROR_CODE(0, 0x0392) //"Invalid topic name) #define TSDB_CODE_MND_INVALID_TOPIC_OPTION TAOS_DEF_ERROR_CODE(0, 0x0393) //"Invalid topic option) @@ -217,251 +217,252 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOPIC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0395) //"Topic already exists) // dnode -#define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) //"Message not processed") -#define TSDB_CODE_DND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0401) //"Dnode out of memory") -#define TSDB_CODE_DND_NO_WRITE_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0402) //"No permission for disk files in dnode") -#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length") -#define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress") -#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories") +#define TSDB_CODE_DND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0400) //"Message not processed" +#define TSDB_CODE_DND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0401) //"Dnode out of memory" +#define TSDB_CODE_DND_NO_WRITE_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0402) //"No permission for disk files in dnode" +#define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length" +#define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress" 
+#define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories" #define TSDB_CODE_DND_EXITING TAOS_DEF_ERROR_CODE(0, 0x0406) //"Dnode is exiting" +#define TSDB_CODE_DND_VNODE_OPEN_FAILED TAOS_DEF_ERROR_CODE(0, 0x0407) //"Vnode open failed" // vnode -#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress") -#define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) //"Message not processed") -#define TSDB_CODE_VND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0502) //"Action need to be reprocessed") -#define TSDB_CODE_VND_INVALID_VGROUP_ID TAOS_DEF_ERROR_CODE(0, 0x0503) //"Invalid Vgroup ID") -#define TSDB_CODE_VND_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0504) //"Vnode initialization failed") -#define TSDB_CODE_VND_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0505) //"System out of disk space") -#define TSDB_CODE_VND_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0506) //"No write permission for disk files") -#define TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR TAOS_DEF_ERROR_CODE(0, 0x0507) //"Missing data file") -#define TSDB_CODE_VND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0508) //"Out of memory") -#define TSDB_CODE_VND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0509) //"Unexpected generic error in vnode") -#define TSDB_CODE_VND_INVALID_VRESION_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid version file") -#define TSDB_CODE_VND_IS_FULL TAOS_DEF_ERROR_CODE(0, 0x050B) //"Database memory is full for commit failed") -#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit") -#define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping") -#define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing") -#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing") -#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") -#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") -#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") -#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state") +#define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress" +#define TSDB_CODE_VND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0501) //"Message not processed" +#define TSDB_CODE_VND_ACTION_NEED_REPROCESSED TAOS_DEF_ERROR_CODE(0, 0x0502) //"Action need to be reprocessed" +#define TSDB_CODE_VND_INVALID_VGROUP_ID TAOS_DEF_ERROR_CODE(0, 0x0503) //"Invalid Vgroup ID" +#define TSDB_CODE_VND_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0504) //"Vnode initialization failed" +#define TSDB_CODE_VND_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0505) //"System out of disk space" +#define TSDB_CODE_VND_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0506) //"No write permission for disk files" +#define TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR TAOS_DEF_ERROR_CODE(0, 0x0507) //"Missing data file" +#define TSDB_CODE_VND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0508) //"Out of memory" +#define TSDB_CODE_VND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0509) //"Unexpected generic error in vnode" +#define TSDB_CODE_VND_INVALID_VRESION_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid version file" +#define TSDB_CODE_VND_IS_FULL TAOS_DEF_ERROR_CODE(0, 0x050B) //"Database memory is full for commit failed" +#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is 
full for waiting commit" +#define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping" +#define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing" +#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing" +#define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended" +#define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied" +#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing" +#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state" // tsdb -#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") -#define TSDB_CODE_TDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0601) //"Invalid table type") -#define TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0602) //"Invalid table schema version") -#define TSDB_CODE_TDB_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0603) //"Table already exists") -#define TSDB_CODE_TDB_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0604) //"Invalid configuration") -#define TSDB_CODE_TDB_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0605) //"Tsdb init failed") -#define TSDB_CODE_TDB_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0606) //"No diskspace for tsdb") -#define TSDB_CODE_TDB_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0607) //"No permission for disk files") -#define TSDB_CODE_TDB_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0608) //"Data file(s) corrupted") -#define TSDB_CODE_TDB_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0609) //"Out of memory") -#define TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE TAOS_DEF_ERROR_CODE(0, 0x060A) //"Tag too old") -#define TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x060B) //"Timestamp data out of range") -#define TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x060C) //"Submit message is messed up") -#define TSDB_CODE_TDB_INVALID_ACTION TAOS_DEF_ERROR_CODE(0, 0x060D) //"Invalid operation") -#define TSDB_CODE_TDB_INVALID_CREATE_TB_MSG TAOS_DEF_ERROR_CODE(0, 0x060E) //"Invalid creation of table") -#define TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM TAOS_DEF_ERROR_CODE(0, 0x060F) //"No table data in memory skiplist") -#define TSDB_CODE_TDB_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0610) //"File already exists") -#define TSDB_CODE_TDB_TABLE_RECONFIGURE TAOS_DEF_ERROR_CODE(0, 0x0611) //"Need to reconfigure table") -#define TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO TAOS_DEF_ERROR_CODE(0, 0x0612) //"Invalid information to create table") -#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk") -#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message") -#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value") -#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data") -#define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet") +#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID" +#define TSDB_CODE_TDB_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0601) //"Invalid table type" +#define TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0602) //"Invalid table schema version" +#define TSDB_CODE_TDB_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0603) //"Table already exists" +#define TSDB_CODE_TDB_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0604) //"Invalid 
configuration" +#define TSDB_CODE_TDB_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x0605) //"Tsdb init failed" +#define TSDB_CODE_TDB_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0606) //"No diskspace for tsdb" +#define TSDB_CODE_TDB_NO_DISK_PERMISSIONS TAOS_DEF_ERROR_CODE(0, 0x0607) //"No permission for disk files" +#define TSDB_CODE_TDB_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x0608) //"Data file(s) corrupted" +#define TSDB_CODE_TDB_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0609) //"Out of memory" +#define TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE TAOS_DEF_ERROR_CODE(0, 0x060A) //"Tag too old" +#define TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x060B) //"Timestamp data out of range" +#define TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x060C) //"Submit message is messed up" +#define TSDB_CODE_TDB_INVALID_ACTION TAOS_DEF_ERROR_CODE(0, 0x060D) //"Invalid operation" +#define TSDB_CODE_TDB_INVALID_CREATE_TB_MSG TAOS_DEF_ERROR_CODE(0, 0x060E) //"Invalid creation of table" +#define TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM TAOS_DEF_ERROR_CODE(0, 0x060F) //"No table data in memory skiplist" +#define TSDB_CODE_TDB_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x0610) //"File already exists" +#define TSDB_CODE_TDB_TABLE_RECONFIGURE TAOS_DEF_ERROR_CODE(0, 0x0611) //"Need to reconfigure table" +#define TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO TAOS_DEF_ERROR_CODE(0, 0x0612) //"Invalid information to create table" +#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk" +#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message" +#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value" +#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data" +#define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet" // query -#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle") -#define TSDB_CODE_QRY_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x0701) //"Invalid message") // failed to validate the sql expression msg by vnode -#define TSDB_CODE_QRY_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0702) //"No diskspace for query") -#define TSDB_CODE_QRY_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0703) //"System out of memory") -#define TSDB_CODE_QRY_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0704) //"Unexpected generic error in query") -#define TSDB_CODE_QRY_DUP_JOIN_KEY TAOS_DEF_ERROR_CODE(0, 0x0705) //"Duplicated join key") -#define TSDB_CODE_QRY_EXCEED_TAGS_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0706) //"Tag condition too many") -#define TSDB_CODE_QRY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0707) //"Query not ready") -#define TSDB_CODE_QRY_HAS_RSP TAOS_DEF_ERROR_CODE(0, 0x0708) //"Query should response") -#define TSDB_CODE_QRY_IN_EXEC TAOS_DEF_ERROR_CODE(0, 0x0709) //"Multiple retrieval of this query") -#define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query") -#define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached") -#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistency in replica") -#define TSDB_CODE_QRY_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x070D) //"System error") -#define TSDB_CODE_QRY_INVALID_TIME_CONDITION TAOS_DEF_ERROR_CODE(0, 0x070E) //"invalid time condition") +#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle" +#define TSDB_CODE_QRY_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x0701) 
//"Invalid message" // failed to validate the sql expression msg by vnode +#define TSDB_CODE_QRY_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x0702) //"No diskspace for query" +#define TSDB_CODE_QRY_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0703) //"System out of memory" +#define TSDB_CODE_QRY_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0704) //"Unexpected generic error in query" +#define TSDB_CODE_QRY_DUP_JOIN_KEY TAOS_DEF_ERROR_CODE(0, 0x0705) //"Duplicated join key" +#define TSDB_CODE_QRY_EXCEED_TAGS_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0706) //"Tag condition too many" +#define TSDB_CODE_QRY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0707) //"Query not ready" +#define TSDB_CODE_QRY_HAS_RSP TAOS_DEF_ERROR_CODE(0, 0x0708) //"Query should response" +#define TSDB_CODE_QRY_IN_EXEC TAOS_DEF_ERROR_CODE(0, 0x0709) //"Multiple retrieval of this query" +#define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query" +#define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached" +#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistency in replica" +#define TSDB_CODE_QRY_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x070D) //"System error" +#define TSDB_CODE_QRY_INVALID_TIME_CONDITION TAOS_DEF_ERROR_CODE(0, 0x070E) //"invalid time condition" // grant -#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired") -#define TSDB_CODE_GRANT_DNODE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0801) //"DNode creation limited by licence") -#define TSDB_CODE_GRANT_ACCT_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0802) //"Account creation limited by license") -#define TSDB_CODE_GRANT_TIMESERIES_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0803) //"Table creation limited by license") -#define TSDB_CODE_GRANT_DB_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0804) //"DB creation limited by license") -#define TSDB_CODE_GRANT_USER_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0805) //"User creation limited by license") -#define TSDB_CODE_GRANT_CONN_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0806) //"Conn creation limited by license") -#define TSDB_CODE_GRANT_STREAM_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0807) //"Stream creation limited by license") -#define TSDB_CODE_GRANT_SPEED_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0808) //"Write speed limited by license") -#define TSDB_CODE_GRANT_STORAGE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0809) //"Storage capacity limited by license") -#define TSDB_CODE_GRANT_QUERYTIME_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A) //"Query time limited by license") -#define TSDB_CODE_GRANT_CPU_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080B) //"CPU cores limited by license") +#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired" +#define TSDB_CODE_GRANT_DNODE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0801) //"DNode creation limited by licence" +#define TSDB_CODE_GRANT_ACCT_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0802) //"Account creation limited by license" +#define TSDB_CODE_GRANT_TIMESERIES_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0803) //"Table creation limited by license" +#define TSDB_CODE_GRANT_DB_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0804) //"DB creation limited by license" +#define TSDB_CODE_GRANT_USER_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0805) //"User creation limited by license" +#define TSDB_CODE_GRANT_CONN_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0806) //"Conn creation limited by license" +#define TSDB_CODE_GRANT_STREAM_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0807) //"Stream creation limited by license" +#define TSDB_CODE_GRANT_SPEED_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0808) //"Write speed limited by license" 
+#define TSDB_CODE_GRANT_STORAGE_LIMITED TAOS_DEF_ERROR_CODE(0, 0x0809) //"Storage capacity limited by license" +#define TSDB_CODE_GRANT_QUERYTIME_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080A) //"Query time limited by license" +#define TSDB_CODE_GRANT_CPU_LIMITED TAOS_DEF_ERROR_CODE(0, 0x080B) //"CPU cores limited by license" // sync -#define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) //"Invalid Sync Configuration") -#define TSDB_CODE_SYN_NOT_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0901) //"Sync module not enabled") -#define TSDB_CODE_SYN_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0902) //"Invalid Sync version") -#define TSDB_CODE_SYN_CONFIRM_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0903) //"Sync confirm expired") -#define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) //"Too many sync fwd infos") -#define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) //"Mismatched protocol") -#define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) //"Mismatched clusterId") -#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) //"Mismatched signature") -#define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) //"Invalid msg checksum") -#define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) //"Invalid msg length") -#define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) //"Invalid msg type") +#define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) //"Invalid Sync Configuration" +#define TSDB_CODE_SYN_NOT_ENABLED TAOS_DEF_ERROR_CODE(0, 0x0901) //"Sync module not enabled" +#define TSDB_CODE_SYN_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x0902) //"Invalid Sync version" +#define TSDB_CODE_SYN_CONFIRM_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0903) //"Sync confirm expired" +#define TSDB_CODE_SYN_TOO_MANY_FWDINFO TAOS_DEF_ERROR_CODE(0, 0x0904) //"Too many sync fwd infos" +#define TSDB_CODE_SYN_MISMATCHED_PROTOCOL TAOS_DEF_ERROR_CODE(0, 0x0905) //"Mismatched protocol" +#define TSDB_CODE_SYN_MISMATCHED_CLUSTERID TAOS_DEF_ERROR_CODE(0, 0x0906) //"Mismatched clusterId" +#define TSDB_CODE_SYN_MISMATCHED_SIGNATURE TAOS_DEF_ERROR_CODE(0, 0x0907) //"Mismatched signature" +#define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) //"Invalid msg checksum" +#define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) //"Invalid msg length" +#define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) //"Invalid msg type" // wal -#define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") -#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") -#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") +#define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal" +#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted" +#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit" // http -#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not online") -#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support") -#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format") -#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory") -#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big") -#define 
TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input") -#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty") -#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input") -#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd") -#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full") -#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error") -#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0") -#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip") -#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip") -#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login") - -#define TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version") -#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length") -#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization") -#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization") -#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization") -#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization") -#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method") -#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target") -#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version") -#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp") -#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status") -#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase") -#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf") -#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header") -#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key") -#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val") -#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size") -#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk") -#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section") -#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state") -#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section") - -#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0") -#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100") -#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error") - -#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) 
//"database name can not be null") -#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long") -#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat") -#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0") -#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K") -#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find") -#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string") -#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0") -#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long") -#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find") -#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer") -#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0") -#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find") -#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0") -#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long") -#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null") -#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null") -#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long") -#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string") -#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null") -#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null") -#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long") -#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find") -#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0") -#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long") -#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null") -#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null") -#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long") -#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string") -#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null") -#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string") -#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist") - -#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null") -#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long") -#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat") -#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0") 
-#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K") -#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find") -#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string") -#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0") -#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22") -#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find") -#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer") -#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0") -#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find") -#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0") -#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long") -#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null") -#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null") -#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long") -#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string") -#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null") -#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64") -#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find") -#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string") - -#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error") +#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not online" +#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support" +#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format" +#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory" +#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big" +#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input" +#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty" +#define TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input" +#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd" +#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full" +#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error" +#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0" +#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip" +#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip" +#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login" + +#define TSDB_CODE_HTTP_INVALID_VERSION 
TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version" +#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length" +#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization" +#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization" +#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization" +#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization" +#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method" +#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target" +#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version" +#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp" +#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status" +#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase" +#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf" +#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header" +#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key" +#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val" +#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size" +#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk" +#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section" +#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state" +#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section" + +#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0" +#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100" +#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error" + +#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null" +#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long" +#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf json fromat" +#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0" +#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K" +#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find" +#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string" +#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0" +#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long" +#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find" +#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type 
should be integer" +#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0" +#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find" +#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0" +#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long" +#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null" +#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null" +#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long" +#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string" +#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null" +#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null" +#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long" +#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find" +#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0" +#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long" +#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null" +#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null" +#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long" +#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string" +#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null" +#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string" +#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist" + +#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null" +#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long" +#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat" +#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0" +#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K" +#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find" +#define TSDB_CODE_HTTP_OP_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string" +#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0" +#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22" +#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find" +#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer" +#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0" +#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find" +#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0" +#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG 
TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long" +#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null" +#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null" +#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long" +#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string" +#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null" +#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64" +#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find" +#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string" + +#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error" // odbc -#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory") -#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input") -#define TSDB_CODE_ODBC_CONV_UNDEF TAOS_DEF_ERROR_CODE(0, 0x2102) //"convertion undefined") -#define TSDB_CODE_ODBC_CONV_TRUNC_FRAC TAOS_DEF_ERROR_CODE(0, 0x2103) //"convertion fractional truncated") -#define TSDB_CODE_ODBC_CONV_TRUNC TAOS_DEF_ERROR_CODE(0, 0x2104) //"convertion truncated") -#define TSDB_CODE_ODBC_CONV_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2105) //"convertion not supported") -#define TSDB_CODE_ODBC_CONV_OOR TAOS_DEF_ERROR_CODE(0, 0x2106) //"convertion numeric value out of range") -#define TSDB_CODE_ODBC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x2107) //"out of range") -#define TSDB_CODE_ODBC_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2108) //"not supported yet") -#define TSDB_CODE_ODBC_INVALID_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2109) //"invalid handle") -#define TSDB_CODE_ODBC_NO_RESULT TAOS_DEF_ERROR_CODE(0, 0x210a) //"no result set") -#define TSDB_CODE_ODBC_NO_FIELDS TAOS_DEF_ERROR_CODE(0, 0x210b) //"no fields returned") -#define TSDB_CODE_ODBC_INVALID_CURSOR TAOS_DEF_ERROR_CODE(0, 0x210c) //"invalid cursor") -#define TSDB_CODE_ODBC_STATEMENT_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x210d) //"statement not ready") -#define TSDB_CODE_ODBC_CONNECTION_BUSY TAOS_DEF_ERROR_CODE(0, 0x210e) //"connection still busy") -#define TSDB_CODE_ODBC_BAD_CONNSTR TAOS_DEF_ERROR_CODE(0, 0x210f) //"bad connection string") -#define TSDB_CODE_ODBC_BAD_ARG TAOS_DEF_ERROR_CODE(0, 0x2110) //"bad argument") -#define TSDB_CODE_ODBC_CONV_NOT_VALID_TS TAOS_DEF_ERROR_CODE(0, 0x2111) //"not a valid timestamp") -#define TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x2112) //"src too large") -#define TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ TAOS_DEF_ERROR_CODE(0, 0x2113) //"src bad sequence") -#define TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE TAOS_DEF_ERROR_CODE(0, 0x2114) //"src incomplete") -#define TSDB_CODE_ODBC_CONV_SRC_GENERAL TAOS_DEF_ERROR_CODE(0, 0x2115) //"src general") +#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory" +#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input" +#define TSDB_CODE_ODBC_CONV_UNDEF TAOS_DEF_ERROR_CODE(0, 0x2102) //"convertion undefined" +#define TSDB_CODE_ODBC_CONV_TRUNC_FRAC TAOS_DEF_ERROR_CODE(0, 0x2103) //"convertion fractional truncated" +#define TSDB_CODE_ODBC_CONV_TRUNC TAOS_DEF_ERROR_CODE(0, 0x2104) //"convertion truncated" +#define 
TSDB_CODE_ODBC_CONV_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2105) //"convertion not supported" +#define TSDB_CODE_ODBC_CONV_OOR TAOS_DEF_ERROR_CODE(0, 0x2106) //"convertion numeric value out of range" +#define TSDB_CODE_ODBC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x2107) //"out of range" +#define TSDB_CODE_ODBC_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x2108) //"not supported yet" +#define TSDB_CODE_ODBC_INVALID_HANDLE TAOS_DEF_ERROR_CODE(0, 0x2109) //"invalid handle" +#define TSDB_CODE_ODBC_NO_RESULT TAOS_DEF_ERROR_CODE(0, 0x210a) //"no result set" +#define TSDB_CODE_ODBC_NO_FIELDS TAOS_DEF_ERROR_CODE(0, 0x210b) //"no fields returned" +#define TSDB_CODE_ODBC_INVALID_CURSOR TAOS_DEF_ERROR_CODE(0, 0x210c) //"invalid cursor" +#define TSDB_CODE_ODBC_STATEMENT_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x210d) //"statement not ready" +#define TSDB_CODE_ODBC_CONNECTION_BUSY TAOS_DEF_ERROR_CODE(0, 0x210e) //"connection still busy" +#define TSDB_CODE_ODBC_BAD_CONNSTR TAOS_DEF_ERROR_CODE(0, 0x210f) //"bad connection string" +#define TSDB_CODE_ODBC_BAD_ARG TAOS_DEF_ERROR_CODE(0, 0x2110) //"bad argument" +#define TSDB_CODE_ODBC_CONV_NOT_VALID_TS TAOS_DEF_ERROR_CODE(0, 0x2111) //"not a valid timestamp" +#define TSDB_CODE_ODBC_CONV_SRC_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x2112) //"src too large" +#define TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ TAOS_DEF_ERROR_CODE(0, 0x2113) //"src bad sequence" +#define TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE TAOS_DEF_ERROR_CODE(0, 0x2114) //"src incomplete" +#define TSDB_CODE_ODBC_CONV_SRC_GENERAL TAOS_DEF_ERROR_CODE(0, 0x2115) //"src general" // tfs -#define TSDB_CODE_FS_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x2200) //"tfs out of memory") -#define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) //"tfs invalid mount config") -#define TSDB_CODE_FS_TOO_MANY_MOUNT TAOS_DEF_ERROR_CODE(0, 0x2202) //"tfs too many mount") -#define TSDB_CODE_FS_DUP_PRIMARY TAOS_DEF_ERROR_CODE(0, 0x2203) //"tfs duplicate primary mount") -#define TSDB_CODE_FS_NO_PRIMARY_DISK TAOS_DEF_ERROR_CODE(0, 0x2204) //"tfs no primary mount") -#define TSDB_CODE_FS_NO_MOUNT_AT_TIER TAOS_DEF_ERROR_CODE(0, 0x2205) //"tfs no mount at tier") -#define TSDB_CODE_FS_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2206) //"tfs file already exists") -#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level") -#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk") +#define TSDB_CODE_FS_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x2200) //"tfs out of memory" +#define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) //"tfs invalid mount config" +#define TSDB_CODE_FS_TOO_MANY_MOUNT TAOS_DEF_ERROR_CODE(0, 0x2202) //"tfs too many mount" +#define TSDB_CODE_FS_DUP_PRIMARY TAOS_DEF_ERROR_CODE(0, 0x2203) //"tfs duplicate primary mount" +#define TSDB_CODE_FS_NO_PRIMARY_DISK TAOS_DEF_ERROR_CODE(0, 0x2204) //"tfs no primary mount" +#define TSDB_CODE_FS_NO_MOUNT_AT_TIER TAOS_DEF_ERROR_CODE(0, 0x2205) //"tfs no mount at tier" +#define TSDB_CODE_FS_FILE_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2206) //"tfs file already exists" +#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level" +#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk" // monitor -#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection") +#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection" #ifdef __cplusplus } diff --git a/src/inc/taosmsg.h 
b/src/inc/taosmsg.h index 0f291936f5519b1db7f98b098e5f9f82303cd0f5..84491e0a438fdb3b5dd2905acffaf32c76b23c9b 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -475,6 +475,7 @@ typedef struct { bool tsCompQuery; // is tscomp query bool simpleAgg; bool pointInterpQuery; // point interpolation query + bool needTableSeqScan; // need scan table by table bool needReverseScan; // need reverse scan bool stateWindow; // state window flag diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index 8293a09231e638748c885f68bde3f6c64285f763..1bdd49267dacc1674cda3ebfd48a0ab11a7cba3a 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -2,7 +2,6 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) ADD_SUBDIRECTORY(shell) -ADD_SUBDIRECTORY(taosdemo) IF (TD_TAOS_TOOLS) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/src/kit/taos_tools/deps/avro/lang/c/src) diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 0babd88333c846c1f0b5dbe4baede4a6d38cbcdd..b1c85d951bf1f8cf801286f51b84d47d9c893b5c 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -17,7 +17,7 @@ #include "taos.h" #include "shellCommand.h" -#define SHELL_INPUT_MAX_COMMAND_SIZE 500000 +#define SHELL_INPUT_MAX_COMMAND_SIZE 10000 extern char configDir[]; diff --git a/src/kit/taos-tools b/src/kit/taos-tools index 7ed1e6b485d04cc93c4bb0bc9e844086da1b4714..b76b5a76756a5c6530ba1d418de51fd336ae23b1 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit 7ed1e6b485d04cc93c4bb0bc9e844086da1b4714 +Subproject commit b76b5a76756a5c6530ba1d418de51fd336ae23b1 diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt deleted file mode 100644 index 57d6242d5343ad727b1706ab614ad0add844ddb6..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/CMakeLists.txt +++ /dev/null @@ -1,104 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) -PROJECT(TDengine) - -INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) -INCLUDE_DIRECTORIES(inc) - -FIND_PACKAGE(Git) -IF (GIT_FOUND) - MESSAGE("Git found") - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR} - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) - IF ("${TAOSDEMO_COMMIT_SHA1}" STREQUAL "") - SET(TAOSDEMO_COMMIT_SHA1 "unknown") - ELSE () - STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) - STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1) - ENDIF () - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR} - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_STATUS) - IF (TD_LINUX) - EXECUTE_PROCESS( - COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_STATUS) - ENDIF (TD_LINUX) -ELSE() - MESSAGE("Git not found") - SET(TAOSDEMO_COMMIT_SHA1 "unknown") - SET(TAOSDEMO_STATUS "unknown") -ENDIF (GIT_FOUND) - - -MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) -STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS) - -IF (TAOSDEMO_STATUS MATCHES "M") - SET(TAOSDEMO_STATUS "modified") -ELSE() - SET(TAOSDEMO_STATUS "") -ENDIF () -MESSAGE("taosdemo's status is:" ${TAOSDEMO_STATUS}) - -ADD_DEFINITIONS(-DTAOSDEMO_COMMIT_SHA1="${TAOSDEMO_COMMIT_SHA1}") -ADD_DEFINITIONS(-DTAOSDEMO_STATUS="${TAOSDEMO_STATUS}") - -MESSAGE("TD_VER_NUMBER is:" ${TD_VER_NUMBER}) -IF ("${TD_VER_NUMBER}" STREQUAL "") - SET(TD_VERSION_NUMBER 
"TDengine-version-unknown") -ELSE() - SET(TD_VERSION_NUMBER ${TD_VER_NUMBER}) -ENDIF () -MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) -ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") - -IF (TD_LINUX_64 AND JEMALLOC_ENABLED) - ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) - SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") -ELSE () - SET(LINK_JEMALLOC "") -ENDIF () - -IF (TD_LINUX) - AUX_SOURCE_DIRECTORY(./src SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC}) - ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC}) - ENDIF () -ELSEIF (TD_WINDOWS) - AUX_SOURCE_DIRECTORY(./src SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - SET_SOURCE_FILES_PROPERTIES(./src/demoUtil.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoData.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoInsert.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoCommandOpt.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoQuery.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoMain.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoSubscribe.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoOutput.c PROPERTIES COMPILE_FLAGS -w) - SET_SOURCE_FILES_PROPERTIES(./src/demoJsonOpt.c PROPERTIES COMPILE_FLAGS -w) - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) - ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson lua) - ENDIF () -ELSEIF (TD_DARWIN) - # missing a few dependencies, such as - AUX_SOURCE_DIRECTORY(./src SRC) - ADD_EXECUTABLE(taosdemo ${SRC}) - - IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) - ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson lua) - ENDIF () -ENDIF () - diff --git a/src/kit/taosdemo/async-sub.json b/src/kit/taosdemo/async-sub.json deleted file mode 100644 index a30a1be45cd8bcc6a6fadffd7473df7df067e839..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/async-sub.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "filetype": "subscribe", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "databases": "test", - "specified_table_query": { - "concurrent": 1, - "mode": "async", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "resubAfterConsume": 10, - "sqls": [ - { - "sql": "select col1 from meters where col1 > 1;", - "result": "./subscribe_res0.txt" - }, - { - "sql": "select col2 from meters where col2 > 1;", - "result": "./subscribe_res2.txt" - } - ] - }, - "super_table_query": { - "stblname": "meters", - "threads": 1, - "mode": "sync", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "sqls": [ - { - "sql": "select col1 from xxxx where col1 > 10;", - "result": "./subscribe_res1.txt" - } - ] - } -} diff --git a/src/kit/taosdemo/inc/demo.h b/src/kit/taosdemo/inc/demo.h deleted file mode 100644 index a9b6b83580215f3a9e49c3fac98d0cd62e7ca1e6..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/inc/demo.h +++ /dev/null @@ -1,650 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. 
- * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef __DEMO__ -#define __DEMO__ - -#include -#include -#include -#define _GNU_SOURCE -#define CURL_STATICLIB - -#ifdef LINUX -#include -#include -#ifndef _ALPINE -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#else -#include -#include -#endif - -#include -#include - -// #include "os.h" -#include "taos.h" -#include "taoserror.h" -#include "tutil.h" - -#define REQ_EXTRA_BUF_LEN 1024 -#define RESP_BUF_LEN 4096 -#define SQL_BUFF_LEN 1024 - -extern char configDir[]; - -#define STR_INSERT_INTO "INSERT INTO " - -#define MAX_RECORDS_PER_REQ 32766 - -#define HEAD_BUFF_LEN \ - TSDB_MAX_COLUMNS * 24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. - -#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN -#define FETCH_BUFFER_SIZE 100 * TSDB_MAX_ALLOWED_SQL_LEN -#define COND_BUF_LEN (BUFFER_SIZE - 30) -#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) - -#define MAX_USERNAME_SIZE 64 -#define MAX_HOSTNAME_SIZE \ - 253 // https://man7.org/linux/man-pages/man7/hostname.7.html -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE \ - (16 * TSDB_MAX_COLUMNS) + 20 // max record len: 16*MAX_COLUMNS, timestamp - // string and ,('') need extra space -#define OPT_ABORT 1 /* –abort */ -#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. 
-#define MAX_PATH_LEN 4096 - -#define DEFAULT_START_TIME 1500000000000 - -#define MAX_PREPARED_RAND 1000000 -#define INT_BUFF_LEN 12 -#define BIGINT_BUFF_LEN 21 -#define SMALLINT_BUFF_LEN 7 -#define TINYINT_BUFF_LEN 5 -#define BOOL_BUFF_LEN 6 -#define FLOAT_BUFF_LEN 22 -#define DOUBLE_BUFF_LEN 42 -#define TIMESTAMP_BUFF_LEN 21 -#define PRINT_STAT_INTERVAL 30 * 1000 - -#define MAX_SAMPLES 10000 -#define MAX_NUM_COLUMNS \ - (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp - -#define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 200 - -#define MAX_QUERY_SQL_COUNT 100 - -#define MAX_DATABASE_COUNT 256 -#define MAX_JSON_BUFF 6400000 - -#define INPUT_BUF_LEN 256 -#define EXTRA_SQL_LEN 256 -#define TBNAME_PREFIX_LEN \ - (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq -#define SMALL_BUFF_LEN 8 -#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN * 3) -#define NOTE_BUFF_LEN (SMALL_BUFF_LEN * 16) - -#define DEFAULT_NTHREADS 8 -#define DEFAULT_TIMESTAMP_STEP 1 -#define DEFAULT_INTERLACE_ROWS 0 -#define DEFAULT_DATATYPE_NUM 1 -#define DEFAULT_CHILDTABLES 10000 -#define DEFAULT_TEST_MODE 0 -#define DEFAULT_METAFILE NULL -#define DEFAULT_SQLFILE NULL -#define DEFAULT_HOST "localhost" -#define DEFAULT_PORT 6030 -#define DEFAULT_IFACE INTERFACE_BUT -#define DEFAULT_DATABASE "test" -#define DEFAULT_REPLICA 1 -#define DEFAULT_TB_PREFIX "d" -#define DEFAULT_ESCAPE_CHAR false -#define DEFAULT_USE_METRIC true -#define DEFAULT_DROP_DB true -#define DEFAULT_AGGR_FUNC false -#define DEFAULT_DEBUG false -#define DEFAULT_VERBOSE false -#define DEFAULT_PERF_STAT false -#define DEFAULT_ANS_YES false -#define DEFAULT_OUTPUT "./output.txt" -#define DEFAULT_SYNC_MODE 0 -#define DEFAULT_DATA_TYPE \ - { TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT } -#define DEFAULT_DATATYPE \ - { "FLOAT", "INT", "FLOAT" } -#define DEFAULT_DATALENGTH \ - { 4, 4, 4 } -#define DEFAULT_BINWIDTH 64 -#define DEFAULT_COL_COUNT 4 -#define DEFAULT_LEN_ONE_ROW 76 -#define DEFAULT_INSERT_INTERVAL 0 -#define DEFAULT_QUERY_TIME 1 -#define DEFAULT_PREPARED_RAND 10000 -#define DEFAULT_REQ_PER_REQ 30000 -#define DEFAULT_INSERT_ROWS 10000 -#define DEFAULT_ABORT 0 -#define DEFAULT_RATIO 0 -#define DEFAULT_DISORDER_RANGE 1000 -#define DEFAULT_METHOD_DEL 1 -#define DEFAULT_TOTAL_INSERT 0 -#define DEFAULT_TOTAL_AFFECT 0 -#define DEFAULT_DEMO_MODE true -#define DEFAULT_CHINESE_OPT false -#define DEFAULT_CREATE_BATCH 10 -#define DEFAULT_SUB_INTERVAL 10000 -#define DEFAULT_QUERY_INTERVAL 10000 - -#define SML_LINE_SQL_SYNTAX_OFFSET 7 - -#if _MSC_VER <= 1900 -#define __func__ __FUNCTION__ -#endif - -#define debugPrint(fmt, ...) \ - do { \ - if (g_args.debug_print || g_args.verbose_print) \ - fprintf(stderr, "DEBG: " fmt, __VA_ARGS__); \ - } while (0) - -#define verbosePrint(fmt, ...) \ - do { \ - if (g_args.verbose_print) fprintf(stderr, "VERB: " fmt, __VA_ARGS__); \ - } while (0) - -#define performancePrint(fmt, ...) \ - do { \ - if (g_args.performance_print) \ - fprintf(stderr, "PERF: " fmt, __VA_ARGS__); \ - } while (0) - -#define errorPrint(fmt, ...) 
\ - do { \ - fprintf(stderr, "\033[31m"); \ - fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ - fprintf(stderr, "ERROR: " fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - } while (0) - -enum TEST_MODE { - INSERT_TEST, // 0 - QUERY_TEST, // 1 - SUBSCRIBE_TEST, // 2 - INVAID_TEST -}; - -typedef enum CREATE_SUB_TABLE_MOD_EN { - PRE_CREATE_SUBTBL, - AUTO_CREATE_SUBTBL, - NO_CREATE_SUBTBL -} CREATE_SUB_TABLE_MOD_EN; - -typedef enum TABLE_EXISTS_EN { - TBL_NO_EXISTS, - TBL_ALREADY_EXISTS, - TBL_EXISTS_BUTT -} TABLE_EXISTS_EN; - -enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT }; - -enum enum_TAOS_INTERFACE { - TAOSC_IFACE, - REST_IFACE, - STMT_IFACE, - SML_IFACE, - INTERFACE_BUT -}; - -typedef enum enumQUERY_CLASS { - SPECIFIED_CLASS, - STABLE_CLASS, - CLASS_BUT -} QUERY_CLASS; - -typedef enum enum_PROGRESSIVE_OR_INTERLACE { - PROGRESSIVE_INSERT_MODE, - INTERLACE_INSERT_MODE, - INVALID_INSERT_MODE -} PROG_OR_INTERLACE_MODE; - -typedef enum enumQUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT -} QUERY_TYPE; - -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE -// ------------------------------------- -enum _show_stables_index { - TSDB_SHOW_STABLES_NAME_INDEX, - TSDB_SHOW_STABLES_CREATED_TIME_INDEX, - TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, - TSDB_SHOW_STABLES_TID_INDEX, - TSDB_SHOW_STABLES_VGID_INDEX, - TSDB_MAX_SHOW_STABLES -}; - -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -typedef struct SArguments_S { - char * metaFile; - uint32_t test_mode; - char * host; - uint16_t port; - uint16_t iface; - char * user; - char password[SHELL_MAX_PASSWORD_LEN]; - char * database; - int replica; - char * tb_prefix; - bool escapeChar; - char * sqlFile; - bool use_metric; - bool drop_database; - bool aggr_func; - bool answer_yes; - bool debug_print; - bool verbose_print; - bool performance_print; - char * output_file; - bool async_mode; - char data_type[MAX_NUM_COLUMNS + 1]; - char * dataType[MAX_NUM_COLUMNS + 1]; - int32_t data_length[MAX_NUM_COLUMNS + 1]; - uint32_t binwidth; - uint32_t columnCount; - uint64_t lenOfOneRow; - uint32_t nthreads; - uint64_t insert_interval; - uint64_t timestamp_step; - int64_t query_times; - int64_t prepared_rand; - uint32_t interlaceRows; - uint32_t reqPerReq; // num_of_records_per_req - uint64_t max_sql_len; - int64_t ntables; - int64_t insertRows; - int abort; - uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. 
according to database precision - uint32_t method_of_delete; - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - bool demo_mode; // use default column name and semi-random data - bool chinese; -} SArguments; - -typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN]; - char data_type; - char dataType[DATATYPE_BUFF_LEN]; - uint32_t dataLen; - char note[NOTE_BUFF_LEN]; -} StrColumn; - -typedef struct SSuperTable_S { - char stbName[TSDB_TABLE_NAME_LEN]; - char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample - char childTblPrefix[TBNAME_PREFIX_LEN]; - uint16_t childTblExists; - int64_t childTblCount; - uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in - // one sql - uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table - uint16_t iface; // 0: taosc, 1: rest, 2: stmt - uint16_t lineProtocol; - int64_t childTblLimit; - uint64_t childTblOffset; - - // int multiThreadWriteOneTbl; // 0: no, 1: yes - uint32_t interlaceRows; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. according to database precision - uint64_t maxSqlLen; // - - uint64_t insertInterval; // insert interval, will override global insert - // interval - int64_t insertRows; - int64_t timeStampStep; - int tsPrecision; - char startTimestamp[MAX_TB_NAME_SIZE]; - char sampleFormat[SMALL_BUFF_LEN]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN]; - char tagsFile[MAX_FILE_NAME_LEN]; - - uint32_t columnCount; - StrColumn columns[TSDB_MAX_COLUMNS]; - uint32_t tagCount; - StrColumn tags[TSDB_MAX_TAGS]; - - char * childTblName; - bool escapeChar; - char * colsOfCreateChildTable; - uint64_t lenOfOneRow; - uint64_t lenOfTagOfOneRow; - - char *sampleDataBuf; - bool useSampleTs; - - uint32_t tagSource; // 0: rand, 1: tag sample - char * tagDataBuf; - uint32_t tagSampleCount; - uint32_t tagUsePos; - - // bind param batch - char *sampleBindBatchArray; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; -} SSuperTable; - -typedef struct { - char name[TSDB_DB_NAME_LEN]; - char create_time[32]; - int64_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[64]; - int32_t cache; // MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[SMALL_BUFF_LEN]; // time resolution - int8_t update; - char status[16]; -} SDbInfo; - -typedef struct SDbCfg_S { - // int maxtablesPerVnode; - uint32_t minRows; // 0 means default - uint32_t maxRows; // 0 means default - int comp; - int walLevel; - int cacheLast; - int fsync; - int replica; - int update; - int keep; - int days; - int cache; - int blocks; - int quorum; - char precision[SMALL_BUFF_LEN]; -} SDbCfg; - -typedef struct SDataBase_S { - char dbName[TSDB_DB_NAME_LEN]; - bool drop; // 0: use exists, 1: if exists, drop then new create - SDbCfg dbCfg; - uint64_t superTblCount; - SSuperTable *superTbls; -} SDataBase; - -typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_HOSTNAME_SIZE]; - struct sockaddr_in serv_addr; - - uint16_t port; - char user[MAX_USERNAME_SIZE]; - char password[SHELL_MAX_PASSWORD_LEN]; - char resultFile[MAX_FILE_NAME_LEN]; - bool use_metric; - bool aggr_func; - bool asyncMode; - - uint32_t threadCount; - uint32_t threadCountForCreateTbl; - uint32_t dbCount; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - SDataBase *db; -} SDbs; - -typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; 
// 0: unlimited > 0 loop/s - uint32_t concurrent; - int sqlCount; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - uint64_t queryTimes; - bool subscribeRestart; - int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume[MAX_QUERY_SQL_COUNT]; - int endAfterConsume[MAX_QUERY_SQL_COUNT]; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; - char topic[MAX_QUERY_SQL_COUNT][32]; - int consumed[MAX_QUERY_SQL_COUNT]; - TAOS_RES *res[MAX_QUERY_SQL_COUNT]; - uint64_t totalQueried; -} SpecifiedQueryInfo; - -typedef struct SuperQueryInfo_S { - char stbName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint32_t threadCnt; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - bool subscribeRestart; - int subscribeKeepProgress; - uint64_t queryTimes; - int64_t childTblCount; - char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq - int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE + 1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume; - int endAfterConsume; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; - char * childTblName; - uint64_t totalQueried; -} SuperQueryInfo; - -typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_HOSTNAME_SIZE]; - uint16_t port; - struct sockaddr_in serv_addr; - char user[MAX_USERNAME_SIZE]; - char password[SHELL_MAX_PASSWORD_LEN]; - char dbName[TSDB_DB_NAME_LEN]; - char queryMode[SMALL_BUFF_LEN]; // taosc, rest - SpecifiedQueryInfo specifiedQueryInfo; - SuperQueryInfo superQueryInfo; - uint64_t totalQueried; -} SQueryMetaInfo; - -typedef struct SThreadInfo_S { - TAOS * taos; - TAOS_STMT * stmt; - int64_t * bind_ts; - int64_t * bind_ts_array; - char * bindParams; - char * is_null; - int threadID; - char db_name[TSDB_DB_NAME_LEN]; - uint32_t time_precision; - char filePath[MAX_PATH_LEN]; - FILE * fp; - char tb_prefix[TSDB_TABLE_NAME_LEN]; - uint64_t start_table_from; - uint64_t end_table_to; - int64_t ntables; - int64_t tables_created; - uint64_t data_of_rate; - int64_t start_time; - char * cols; - bool use_metric; - SSuperTable *stbInfo; - char * buffer; // sql cmd buffer - - // for async insert - tsem_t lock_sem; - int64_t counter; - uint64_t st; - uint64_t et; - uint64_t lastTs; - - // sample data - int64_t samplePos; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - // insert delay statistics - uint64_t cntDelay; - uint64_t totalDelay; - uint64_t avgDelay; - uint64_t maxDelay; - uint64_t minDelay; - - // seq of query or subscribe - uint64_t querySeq; // sequence number of sql command - TAOS_SUB *tsub; - - char **lines; - SOCKET sockfd; -} threadInfo; - -/* ************ Global variables ************ */ -extern char * g_aggreFuncDemo[]; -extern char * g_aggreFunc[]; -extern SArguments g_args; -extern SDbs g_Dbs; -extern char * g_dupstr; -extern int64_t g_totalChildTables; -extern int64_t g_actualChildTables; -extern SQueryMetaInfo g_queryInfo; -extern FILE * g_fpOfInsertResult; -extern bool g_fail; - -#define min(a, b) (((a) < (b)) ? 
(a) : (b)) - -/* ************ Function declares ************ */ -/* demoCommandOpt.c */ -int parse_args(int argc, char *argv[]); -void setParaFromArg(); -void querySqlFile(TAOS *taos, char *sqlFile); -void testCmdLine(); -/* demoJsonOpt.c */ -int getInfoFromJsonFile(char *file); -int testMetaFile(); -/* demoUtil.c */ -int isCommentLine(char *line); -void replaceChildTblName(char *inSql, char *outSql, int tblIndex); -void setupForAnsiEscape(void); -void resetAfterAnsiEscape(void); -int taosRandom(); -void tmfree(void *buf); -void tmfclose(FILE *fp); -void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo); -void prompt(); -void ERROR_EXIT(const char *msg); -int postProceSql(char *host, uint16_t port, char *sqlstr, - threadInfo *pThreadInfo); -int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); -int regexMatch(const char *s, const char *reg, int cflags); -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr); -char *formatTimestamp(char *buf, int64_t val, int precision); -void errorWrongValue(char *program, char *wrong_arg, char *wrong_value); -void errorUnrecognized(char *program, char *wrong_arg); -void errorPrintReqArg(char *program, char *wrong_arg); -void errorPrintReqArg2(char *program, char *wrong_arg); -void errorPrintReqArg3(char *program, char *wrong_arg); -bool isStringNumber(char *input); -int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl); -int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName, - char * stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl, - int64_t limit, uint64_t offset, - bool escapChar); -/* demoInsert.c */ -int insertTestProcess(); -void postFreeResource(); -/* demoOutput.c */ -void printVersion(); -void printfInsertMeta(); -void printfInsertMetaToFile(FILE *fp); -void printStatPerThread(threadInfo *pThreadInfo); -void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo); -void printfQueryMeta(); -void printHelp(); -void printfQuerySystemInfo(TAOS *taos); -/* demoQuery.c */ -int queryTestProcess(); -/* demoSubscribe.c */ -int subscribeTestProcess(); -#endif \ No newline at end of file diff --git a/src/kit/taosdemo/inc/demoData.h b/src/kit/taosdemo/inc/demoData.h deleted file mode 100644 index f0ac1f2501fa76d1da6c537328d8dd319bbe3c95..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/inc/demoData.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#ifndef __DEMODATA__ -#define __DEMODATA__ -#include "cJSON.h" -#include "demo.h" -/***** Global variables ******/ - -extern char * g_sampleDataBuf; -extern char * g_sampleBindBatchArray; -extern int32_t * g_randint; -extern uint32_t *g_randuint; -extern int64_t * g_randbigint; -extern uint64_t *g_randubigint; -extern float * g_randfloat; -extern double * g_randdouble; -extern char * g_randbool_buff; -extern char * g_randint_buff; -extern char * g_randuint_buff; -extern char * g_rand_voltage_buff; -extern char * g_randbigint_buff; -extern char * g_randubigint_buff; -extern char * g_randsmallint_buff; -extern char * g_randusmallint_buff; -extern char * g_randtinyint_buff; -extern char * g_randutinyint_buff; -extern char * g_randfloat_buff; -extern char * g_rand_current_buff; -extern char * g_rand_phase_buff; -extern char * g_randdouble_buff; -/***** Declare functions *****/ -int init_rand_data(); -char * rand_bool_str(); -int32_t rand_bool(); -char * rand_tinyint_str(); -int32_t rand_tinyint(); -char * rand_utinyint_str(); -int32_t rand_utinyint(); -char * rand_smallint_str(); -int32_t rand_smallint(); -char * rand_usmallint_str(); -int32_t rand_usmallint(); -char * rand_int_str(); -int32_t rand_int(); -char * rand_uint_str(); -int32_t rand_uint(); -char * rand_bigint_str(); -int64_t rand_bigint(); -char * rand_ubigint_str(); -int64_t rand_ubigint(); -char * rand_float_str(); -float rand_float(); -char * demo_current_float_str(); -float UNUSED_FUNC demo_current_float(); -char * demo_voltage_int_str(); -int32_t UNUSED_FUNC demo_voltage_int(); -char * demo_phase_float_str(); -float UNUSED_FUNC demo_phase_float(); -void rand_string(char *str, int size); -char * rand_double_str(); -double rand_double(); - -int generateTagValuesForStb(SSuperTable *stbInfo, int64_t tableSeq, - char *tagsValBuf); -int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, int disorderRatio, - int disorderRange); -int32_t prepareStbStmtBindTag(char *bindArray, SSuperTable *stbInfo, - char *tagsVal, int32_t timePrec); -int32_t prepareStmtWithoutStb(threadInfo *pThreadInfo, char *tableName, - uint32_t batch, int64_t insertRows, - int64_t recordFrom, int64_t startTime); -int32_t generateStbInterlaceData(threadInfo *pThreadInfo, char *tableName, - uint32_t batchPerTbl, uint64_t i, - uint32_t batchPerTblTimes, uint64_t tableSeq, - char *buffer, int64_t insertRows, - int64_t startTime, uint64_t *pRemainderBufLen); -int64_t generateInterlaceDataWithoutStb(char *tableName, uint32_t batch, - uint64_t tableSeq, char *dbName, - char *buffer, int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen); -int32_t generateStbProgressiveData(SSuperTable *stbInfo, char *tableName, - int64_t tableSeq, char *dbName, char *buffer, - int64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen); -int32_t generateProgressiveDataWithoutStb( - char *tableName, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pRemainderBufLen); -int64_t generateStbRowData(SSuperTable *stbInfo, char *recBuf, - int64_t remainderBufLen, int64_t timestamp); -int prepareSampleForStb(SSuperTable *stbInfo); -int prepareSampleForNtb(); -int parseSamplefileToStmtBatch(SSuperTable *stbInfo); -int parseStbSampleToStmtBatchForThread(threadInfo * pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec, - uint32_t batch); -int parseNtbSampleToStmtBatchForThread(threadInfo *pThreadInfo, - uint32_t timePrec, uint32_t batch); -int 
prepareSampleData(); -int32_t generateSmlConstPart(char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq); - -int32_t generateSmlMutablePart(char *line, char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int64_t timestamp); -int32_t generateSmlJsonTags(cJSON *tagsList, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq); -int32_t generateSmlJsonCols(cJSON *array, cJSON *tag, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int64_t timestamp); -#endif \ No newline at end of file diff --git a/src/kit/taosdemo/insert-interlace.json b/src/kit/taosdemo/insert-interlace.json deleted file mode 100644 index cf3e1de2f4a76f5cc242399b9a268c95c2dca878..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/insert-interlace.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 4, - "thread_count_create_tbl": 4, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 1000, - "num_of_records_per_req": 100, - "max_sql_len": 1024000, - "databases": [{ - "dbinfo": { - "name": "db", - "drop": "yes", - "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, - "precision": "ms", - "keep": 365, - "minRows": 100, - "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 - }, - "super_tables": [{ - "name": "stb", - "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "stb_", - "auto_create_table": "no", - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 1000, - "interlace_rows": 20, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", - "columns": [{"type": "INT"}], - "tags": [{"type": "TINYINT", "count":1}] - }] - }] -} diff --git a/src/kit/taosdemo/insert.json b/src/kit/taosdemo/insert.json deleted file mode 100644 index 43c729502cbf9ac11e138d9cbea60e459d3c27e5..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/insert.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "filetype": "insert", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "thread_count": 4, - "thread_count_create_tbl": 4, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "num_of_records_per_req": 100, - "max_sql_len": 1024000, - "databases": [{ - "dbinfo": { - "name": "db", - "drop": "yes", - "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, - "precision": "ms", - "keep": 36500, - "minRows": 100, - "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 - }, - "super_tables": [{ - "name": "stb", - "child_table_exists":"no", - "childtable_count": 10000, - "childtable_prefix": "stb_", - "auto_create_table": "no", - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100000, - "interlace_rows": 0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 1, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], - "tags": [{"type": "TINYINT", 
"count":2}, {"type": "BINARY", "len": 16, "count":5}] - }] - }] -} diff --git a/src/kit/taosdemo/query.json b/src/kit/taosdemo/query.json deleted file mode 100644 index d84f997c329f005e62642ac32856b8face1c8048..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/query.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "filetype": "query", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "confirm_parameter_prompt": "yes", - "databases": "dbx", - "query_times": 1, - "specified_table_query": { - "query_interval": 1, - "concurrent": 4, - "sqls": [ - { - "sql": "select last_row(*) from stb where color='red'", - "result": "./query_res0.txt" - }, - { - "sql": "select count(*) from stb_01", - "result": "./query_res1.txt" - } - ] - }, - "super_table_query": { - "stblname": "stb", - "query_interval": 1, - "threads": 4, - "sqls": [ - { - "sql": "select last_row(*) from xxxx", - "result": "./query_res2.txt" - } - ] - } -} diff --git a/src/kit/taosdemo/src/demoCommandOpt.c b/src/kit/taosdemo/src/demoCommandOpt.c deleted file mode 100644 index 3300c9ab889fc9b4664740f09e30de377fb33494..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoCommandOpt.c +++ /dev/null @@ -1,1862 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include "demo.h" -#include "demoData.h" - -char *g_aggreFuncDemo[] = {"*", - "count(*)", - "avg(current)", - "sum(current)", - "max(current)", - "min(current)", - "first(current)", - "last(current)"}; -char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", - "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; - -int parse_args(int argc, char *argv[]) { - int32_t code = -1; - for (int i = 1; i < argc; i++) { - if ((0 == strncmp(argv[i], "-f", strlen("-f"))) || - (0 == strncmp(argv[i], "--file", strlen("--file")))) { - g_args.demo_mode = false; - - if (2 == strlen(argv[i])) { - if (i + 1 == argc) { - errorPrintReqArg(argv[0], "f"); - goto end_parse_command; - } - g_args.metaFile = argv[++i]; - } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) { - g_args.metaFile = (char *)(argv[i] + strlen("-f")); - } else if (strlen("--file") == strlen(argv[i])) { - if (i + 1 == argc) { - errorPrintReqArg3(argv[0], "--file"); - goto end_parse_command; - } - g_args.metaFile = argv[++i]; - } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { - g_args.metaFile = (char *)(argv[i] + strlen("--file=")); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) || - (0 == - strncmp(argv[i], "--config-dir", strlen("--config-dir")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "c"); - goto end_parse_command; - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) { - tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), - TSDB_FILENAME_LEN); - } else if (strlen("--config-dir") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--config-dir"); - goto end_parse_command; - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (0 == strncmp(argv[i], - "--config-dir=", strlen("--config-dir="))) { - tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), - TSDB_FILENAME_LEN); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) || - (0 == strncmp(argv[i], "--host", strlen("--host")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "h"); - goto end_parse_command; - } - g_args.host = argv[++i]; - } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) { - g_args.host = (char *)(argv[i] + strlen("-h")); - } else if (strlen("--host") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--host"); - goto end_parse_command; - } - g_args.host = argv[++i]; - } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { - g_args.host = (char *)(argv[i] + strlen("--host=")); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if (strcmp(argv[i], "-PP") == 0) { - g_args.performance_print = true; - } else if ((0 == strncmp(argv[i], "-P", strlen("-P"))) || - (0 == strncmp(argv[i], "--port", strlen("--port")))) { - uint64_t port; - char strPort[BIGINT_BUFF_LEN]; - - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "P"); - goto end_parse_command; - } else if (isStringNumber(argv[i + 1])) { - tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "P"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) { - if (isStringNumber((char *)(argv[i] + strlen("--port=")))) { - tstrncpy(strPort, (char *)(argv[i] + 
strlen("--port=")), - BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) { - if (isStringNumber((char *)(argv[i] + strlen("-P")))) { - tstrncpy(strPort, (char *)(argv[i] + strlen("-P")), - BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - goto end_parse_command; - } - } else if (strlen("--port") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--port"); - goto end_parse_command; - } else if (isStringNumber(argv[i + 1])) { - tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - goto end_parse_command; - } - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - port = atoi(strPort); - if (port > 65535) { - errorWrongValue("taosdump", "-P or --port", strPort); - goto end_parse_command; - } - g_args.port = (uint16_t)port; - - } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) || - (0 == - strncmp(argv[i], "--interface", strlen("--interface")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "I"); - goto end_parse_command; - } - if (0 == strcasecmp(argv[i + 1], "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "sml")) { - g_args.iface = SML_IFACE; - } else { - errorWrongValue(argv[0], "-I", argv[i + 1]); - goto end_parse_command; - } - i++; - } else if (0 == strncmp(argv[i], - "--interface=", strlen("--interface="))) { - if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), - "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp( - (char *)(argv[i] + strlen("--interface=")), - "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp( - (char *)(argv[i] + strlen("--interface=")), - "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp( - (char *)(argv[i] + strlen("--interface=")), - "sml")) { - g_args.iface = SML_IFACE; - } else { - errorPrintReqArg3(argv[0], "--interface"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) { - if (0 == - strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), - "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), - "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), - "sml")) { - g_args.iface = SML_IFACE; - } else { - errorWrongValue(argv[0], "-I", - (char *)(argv[i] + strlen("-I"))); - goto end_parse_command; - } - } else if (strlen("--interface") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--interface"); - goto end_parse_command; - } - if (0 == strcasecmp(argv[i + 1], "taosc")) { - g_args.iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "rest")) { - g_args.iface = REST_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "stmt")) { - g_args.iface = STMT_IFACE; - } else if (0 == strcasecmp(argv[i + 1], "sml")) { - g_args.iface = SML_IFACE; - } else { - errorWrongValue(argv[0], "--interface", argv[i + 1]); - goto end_parse_command; - } - i++; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) || - (0 == 
strncmp(argv[i], "--user", strlen("--user")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "u"); - goto end_parse_command; - } - g_args.user = argv[++i]; - } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) { - g_args.user = (char *)(argv[i++] + strlen("-u")); - } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) { - g_args.user = (char *)(argv[i++] + strlen("--user=")); - } else if (strlen("--user") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--user"); - goto end_parse_command; - } - g_args.user = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) || - (0 == strcmp(argv[i], "--password"))) { - if ((strlen(argv[i]) == 2) || - (0 == strcmp(argv[i], "--password"))) { - printf("Enter password: "); - taosSetConsoleEcho(false); - if (scanf("%s", g_args.password) > 1) { - fprintf(stderr, "password read error!\n"); - } - taosSetConsoleEcho(true); - } else { - tstrncpy(g_args.password, (char *)(argv[i] + 2), - SHELL_MAX_PASSWORD_LEN); - } - } else if ((0 == strncmp(argv[i], "-o", strlen("-o"))) || - (0 == strncmp(argv[i], "--output", strlen("--output")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--output"); - goto end_parse_command; - } - g_args.output_file = argv[++i]; - } else if (0 == - strncmp(argv[i], "--output=", strlen("--output="))) { - g_args.output_file = (char *)(argv[i++] + strlen("--output=")); - } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) { - g_args.output_file = (char *)(argv[i++] + strlen("-o")); - } else if (strlen("--output") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--output"); - goto end_parse_command; - } - g_args.output_file = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) || - (0 == - strncmp(argv[i], "--sql-file", strlen("--sql-file")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "s"); - goto end_parse_command; - } - g_args.sqlFile = argv[++i]; - } else if (0 == - strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) { - g_args.sqlFile = (char *)(argv[i++] + strlen("--sql-file=")); - } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) { - g_args.sqlFile = (char *)(argv[i++] + strlen("-s")); - } else if (strlen("--sql-file") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--sql-file"); - goto end_parse_command; - } - g_args.sqlFile = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) || - (0 == - strncmp(argv[i], "--query-mode", strlen("--query-mode")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "q"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "q"); - goto end_parse_command; - } - g_args.async_mode = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], - "--query-mode=", strlen("--query-mode="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--query-mode=")))) { - g_args.async_mode = - atoi((char *)(argv[i] + strlen("--query-mode="))); - } else { - errorPrintReqArg2(argv[0], "--query-mode"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) { - if (isStringNumber((char *)(argv[i] + strlen("-q")))) { - 
g_args.async_mode = atoi((char *)(argv[i] + strlen("-q"))); - } else { - errorPrintReqArg2(argv[0], "-q"); - goto end_parse_command; - } - } else if (strlen("--query-mode") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--query-mode"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--query-mode"); - goto end_parse_command; - } - g_args.async_mode = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) || - (0 == strncmp(argv[i], "--threads", strlen("--threads")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "T"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "T"); - goto end_parse_command; - } - g_args.nthreads = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--threads=", strlen("--threads="))) { - if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) { - g_args.nthreads = - atoi((char *)(argv[i] + strlen("--threads="))); - } else { - errorPrintReqArg2(argv[0], "--threads"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) { - if (isStringNumber((char *)(argv[i] + strlen("-T")))) { - g_args.nthreads = atoi((char *)(argv[i] + strlen("-T"))); - } else { - errorPrintReqArg2(argv[0], "-T"); - goto end_parse_command; - } - } else if (strlen("--threads") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--threads"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--threads"); - goto end_parse_command; - } - g_args.nthreads = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) || - (0 == strncmp(argv[i], "--insert-interval", - strlen("--insert-interval")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "i"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "i"); - goto end_parse_command; - } - g_args.insert_interval = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--insert-interval=", - strlen("--insert-interval="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--insert-interval=")))) { - g_args.insert_interval = - atoi((char *)(argv[i] + strlen("--insert-interval="))); - } else { - errorPrintReqArg3(argv[0], "--insert-innterval"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) { - if (isStringNumber((char *)(argv[i] + strlen("-i")))) { - g_args.insert_interval = - atoi((char *)(argv[i] + strlen("-i"))); - } else { - errorPrintReqArg3(argv[0], "-i"); - goto end_parse_command; - } - } else if (strlen("--insert-interval") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--insert-interval"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--insert-interval"); - goto end_parse_command; - } - g_args.insert_interval = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-S", strlen("-S"))) || - (0 == - strncmp(argv[i], "--time-step", strlen("--time-step")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "S"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 
1])) { - errorPrintReqArg2(argv[0], "S"); - goto end_parse_command; - } - g_args.timestamp_step = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], - "--time-step=", strlen("--time-step="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--time-step=")))) { - g_args.timestamp_step = - atoi((char *)(argv[i] + strlen("--time-step="))); - } else { - errorPrintReqArg2(argv[0], "--time-step"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) { - if (isStringNumber((char *)(argv[i] + strlen("-S")))) { - g_args.timestamp_step = - atoi((char *)(argv[i] + strlen("-S"))); - } else { - errorPrintReqArg2(argv[0], "-S"); - goto end_parse_command; - } - } else if (strlen("--time-step") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--time-step"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--time-step"); - goto end_parse_command; - } - g_args.timestamp_step = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i + 1) || (!isStringNumber(argv[i + 1]))) { - printHelp(); - errorPrint("%s", "\n\t-qt need a number following!\n"); - goto end_parse_command; - } - g_args.query_times = atoi(argv[++i]); - } else if ((0 == strncmp(argv[i], "-B", strlen("-B"))) || - (0 == strncmp(argv[i], "--interlace-rows", - strlen("--interlace-rows")))) { - if (strlen("-B") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "B"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "B"); - goto end_parse_command; - } - g_args.interlaceRows = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--interlace-rows=", - strlen("--interlace-rows="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--interlace-rows=")))) { - g_args.interlaceRows = - atoi((char *)(argv[i] + strlen("--interlace-rows="))); - } else { - errorPrintReqArg2(argv[0], "--interlace-rows"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { - if (isStringNumber((char *)(argv[i] + strlen("-B")))) { - g_args.interlaceRows = - atoi((char *)(argv[i] + strlen("-B"))); - } else { - errorPrintReqArg2(argv[0], "-B"); - goto end_parse_command; - } - } else if (strlen("--interlace-rows") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--interlace-rows"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--interlace-rows"); - goto end_parse_command; - } - g_args.interlaceRows = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) || - (0 == strncmp(argv[i], "--rec-per-req", 13))) { - if (strlen("-r") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "r"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "r"); - goto end_parse_command; - } - g_args.reqPerReq = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--rec-per-req=", - strlen("--rec-per-req="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--rec-per-req=")))) { - g_args.reqPerReq = - atoi((char *)(argv[i] + strlen("--rec-per-req="))); - } else { - errorPrintReqArg2(argv[0], "--rec-per-req"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) { - if (isStringNumber((char *)(argv[i] + 
strlen("-r")))) { - g_args.reqPerReq = atoi((char *)(argv[i] + strlen("-r"))); - } else { - errorPrintReqArg2(argv[0], "-r"); - goto end_parse_command; - } - } else if (strlen("--rec-per-req") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--rec-per-req"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--rec-per-req"); - goto end_parse_command; - } - g_args.reqPerReq = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) || - (0 == strncmp(argv[i], "--tables", strlen("--tables")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "t"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "t"); - goto end_parse_command; - } - g_args.ntables = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--tables=", strlen("--tables="))) { - if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) { - g_args.ntables = - atoi((char *)(argv[i] + strlen("--tables="))); - } else { - errorPrintReqArg2(argv[0], "--tables"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) { - if (isStringNumber((char *)(argv[i] + strlen("-t")))) { - g_args.ntables = atoi((char *)(argv[i] + strlen("-t"))); - } else { - errorPrintReqArg2(argv[0], "-t"); - goto end_parse_command; - } - } else if (strlen("--tables") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--tables"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--tables"); - goto end_parse_command; - } - g_args.ntables = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - g_totalChildTables = g_args.ntables; - } else if ((0 == strncmp(argv[i], "-n", strlen("-n"))) || - (0 == strncmp(argv[i], "--records", strlen("--records")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "n"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "n"); - goto end_parse_command; - } - g_args.insertRows = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--records=", strlen("--records="))) { - if (isStringNumber((char *)(argv[i] + strlen("--records=")))) { - g_args.insertRows = - atoi((char *)(argv[i] + strlen("--records="))); - } else { - errorPrintReqArg2(argv[0], "--records"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) { - if (isStringNumber((char *)(argv[i] + strlen("-n")))) { - g_args.insertRows = atoi((char *)(argv[i] + strlen("-n"))); - } else { - errorPrintReqArg2(argv[0], "-n"); - goto end_parse_command; - } - } else if (strlen("--records") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--records"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--records"); - goto end_parse_command; - } - g_args.insertRows = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) || - (0 == - strncmp(argv[i], "--database", strlen("--database")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "d"); - goto end_parse_command; - } - g_args.database = argv[++i]; - } else if (0 == - strncmp(argv[i], "--database=", 
strlen("--database="))) { - g_args.output_file = (char *)(argv[i] + strlen("--database=")); - } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) { - g_args.output_file = (char *)(argv[i] + strlen("-d")); - } else if (strlen("--database") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--database"); - goto end_parse_command; - } - g_args.database = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) || - (0 == strncmp(argv[i], "--columns", strlen("--columns")))) { - g_args.demo_mode = false; - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "l"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "l"); - goto end_parse_command; - } - g_args.columnCount = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--columns=", strlen("--columns="))) { - if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) { - g_args.columnCount = - atoi((char *)(argv[i] + strlen("--columns="))); - } else { - errorPrintReqArg2(argv[0], "--columns"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) { - if (isStringNumber((char *)(argv[i] + strlen("-l")))) { - g_args.columnCount = atoi((char *)(argv[i] + strlen("-l"))); - } else { - errorPrintReqArg2(argv[0], "-l"); - goto end_parse_command; - } - } else if (strlen("--columns") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--columns"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--columns"); - goto end_parse_command; - } - g_args.columnCount = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (g_args.columnCount > MAX_NUM_COLUMNS) { - printf("WARNING: max acceptable columns count is %d\n", - MAX_NUM_COLUMNS); - prompt(); - g_args.columnCount = MAX_NUM_COLUMNS; - } - - for (int col = 0; col < g_args.columnCount; - col++) { - if (g_args.data_type[col] == TSDB_DATA_TYPE_NULL) { - g_args.dataType[col] = "INT"; - g_args.data_type[col] = TSDB_DATA_TYPE_INT; - } - } - for (int col = g_args.columnCount; col < MAX_NUM_COLUMNS; col++) { - g_args.dataType[col] = NULL; - g_args.data_type[col] = TSDB_DATA_TYPE_NULL; - } - } else if ((0 == strncmp(argv[i], "-b", strlen("-b"))) || - (0 == - strncmp(argv[i], "--data-type", strlen("--data-type")))) { - g_args.demo_mode = false; - - char *dataType; - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "b"); - goto end_parse_command; - } - dataType = argv[++i]; - } else if (0 == strncmp(argv[i], - "--data-type=", strlen("--data-type="))) { - dataType = (char *)(argv[i] + strlen("--data-type=")); - } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) { - dataType = (char *)(argv[i] + strlen("-b")); - } else if (strlen("--data-type") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--data-type"); - goto end_parse_command; - } - dataType = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (strstr(dataType, ",") == NULL) { - // only one col - if (strcasecmp(dataType, "INT") && - strcasecmp(dataType, "FLOAT") && - strcasecmp(dataType, "TINYINT") && - strcasecmp(dataType, "BOOL") && - strcasecmp(dataType, "SMALLINT") && - strcasecmp(dataType, "BIGINT") && - strcasecmp(dataType, "DOUBLE") && - strcasecmp(dataType, "TIMESTAMP") && - !regexMatch(dataType, - 
"^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED) && - strcasecmp(dataType, "UTINYINT") && - strcasecmp(dataType, "USMALLINT") && - strcasecmp(dataType, "UINT") && - strcasecmp(dataType, "UBIGINT")) { - printHelp(); - errorPrint("%s", "-b: Invalid data_type!\n"); - goto end_parse_command; - } - if (0 == strcasecmp(dataType, "INT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_INT; - } else if (0 == strcasecmp(dataType, "TINYINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strcasecmp(dataType, "SMALLINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strcasecmp(dataType, "BIGINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strcasecmp(dataType, "FLOAT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strcasecmp(dataType, "DOUBLE")) { - g_args.data_type[0] = TSDB_DATA_TYPE_DOUBLE; - } else if (1 == regexMatch(dataType, - "^BINARY(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[0] = TSDB_DATA_TYPE_BINARY; - } else if (1 == regexMatch(dataType, - "^NCHAR(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[0] = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strcasecmp(dataType, "BOOL")) { - g_args.data_type[0] = TSDB_DATA_TYPE_BOOL; - } else if (0 == strcasecmp(dataType, "TIMESTAMP")) { - g_args.data_type[0] = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strcasecmp(dataType, "UTINYINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strcasecmp(dataType, "USMALLINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strcasecmp(dataType, "UINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_UINT; - } else if (0 == strcasecmp(dataType, "UBIGINT")) { - g_args.data_type[0] = TSDB_DATA_TYPE_UBIGINT; - } else { - g_args.data_type[0] = TSDB_DATA_TYPE_NULL; - } - g_args.dataType[0] = dataType; - if (g_args.data_type[1] != TSDB_DATA_TYPE_INT) { - g_args.dataType[1] = NULL; - g_args.data_type[1] = TSDB_DATA_TYPE_NULL; - } - } else { - // more than one col - int index = 0; - g_dupstr = strdup(dataType); - char *running = g_dupstr; - char *token = strsep(&running, ","); - while (token != NULL) { - if (strcasecmp(token, "INT") && - strcasecmp(token, "FLOAT") && - strcasecmp(token, "TINYINT") && - strcasecmp(token, "BOOL") && - strcasecmp(token, "SMALLINT") && - strcasecmp(token, "BIGINT") && - strcasecmp(token, "DOUBLE") && - strcasecmp(token, "TIMESTAMP") && - !regexMatch(token, - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED) && - strcasecmp(token, "UTINYINT") && - strcasecmp(token, "USMALLINT") && - strcasecmp(token, "UINT") && - strcasecmp(token, "UBIGINT")) { - printHelp(); - errorPrint("%s", "-b: Invalid data_type!\n"); - goto end_parse_command; - } - - if (0 == strcasecmp(token, "INT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_INT; - } else if (0 == strcasecmp(token, "FLOAT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strcasecmp(token, "SMALLINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strcasecmp(token, "BIGINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strcasecmp(token, "DOUBLE")) { - g_args.data_type[index] = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strcasecmp(token, "TINYINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_TINYINT; - } else if (1 == regexMatch(token, - "^BINARY(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[index] = TSDB_DATA_TYPE_BINARY; - } else if (1 == 
regexMatch(token, - "^NCHAR(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - g_args.data_type[index] = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strcasecmp(token, "BOOL")) { - g_args.data_type[index] = TSDB_DATA_TYPE_BOOL; - } else if (0 == strcasecmp(token, "TIMESTAMP")) { - g_args.data_type[index] = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strcasecmp(token, "UTINYINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strcasecmp(token, "USMALLINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strcasecmp(token, "UINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_UINT; - } else if (0 == strcasecmp(token, "UBIGINT")) { - g_args.data_type[index] = TSDB_DATA_TYPE_UBIGINT; - } else { - g_args.data_type[index] = TSDB_DATA_TYPE_NULL; - } - g_args.dataType[index] = token; - index++; - token = strsep(&running, ","); - if (index >= MAX_NUM_COLUMNS) break; - } - if (g_args.data_type[index] != TSDB_DATA_TYPE_INT) { - g_args.dataType[index] = NULL; - g_args.data_type[index] = TSDB_DATA_TYPE_NULL; - } - } - } else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) || - (0 == - strncmp(argv[i], "--binwidth", strlen("--binwidth")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "w"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "w"); - goto end_parse_command; - } - g_args.binwidth = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) { - if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) { - g_args.binwidth = - atoi((char *)(argv[i] + strlen("--binwidth="))); - } else { - errorPrintReqArg2(argv[0], "--binwidth"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) { - if (isStringNumber((char *)(argv[i] + strlen("-w")))) { - g_args.binwidth = atoi((char *)(argv[i] + strlen("-w"))); - } else { - errorPrintReqArg2(argv[0], "-w"); - goto end_parse_command; - } - } else if (strlen("--binwidth") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--binwidth"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--binwidth"); - goto end_parse_command; - } - g_args.binwidth = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) || - (0 == strncmp(argv[i], "--table-prefix", - strlen("--table-prefix")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "m"); - goto end_parse_command; - } - g_args.tb_prefix = argv[++i]; - } else if (0 == strncmp(argv[i], "--table-prefix=", - strlen("--table-prefix="))) { - g_args.tb_prefix = - (char *)(argv[i] + strlen("--table-prefix=")); - } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) { - g_args.tb_prefix = (char *)(argv[i] + strlen("-m")); - } else if (strlen("--table-prefix") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--table-prefix"); - goto end_parse_command; - } - g_args.tb_prefix = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-E", strlen("-E"))) || - (0 == strncmp(argv[i], "--escape-character", - strlen("--escape-character")))) { - g_args.escapeChar = true; - } else if ((0 == strncmp(argv[i], "-C", strlen("-C"))) || - (0 == strncmp(argv[i], "--chinese", - strlen("--chinese")))) { - g_args.chinese = true; - } else 
if ((strcmp(argv[i], "-N") == 0) || - (0 == strcmp(argv[i], "--normal-table"))) { - g_args.demo_mode = false; - g_args.use_metric = false; - } else if ((strcmp(argv[i], "-M") == 0) || - (0 == strcmp(argv[i], "--random"))) { - g_args.demo_mode = false; - } else if ((strcmp(argv[i], "-x") == 0) || - (0 == strcmp(argv[i], "--aggr-func"))) { - g_args.aggr_func = true; - } else if ((strcmp(argv[i], "-y") == 0) || - (0 == strcmp(argv[i], "--answer-yes"))) { - g_args.answer_yes = true; - } else if ((strcmp(argv[i], "-g") == 0) || - (0 == strcmp(argv[i], "--debug"))) { - g_args.debug_print = true; - } else if (strcmp(argv[i], "-gg") == 0) { - g_args.verbose_print = true; - } else if ((0 == strncmp(argv[i], "-R", strlen("-R"))) || - (0 == strncmp(argv[i], "--disorder-range", - strlen("--disorder-range")))) { - if (strlen("-R") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "R"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "R"); - goto end_parse_command; - } - g_args.disorderRange = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--disorder-range=", - strlen("--disorder-range="))) { - if (isStringNumber( - (char *)(argv[i] + strlen("--disorder-range=")))) { - g_args.disorderRange = - atoi((char *)(argv[i] + strlen("--disorder-range="))); - } else { - errorPrintReqArg2(argv[0], "--disorder-range"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) { - if (isStringNumber((char *)(argv[i] + strlen("-R")))) { - g_args.disorderRange = - atoi((char *)(argv[i] + strlen("-R"))); - } else { - errorPrintReqArg2(argv[0], "-R"); - goto end_parse_command; - } - - if (g_args.disorderRange < 0) { - errorPrint("Invalid disorder range %d, will be set to %d\n", - g_args.disorderRange, 1000); - g_args.disorderRange = 1000; - } - } else if (strlen("--disorder-range") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--disorder-range"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--disorder-range"); - goto end_parse_command; - } - g_args.disorderRange = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) || - (0 == - strncmp(argv[i], "--disorder", strlen("--disorder")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "O"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "O"); - goto end_parse_command; - } - g_args.disorderRatio = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--disorder=", strlen("--disorder="))) { - if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) { - g_args.disorderRatio = - atoi((char *)(argv[i] + strlen("--disorder="))); - } else { - errorPrintReqArg2(argv[0], "--disorder"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) { - if (isStringNumber((char *)(argv[i] + strlen("-O")))) { - g_args.disorderRatio = - atoi((char *)(argv[i] + strlen("-O"))); - } else { - errorPrintReqArg2(argv[0], "-O"); - goto end_parse_command; - } - } else if (strlen("--disorder") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--disorder"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--disorder"); - goto end_parse_command; - } - g_args.disorderRatio = atoi(argv[++i]); - } else { - 
errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (g_args.disorderRatio > 50) { - errorPrint("Invalid disorder ratio %d, will be set to %d\n", - g_args.disorderRatio, 50); - g_args.disorderRatio = 50; - } - } else if ((0 == strncmp(argv[i], "-a", strlen("-a"))) || - (0 == strncmp(argv[i], "--replica", strlen("--replica")))) { - if (2 == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg(argv[0], "a"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "a"); - goto end_parse_command; - } - g_args.replica = atoi(argv[++i]); - } else if (0 == - strncmp(argv[i], "--replica=", strlen("--replica="))) { - if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) { - g_args.replica = - atoi((char *)(argv[i] + strlen("--replica="))); - } else { - errorPrintReqArg2(argv[0], "--replica"); - goto end_parse_command; - } - } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) { - if (isStringNumber((char *)(argv[i] + strlen("-a")))) { - g_args.replica = atoi((char *)(argv[i] + strlen("-a"))); - } else { - errorPrintReqArg2(argv[0], "-a"); - goto end_parse_command; - } - } else if (strlen("--replica") == strlen(argv[i])) { - if (argc == i + 1) { - errorPrintReqArg3(argv[0], "--replica"); - goto end_parse_command; - } else if (!isStringNumber(argv[i + 1])) { - errorPrintReqArg2(argv[0], "--replica"); - goto end_parse_command; - } - g_args.replica = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - goto end_parse_command; - } - - if (g_args.replica > 3 || g_args.replica < 1) { - errorPrint("Invalid replica value %d, will be set to %d\n", - g_args.replica, 1); - g_args.replica = 1; - } - } else if (strcmp(argv[i], "-D") == 0) { - g_args.method_of_delete = atoi(argv[++i]); - if (g_args.method_of_delete > 3) { - errorPrint("%s", - "\n\t-D need a value (0~3) number following!\n"); - goto end_parse_command; - } - } else if ((strcmp(argv[i], "--version") == 0) || - (strcmp(argv[i], "-V") == 0)) { - printVersion(); - } else if ((strcmp(argv[i], "--help") == 0) || - (strcmp(argv[i], "-?") == 0)) { - printHelp(); - } else if (strcmp(argv[i], "--usage") == 0) { - printf( - " Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ - [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ - [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ - [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ - [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ - [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ - [--help] [--usage] [--version]\n"); - exit(EXIT_SUCCESS); - } else { - // to simulate argp_option output - if (strlen(argv[i]) > 2) { - if (0 == strncmp(argv[i], "--", 2)) { - fprintf(stderr, "%s: unrecognized options '%s'\n", argv[0], - argv[i]); - } else if (0 == strncmp(argv[i], "-", 1)) { - char tmp[2] = {0}; - tstrncpy(tmp, argv[i] + 1, 2); - fprintf(stderr, "%s: invalid options -- '%s'\n", argv[0], - tmp); - } else { - fprintf(stderr, "%s: Too many arguments\n", argv[0]); - } - } else { - fprintf(stderr, "%s invalid options -- '%s'\n", argv[0], - (char *)((char *)argv[i]) + 1); - } - fprintf(stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more " - "information.\n"); - goto end_parse_command; - } - } - - int columnCount; - for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount++) { - if (g_args.dataType[columnCount] == NULL) { - break; - } - } - - if (0 == columnCount) { - errorPrint("%s", "data 
type error!\n"); - goto end_parse_command; - } - g_args.columnCount = columnCount; - - g_args.lenOfOneRow = TIMESTAMP_BUFF_LEN; // timestamp - for (int c = 0; c < g_args.columnCount; c++) { - switch (g_args.data_type[c]) { - case TSDB_DATA_TYPE_BINARY: - g_args.lenOfOneRow += g_args.binwidth + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - g_args.lenOfOneRow += g_args.binwidth + 3; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - g_args.lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - g_args.lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - g_args.lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - g_args.lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - g_args.lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - g_args.lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - g_args.lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - default: - errorPrint("get error data type : %s\n", g_args.dataType[c]); - goto end_parse_command; - } - } - - if (((g_args.debug_print) && (NULL != g_args.metaFile)) || - g_args.verbose_print) { - printf( - "##################################################################" - "#\n"); - printf("# meta file: %s\n", g_args.metaFile); - printf("# Server IP: %s:%hu\n", - g_args.host == NULL ? "localhost" : g_args.host, g_args.port); - printf("# User: %s\n", g_args.user); - printf("# Password: %s\n", g_args.password); - printf("# Use metric: %s\n", - g_args.use_metric ? "true" : "false"); - if (*(g_args.dataType)) { - printf("# Specified data type: "); - for (int c = 0; c < MAX_NUM_COLUMNS; c++) - if (g_args.dataType[c]) - printf("%s,", g_args.dataType[c]); - else - break; - printf("\n"); - } - printf("# Insertion interval: %" PRIu64 "\n", - g_args.insert_interval); - printf("# Number of records per req: %u\n", g_args.reqPerReq); - printf("# Max SQL length: %" PRIu64 "\n", - g_args.max_sql_len); - printf("# Length of Binary: %d\n", g_args.binwidth); - printf("# Number of Threads: %d\n", g_args.nthreads); - printf("# Number of Tables: %" PRId64 "\n", - g_args.ntables); - printf("# Number of Data per Table: %" PRId64 "\n", - g_args.insertRows); - printf("# Database name: %s\n", g_args.database); - printf("# Table prefix: %s\n", g_args.tb_prefix); - if (g_args.disorderRatio) { - printf("# Data order: %d\n", - g_args.disorderRatio); - printf("# Data out of order rate: %d\n", - g_args.disorderRange); - } - printf("# Delete method: %d\n", - g_args.method_of_delete); - printf("# Answer yes when prompt: %d\n", g_args.answer_yes); - printf("# Print debug info: %d\n", g_args.debug_print); - printf("# Print verbose info: %d\n", - g_args.verbose_print); - printf( - "##################################################################" - "#\n"); - - prompt(); - } - code = 0; -end_parse_command: - return code; -} -void setParaFromArg() { - char type[20]; - char length[20]; - if (g_args.host) { - tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); - } else { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } - - if (g_args.user) { - tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); - } - - tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN); - - if (g_args.port) { - g_Dbs.port = g_args.port; - } - - g_Dbs.threadCount = 
g_args.nthreads; - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - - g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = true; - - tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); - g_Dbs.db[0].dbCfg.replica = g_args.replica; - tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN); - - tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - - g_Dbs.use_metric = g_args.use_metric; - g_args.prepared_rand = min(g_args.insertRows, MAX_PREPARED_RAND); - g_Dbs.aggr_func = g_args.aggr_func; - - char dataString[TSDB_MAX_BYTES_PER_ROW]; - char * data_type = g_args.data_type; - char ** dataType = g_args.dataType; - int32_t *data_length = g_args.data_length; - - memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); - - if ((data_type[0] == TSDB_DATA_TYPE_BINARY) || - (data_type[0] == TSDB_DATA_TYPE_BOOL) || - (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { - g_Dbs.aggr_func = false; - } - - if (g_args.use_metric) { - g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", - TSDB_TABLE_NAME_LEN); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables; - g_Dbs.db[0].superTbls[0].escapeChar = g_args.escapeChar; - g_Dbs.threadCount = g_args.nthreads; - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - g_Dbs.asyncMode = g_args.async_mode; - - g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; - g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; - tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, - TBNAME_PREFIX_LEN); - tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN); - - if (g_args.iface == INTERFACE_BUT) { - g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE; - } else { - g_Dbs.db[0].superTbls[0].iface = g_args.iface; - } - g_Dbs.db[0].superTbls[0].lineProtocol = TSDB_SML_LINE_PROTOCOL; - g_Dbs.db[0].superTbls[0].tsPrecision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, - "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; - - g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows; - g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; - - g_Dbs.db[0].superTbls[0].columnCount = 0; - for (int i = 0; i < MAX_NUM_COLUMNS; i++) { - if (data_type[i] == TSDB_DATA_TYPE_NULL) { - break; - } - - g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i]; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, dataType[i], - min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); - if (1 == regexMatch(dataType[i], - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", - REG_ICASE | REG_EXTENDED)) { - sscanf(dataType[i], "%[^(](%[^)]", type, length); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = atoi(length); - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, type, - min(DATATYPE_BUFF_LEN, strlen(type) + 1)); - } else { - switch (g_Dbs.db[0].superTbls[0].columns[i].data_type) { - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_UTINYINT: - case TSDB_DATA_TYPE_TINYINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(char); - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(int16_t); - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(int32_t); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: - case 
TSDB_DATA_TYPE_UBIGINT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(int64_t); - break; - case TSDB_DATA_TYPE_FLOAT: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(float); - break; - case TSDB_DATA_TYPE_DOUBLE: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - sizeof(double); - break; - default: - g_Dbs.db[0].superTbls[0].columns[i].dataLen = - g_args.binwidth; - break; - } - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - dataType[i], - min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); - } - g_Dbs.db[0].superTbls[0].columnCount++; - } - - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount; - } else { - for (int i = g_Dbs.db[0].superTbls[0].columnCount; - i < g_args.columnCount; i++) { - g_Dbs.db[0].superTbls[0].columns[i].data_type = - TSDB_DATA_TYPE_INT; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", - min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = sizeof(int32_t); - g_Dbs.db[0].superTbls[0].columnCount++; - } - } - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", - min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); - g_Dbs.db[0].superTbls[0].tags[0].data_type = TSDB_DATA_TYPE_INT; - g_Dbs.db[0].superTbls[0].tags[0].dataLen = sizeof(int32_t); - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", - min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1)); - g_Dbs.db[0].superTbls[0].tags[1].data_type = TSDB_DATA_TYPE_BINARY; - g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth; - g_Dbs.db[0].superTbls[0].tagCount = 2; - } else { - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - g_Dbs.db[0].superTbls[0].tagCount = 0; - for (int i = 0; i < MAX_NUM_COLUMNS; i++) { - if (data_type[i] == TSDB_DATA_TYPE_NULL) { - break; - } - if (1 == regexMatch(dataType[i], - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", - REG_ICASE | REG_EXTENDED)) { - sscanf(dataType[i], "%[^(](%[^)]", type, length); - data_length[i] = atoi(length); - } else { - switch (data_type[i]) { - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_UTINYINT: - case TSDB_DATA_TYPE_TINYINT: - data_length[i] = sizeof(char); - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - data_length[i] = sizeof(int16_t); - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - data_length[i] = sizeof(int32_t); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - data_length[i] = sizeof(int64_t); - break; - case TSDB_DATA_TYPE_FLOAT: - data_length[i] = sizeof(float); - break; - case TSDB_DATA_TYPE_DOUBLE: - data_length[i] = sizeof(double); - break; - default: - data_length[i] = g_args.binwidth; - break; - } - } - } - } -} - -void querySqlFile(TAOS *taos, char *sqlFile) { - FILE *fp = fopen(sqlFile, "r"); - if (fp == NULL) { - printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); - return; - } - - int read_len = 0; - char *cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW); - if (cmd == NULL) { - errorPrint("%s", "failde to allocate memory\n"); - return; - } - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - double t = (double)taosGetTimestampMs(); - - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue; - line[--read_len] = '\0'; - - if (read_len == 0 || isCommentLine(line)) { // line starts with # - continue; - } - - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, 
read_len); - cmd_len += read_len; - continue; - } - - memcpy(cmd + cmd_len, line, read_len); - if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { - errorPrint("queryDbExec %s failed!\n", cmd); - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; - } - memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW); - cmd_len = 0; - } - - t = taosGetTimestampMs() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t / 1000000); - - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; -} - -void *queryStableAggrFunc(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS * taos = pThreadInfo->taos; - setThreadName("queryStableAggrFunc"); - char *command = calloc(1, BUFFER_SIZE); - if (NULL == command) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - FILE *fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - errorPrint("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, - strerror(errno)); - free(command); - return NULL; - } - - int64_t insertRows = pThreadInfo->stbInfo->insertRows; - int64_t ntables = - pThreadInfo->ntables; // pThreadInfo->end_table_to - - // pThreadInfo->start_table_from + 1; - int64_t totalData = insertRows * ntables; - bool aggr_func = g_Dbs.aggr_func; - - char **aggreFunc; - int n; - - if (g_args.demo_mode) { - aggreFunc = g_aggreFuncDemo; - n = aggr_func ? (sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) - : 2; - } else { - aggreFunc = g_aggreFunc; - n = aggr_func ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - } - - if (!aggr_func) { - printf( - "\nThe first field is either Binary or Bool. Aggregation functions " - "are not supported.\n"); - } - - printf("%" PRId64 " records:\n", totalData); - fprintf(fp, "Querying On %" PRId64 " records:\n", totalData); - - for (int j = 0; j < n; j++) { - char condition[COND_BUF_LEN] = "\0"; - char tempS[64] = "\0"; - - int64_t m = 10 < ntables ? 
10 : ntables; - - for (int64_t i = 1; i <= m; i++) { - if (i == 1) { - if (g_args.demo_mode) { - sprintf(tempS, "groupid = %" PRId64 "", i); - } else { - sprintf(tempS, "t0 = %" PRId64 "", i); - } - } else { - if (g_args.demo_mode) { - sprintf(tempS, " or groupid = %" PRId64 " ", i); - } else { - sprintf(tempS, " or t0 = %" PRId64 " ", i); - } - } - strncat(condition, tempS, COND_BUF_LEN - 1); - - sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], - condition); - - printf("Where condition: %s\n", condition); - - debugPrint("%s() LN%d, sql command: %s\n", __func__, __LINE__, - command); - fprintf(fp, "%s\n", command); - - double t = (double)taosGetTimestampUs(); - - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint("Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - fclose(fp); - free(command); - return NULL; - } - int count = 0; - while (taos_fetch_row(pSql) != NULL) { - count++; - } - t = taosGetTimestampUs() - t; - - fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - ntables * insertRows / (t / 1000), t); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], - t / 1000000); - - taos_free_result(pSql); - } - fprintf(fp, "\n"); - } - fclose(fp); - free(command); - - return NULL; -} - -void *queryNtableAggrFunc(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS * taos = pThreadInfo->taos; - setThreadName("queryNtableAggrFunc"); - char *command = calloc(1, BUFFER_SIZE); - if (NULL == command) { - errorPrint("%s", "failed to allocate memory\n"); - return NULL; - } - - uint64_t startTime = pThreadInfo->start_time; - char * tb_prefix = pThreadInfo->tb_prefix; - FILE * fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - errorPrint("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, - strerror(errno)); - free(command); - return NULL; - } - - int64_t insertRows; - /* if (pThreadInfo->stbInfo) { - insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; - } else { - */ - insertRows = g_args.insertRows; - // } - - int64_t ntables = - pThreadInfo->ntables; // pThreadInfo->end_table_to - - // pThreadInfo->start_table_from + 1; - int64_t totalData = insertRows * ntables; - bool aggr_func = g_Dbs.aggr_func; - - char **aggreFunc; - int n; - - if (g_args.demo_mode) { - aggreFunc = g_aggreFuncDemo; - n = aggr_func ? (sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) - : 2; - } else { - aggreFunc = g_aggreFunc; - n = aggr_func ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - } - - if (!aggr_func) { - printf( - "\nThe first field is either Binary or Bool. 
Aggregation functions " - "are not supported.\n"); - } - printf("%" PRId64 " records:\n", totalData); - fprintf( - fp, - "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - - for (int j = 0; j < n; j++) { - double totalT = 0; - uint64_t count = 0; - for (int64_t i = 0; i < ntables; i++) { - if (g_args.escapeChar) { - sprintf(command, - "SELECT %s FROM `%s%" PRId64 "` WHERE ts>= %" PRIu64, - aggreFunc[j], tb_prefix, i, startTime); - } else { - sprintf(command, - "SELECT %s FROM %s%" PRId64 " WHERE ts>= %" PRIu64, - aggreFunc[j], tb_prefix, i, startTime); - } - - double t = (double)taosGetTimestampUs(); - debugPrint("%s() LN%d, sql command: %s\n", __func__, __LINE__, - command); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint("Failed to query <%s>, reason:%s\n", command, - taos_errstr(pSql)); - taos_free_result(pSql); - fclose(fp); - free(command); - return NULL; - } - - while (taos_fetch_row(pSql) != NULL) { - count++; - } - - t = taosGetTimestampUs() - t; - totalT += t; - - taos_free_result(pSql); - } - - fprintf(fp, "|%10s | %" PRId64 " | %12.2f | %10.2f |\n", - aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, - (double)(ntables * insertRows) / totalT, totalT / 1000000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], - totalT / 1000000); - } - fprintf(fp, "\n"); - fclose(fp); - free(command); - return NULL; -} - -void queryAggrFunc() { - // query data - - pthread_t read_id; - threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo)); - if (pThreadInfo == NULL) { - errorPrint("%s", "failde to allocate memory\n"); - return; - } - - pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 - pThreadInfo->start_table_from = 0; - - if (g_args.use_metric) { - pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(pThreadInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); - } else { - pThreadInfo->ntables = g_args.ntables; - pThreadInfo->end_table_to = g_args.ntables - 1; - tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); - } - - pThreadInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, - g_Dbs.db[0].dbName, g_Dbs.port); - if (pThreadInfo->taos == NULL) { - free(pThreadInfo); - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); - - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); - } else { - pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); - } - pthread_join(read_id, NULL); - taos_close(pThreadInfo->taos); - free(pThreadInfo); -} - -void testCmdLine() { - if (strlen(configDir)) { - wordexp_t full_path; - if (wordexp(configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", configDir); - return; - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } - - g_args.test_mode = INSERT_TEST; - insertTestProcess(); - - if (g_Dbs.aggr_func) { - queryAggrFunc(); - } -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoData.c b/src/kit/taosdemo/src/demoData.c deleted file mode 100644 index 59806864306b9ecf5c3ed35c9be631c50244da58..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoData.c +++ /dev/null @@ -1,2573 +0,0 
@@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "demoData.h" -#include "demo.h" - -char * g_sampleDataBuf = NULL; -char * g_sampleBindBatchArray = NULL; -int32_t * g_randint = NULL; -uint32_t *g_randuint = NULL; -int64_t * g_randbigint = NULL; -uint64_t *g_randubigint = NULL; -float * g_randfloat = NULL; -double * g_randdouble = NULL; -char * g_randbool_buff = NULL; -char * g_randint_buff = NULL; -char * g_randuint_buff = NULL; -char * g_rand_voltage_buff = NULL; -char * g_randbigint_buff = NULL; -char * g_randubigint_buff = NULL; -char * g_randsmallint_buff = NULL; -char * g_randusmallint_buff = NULL; -char * g_randtinyint_buff = NULL; -char * g_randutinyint_buff = NULL; -char * g_randfloat_buff = NULL; -char * g_rand_current_buff = NULL; -char * g_rand_phase_buff = NULL; -char * g_randdouble_buff = NULL; - -const char charset[] = - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; - -char *rand_bool_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbool_buff + ((cursor % g_args.prepared_rand) * BOOL_BUFF_LEN); -} - -int32_t rand_bool() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_BOOL_NULL; -} - -char *rand_tinyint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randtinyint_buff + - ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); -} - -int32_t rand_tinyint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_TINYINT_NULL; -} - -char *rand_utinyint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randutinyint_buff + - ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); -} - -int32_t rand_utinyint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand] % TSDB_DATA_UTINYINT_NULL; -} - -char *rand_smallint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randsmallint_buff + - ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); -} - -int32_t rand_smallint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % TSDB_DATA_SMALLINT_NULL; -} - -char *rand_usmallint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randusmallint_buff + - ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); -} - -int32_t rand_usmallint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand] % TSDB_DATA_USMALLINT_NULL; -} - -char *rand_int_str() { - static int cursor; - cursor++; - if 
(cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -int32_t rand_int() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand]; -} - -char *rand_uint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -int32_t rand_uint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand]; -} - -char *rand_bigint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbigint_buff + - ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); -} - -int64_t rand_bigint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbigint[cursor % g_args.prepared_rand]; -} - -char *rand_ubigint_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randubigint_buff + - ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); -} - -int64_t rand_ubigint() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randubigint[cursor % g_args.prepared_rand]; -} - -char *rand_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randfloat_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -float rand_float() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randfloat[cursor % g_args.prepared_rand]; -} - -char *demo_current_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_current_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -float UNUSED_FUNC demo_current_float() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return (float)(9.8 + - 0.04 * (g_randint[cursor % g_args.prepared_rand] % 10) + - g_randfloat[cursor % g_args.prepared_rand] / 1000000000); -} - -char *demo_voltage_int_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_voltage_buff + - ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -int32_t UNUSED_FUNC demo_voltage_int() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return 215 + g_randint[cursor % g_args.prepared_rand] % 10; -} - -char *demo_phase_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_phase_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -float UNUSED_FUNC demo_phase_float() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return (float)((115 + g_randint[cursor % g_args.prepared_rand] % 10 + - g_randfloat[cursor % g_args.prepared_rand] / 1000000000) / - 360); -} - -static int usc2utf8(char* p, int unic) { - if ( unic <= 0x0000007F ) - { - *p = (unic & 0x7F); - return 1; - } - else if ( unic >= 0x00000080 && unic <= 0x000007FF ) - { - *(p+1) = (unic & 0x3F) | 0x80; - *p = ((unic >> 6) & 0x1F) | 0xC0; - return 2; - } - else if ( unic >= 0x00000800 && unic <= 0x0000FFFF ) - { - *(p+2) = (unic & 0x3F) | 0x80; - *(p+1) = ((unic >> 6) & 0x3F) | 0x80; - *p 
= ((unic >> 12) & 0x0F) | 0xE0; - return 3; - } - else if ( unic >= 0x00010000 && unic <= 0x001FFFFF ) - { - *(p+3) = (unic & 0x3F) | 0x80; - *(p+2) = ((unic >> 6) & 0x3F) | 0x80; - *(p+1) = ((unic >> 12) & 0x3F) | 0x80; - *p = ((unic >> 18) & 0x07) | 0xF0; - return 4; - } - else if ( unic >= 0x00200000 && unic <= 0x03FFFFFF ) - { - *(p+4) = (unic & 0x3F) | 0x80; - *(p+3) = ((unic >> 6) & 0x3F) | 0x80; - *(p+2) = ((unic >> 12) & 0x3F) | 0x80; - *(p+1) = ((unic >> 18) & 0x3F) | 0x80; - *p = ((unic >> 24) & 0x03) | 0xF8; - return 5; - } - else if ( unic >= 0x04000000 && unic <= 0x7FFFFFFF ) - { - *(p+5) = (unic & 0x3F) | 0x80; - *(p+4) = ((unic >> 6) & 0x3F) | 0x80; - *(p+3) = ((unic >> 12) & 0x3F) | 0x80; - *(p+2) = ((unic >> 18) & 0x3F) | 0x80; - *(p+1) = ((unic >> 24) & 0x3F) | 0x80; - *p = ((unic >> 30) & 0x01) | 0xFC; - return 6; - } - return 0; -} - -void rand_string(char *str, int size) { - if (g_args.chinese) { - char* pstr = str; - int move = 0; - while (size > 0) { - // Chinese Character need 3 bytes space - if (size < 3) { - break; - } - // Basic Chinese Character's Unicode is from 0x4e00 to 0x9fa5 - int unic = 0x4e00 + rand() % (0x9fa5 - 0x4e00); - move = usc2utf8(pstr, unic); - pstr += move; - size -= move; - } - } else { - str[0] = 0; - if (size > 0) { - //--size; - int n; - for (n = 0; n < size; n++) { - int key = abs(taosRandom()) % (int)(sizeof(charset) - 1); - str[n] = charset[key]; - } - str[n] = 0; - } - } -} - -char *rand_double_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randdouble_buff + (cursor * DOUBLE_BUFF_LEN); -} - -double rand_double() { - static int cursor; - cursor++; - cursor = cursor % g_args.prepared_rand; - return g_randdouble[cursor]; -} - -int init_rand_data() { - int32_t code = -1; - g_randint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_rand_voltage_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randbigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randbigint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randsmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randsmallint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randtinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randtinyint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randbool_buff = calloc(1, BOOL_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randbool_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randfloat_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randfloat_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_rand_current_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_rand_current_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_rand_phase_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_rand_phase_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randdouble_buff = calloc(1, 
DOUBLE_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randdouble_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randuint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randuint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randutinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randutinyint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randusmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randusmallint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randubigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); - if (NULL == g_randubigint_buff) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randint = calloc(1, sizeof(int32_t) * g_args.prepared_rand); - if (NULL == g_randint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randuint = calloc(1, sizeof(uint32_t) * g_args.prepared_rand); - if (NULL == g_randuint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randbigint = calloc(1, sizeof(int64_t) * g_args.prepared_rand); - if (NULL == g_randbigint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randubigint = calloc(1, sizeof(uint64_t) * g_args.prepared_rand); - if (NULL == g_randubigint) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randfloat = calloc(1, sizeof(float) * g_args.prepared_rand); - if (NULL == g_randfloat) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - g_randdouble = calloc(1, sizeof(double) * g_args.prepared_rand); - if (NULL == g_randdouble) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_init_rand_data; - } - - for (int i = 0; i < g_args.prepared_rand; i++) { - g_randint[i] = (int)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); - g_randuint[i] = (int)(taosRandom()); - sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d", g_randint[i]); - sprintf(g_rand_voltage_buff + i * INT_BUFF_LEN, "%d", - 215 + g_randint[i] % 10); - - sprintf(g_randbool_buff + i * BOOL_BUFF_LEN, "%s", - ((g_randint[i] % 2) & 1) ? "true" : "false"); - sprintf(g_randsmallint_buff + i * SMALLINT_BUFF_LEN, "%d", - g_randint[i] % 32768); - sprintf(g_randtinyint_buff + i * TINYINT_BUFF_LEN, "%d", - g_randint[i] % 128); - sprintf(g_randuint_buff + i * INT_BUFF_LEN, "%d", g_randuint[i]); - sprintf(g_randusmallint_buff + i * SMALLINT_BUFF_LEN, "%d", - g_randuint[i] % 65535); - sprintf(g_randutinyint_buff + i * TINYINT_BUFF_LEN, "%d", - g_randuint[i] % 255); - - g_randbigint[i] = (int64_t)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); - g_randubigint[i] = (uint64_t)(taosRandom()); - sprintf(g_randbigint_buff + i * BIGINT_BUFF_LEN, "%" PRId64 "", - g_randbigint[i]); - sprintf(g_randubigint_buff + i * BIGINT_BUFF_LEN, "%" PRId64 "", - g_randubigint[i]); - - g_randfloat[i] = - (float)(taosRandom() / 1000.0) * (taosRandom() % 2 > 0.5 ? 
1 : -1); - sprintf(g_randfloat_buff + i * FLOAT_BUFF_LEN, "%f", g_randfloat[i]); - sprintf(g_rand_current_buff + i * FLOAT_BUFF_LEN, "%f", - (float)(9.8 + 0.04 * (g_randint[i] % 10) + - g_randfloat[i] / 1000000000)); - sprintf( - g_rand_phase_buff + i * FLOAT_BUFF_LEN, "%f", - (float)((115 + g_randint[i] % 10 + g_randfloat[i] / 1000000000) / - 360)); - - g_randdouble[i] = (double)(taosRandom() / 1000000.0) * - (taosRandom() % 2 > 0.5 ? 1 : -1); - sprintf(g_randdouble_buff + i * DOUBLE_BUFF_LEN, "%f", g_randdouble[i]); - } - code = 0; -end_init_rand_data: - return code; -} - -static void generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len, - char *buf) { - if (tableSeq % 2) { - tstrncpy(buf, "beijing", len); - } else { - tstrncpy(buf, "shanghai", len); - } - // rand_string(buf, stbInfo->tags[i].dataLen); -} - -int generateTagValuesForStb(SSuperTable *stbInfo, int64_t tableSeq, - char *tagsValBuf) { - int dataLen = 0; - dataLen += snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); - for (int i = 0; i < stbInfo->tagCount; i++) { - if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", - strlen("binary"))) || - (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", - strlen("nchar")))) { - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - - int32_t tagBufLen = stbInfo->tags[i].dataLen + 1; - char * buf = (char *)calloc(1, tagBufLen); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - generateBinaryNCharTagValues(tableSeq, tagBufLen, buf); - dataLen += snprintf(tagsValBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, "\'%s\',", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", - strlen("int"))) { - if ((g_args.demo_mode) && (i == 0)) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", (tableSeq % 10) + 1); - } else { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", tableSeq); - } - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", - strlen("bigint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "float", - strlen("float"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f,", rand_float()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "double", - strlen("double"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f,", rand_double()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "smallint", - strlen("smallint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "tinyint", - strlen("tinyint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bool", - strlen("bool"))) { - dataLen += snprintf(tagsValBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, "%d,", rand_bool()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "timestamp", - strlen("timestamp"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", rand_ubigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "utinyint", - strlen("utinyint"))) { - 
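The rand_*() and rand_*_str() helpers above all rely on the same trick: init_rand_data() pre-renders g_args.prepared_rand random values into flat per-type string buffers, and each call merely advances a static cursor and returns a pointer into that pool, so nothing is formatted on the insert hot path. A condensed sketch of the idea, assuming a fixed pool size and hypothetical names (int_pool, pool_init, pool_next_int_str):

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE   4096    /* stands in for g_args.prepared_rand */
#define INT_STR_LEN 12      /* room for any formatted 32-bit int plus NUL */

static char int_pool[POOL_SIZE * INT_STR_LEN];

/* Render the whole pool once, before any rows are generated. */
static void pool_init(void) {
    for (int i = 0; i < POOL_SIZE; i++)
        snprintf(int_pool + i * INT_STR_LEN, INT_STR_LEN, "%d", rand());
}

/* Hot path: no formatting, just cycle a static cursor through the pool. */
static const char *pool_next_int_str(void) {
    static int cursor;
    cursor = (cursor + 1) % POOL_SIZE;
    return int_pool + cursor * INT_STR_LEN;
}

The trade-off is memory for speed: values repeat after POOL_SIZE draws, which is acceptable for benchmark data but not for anything that needs genuine randomness.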
dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_utinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "usmallint", - strlen("usmallint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_usmallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "uint", - strlen("uint"))) { - dataLen += snprintf(tagsValBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, "%d,", rand_uint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "ubigint", - strlen("ubigint"))) { - dataLen += - snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%" PRId64 ",", rand_ubigint()); - } else { - errorPrint("unsupport data type: %s\n", stbInfo->tags[i].dataType); - return -1; - } - } - - dataLen -= 1; - dataLen += snprintf(tagsValBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); - return 0; -} - -static int readTagFromCsvFileToMem(SSuperTable *stbInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - - FILE *fp = fopen(stbInfo->tagsFile, "r"); - if (fp == NULL) { - printf("Failed to open tags file: %s, reason:%s\n", stbInfo->tagsFile, - strerror(errno)); - return -1; - } - - if (stbInfo->tagDataBuf) { - free(stbInfo->tagDataBuf); - stbInfo->tagDataBuf = NULL; - } - - int tagCount = MAX_SAMPLES; - int count = 0; - char *tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); - if (tagDataBuf == NULL) { - printf("Failed to calloc, reason:%s\n", strerror(errno)); - fclose(fp); - return -1; - } - - while ((readLen = tgetline(&line, &n, fp)) != -1) { - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); - count++; - - if (count >= tagCount - 1) { - char *tmp = - realloc(tagDataBuf, - (size_t)(tagCount * 1.5 * stbInfo->lenOfTagOfOneRow)); - if (tmp != NULL) { - tagDataBuf = tmp; - tagCount = (int)(tagCount * 1.5); - memset( - tagDataBuf + count * stbInfo->lenOfTagOfOneRow, 0, - (size_t)((tagCount - count) * stbInfo->lenOfTagOfOneRow)); - } else { - // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", - stbInfo->tagsFile); - tmfree(tagDataBuf); - free(line); - fclose(fp); - return -1; - } - } - } - - stbInfo->tagDataBuf = tagDataBuf; - stbInfo->tagSampleCount = count; - - free(line); - fclose(fp); - return 0; -} - -static void getAndSetRowsFromCsvFile(SSuperTable *stbInfo) { - FILE *fp = fopen(stbInfo->sampleFile, "r"); - int line_count = 0; - if (fp == NULL) { - errorPrint("Failed to open sample file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - return; - } - char *buf = calloc(1, stbInfo->maxSqlLen); - if (buf == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return; - } - - while (fgets(buf, (int)stbInfo->maxSqlLen, fp)) { - line_count++; - } - fclose(fp); - tmfree(buf); - stbInfo->insertRows = line_count; -} - -static int generateSampleFromCsvForStb(SSuperTable *stbInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - int getRows = 0; - - FILE *fp = fopen(stbInfo->sampleFile, "r"); - if (fp == NULL) { - errorPrint("Failed to open sample file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - return -1; - } - while (1) { - readLen = tgetline(&line, &n, fp); - if (-1 == readLen) { - if (0 != fseek(fp, 0, SEEK_SET)) { - errorPrint("Failed to fseek file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - fclose(fp); - return -1; - 
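generateSampleFromCsvForStb() above caches up to MAX_SAMPLES rows from the sample CSV: trim the trailing CR/LF, skip blank or oversized lines, copy each row at a fixed stride of lenOfOneRow, and fseek() back to the start if EOF arrives before the cache is full. A reduced sketch of that loop using POSIX getline(); the sizes and names (N_SAMPLES, ROW_LEN, load_samples) are illustrative only, and an extra guard stops the loop if the file yields no usable rows at all:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define N_SAMPLES 64        /* stands in for MAX_SAMPLES */
#define ROW_LEN   128       /* stands in for stbInfo->lenOfOneRow */

/* Fill buf (N_SAMPLES rows, ROW_LEN bytes each) from a CSV, wrapping at EOF.
 * Returns 0 when the cache is full, -1 otherwise. */
static int load_samples(const char *path, char *buf) {
    FILE *fp = fopen(path, "r");
    if (fp == NULL) return -1;

    char   *line = NULL;
    size_t  cap  = 0;
    int     rows = 0;
    int     rows_at_last_rewind = -1;

    while (rows < N_SAMPLES) {
        ssize_t len = getline(&line, &cap, fp);
        if (len < 0) {                              /* EOF: rewind and reread */
            if (rows == rows_at_last_rewind) break; /* file has no usable rows */
            rows_at_last_rewind = rows;
            if (fseek(fp, 0, SEEK_SET) != 0) break;
            continue;
        }
        while (len > 0 && (line[len - 1] == '\n' || line[len - 1] == '\r'))
            line[--len] = '\0';
        if (len == 0 || len >= ROW_LEN) continue;   /* skip blank/oversized rows */

        memcpy(buf + rows * ROW_LEN, line, (size_t)len + 1);
        rows++;
    }

    free(line);
    fclose(fp);
    return rows == N_SAMPLES ? 0 : -1;
}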
} - continue; - } - - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - if (readLen > stbInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%" PRIu64 - "], so discard this row\n", - (int32_t)readLen, stbInfo->lenOfOneRow); - continue; - } - - memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, line, - readLen); - getRows++; - - if (getRows == MAX_SAMPLES) { - break; - } - } - - fclose(fp); - tmfree(line); - return 0; -} - -int prepareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { - return -1; - } - } - } - } - - return 0; -} - -static int getRowDataFromSample(char *dataBuf, int64_t maxLen, - int64_t timestamp, SSuperTable *stbInfo, - int64_t *sampleUsePos) { - if ((*sampleUsePos) == MAX_SAMPLES) { - *sampleUsePos = 0; - } - - int dataLen = 0; - if (stbInfo->useSampleTs) { - dataLen += snprintf( - dataBuf + dataLen, maxLen - dataLen, "(%s", - stbInfo->sampleDataBuf + stbInfo->lenOfOneRow * (*sampleUsePos)); - } else { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "(%" PRId64 ", ", timestamp); - dataLen += snprintf( - dataBuf + dataLen, maxLen - dataLen, "%s", - stbInfo->sampleDataBuf + stbInfo->lenOfOneRow * (*sampleUsePos)); - } - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - (*sampleUsePos)++; - - return dataLen; -} - -int64_t generateStbRowData(SSuperTable *stbInfo, char *recBuf, - int64_t remainderBufLen, int64_t timestamp) { - int64_t dataLen = 0; - char * pstr = recBuf; - int64_t maxLen = MAX_DATA_SIZE; - int tmpLen; - - dataLen += - snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 "", timestamp); - - for (int i = 0; i < stbInfo->columnCount; i++) { - tstrncpy(pstr + dataLen, ",", 2); - dataLen += 1; - - if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY) || - (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) { - if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - - if ((stbInfo->columns[i].dataLen + 1) > - /* need count 3 extra chars \', \', and , */ - (remainderBufLen - dataLen - 3)) { - return 0; - } - char *buf = (char *)calloc(stbInfo->columns[i].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += - snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf); - tmfree(buf); - - } else { - char *tmp = NULL; - switch (stbInfo->columns[i].data_type) { - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (i == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, INT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UINT: - tmp = rand_uint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, INT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_BIGINT: - tmp = rand_bigint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UBIGINT: - tmp = rand_ubigint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case 
TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (i == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, FLOAT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmp = rand_double_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, DOUBLE_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - tmp = rand_smallint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, SMALLINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_USMALLINT: - tmp = rand_usmallint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, SMALLINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_TINYINT: - tmp = rand_tinyint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, TINYINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UTINYINT: - tmp = rand_utinyint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, TINYINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_BOOL: - tmp = rand_bool_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BOOL_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmp = rand_bigint_str(); - tmpLen = (int)strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint("Not support data type: %s\n", - stbInfo->columns[i].dataType); - exit(EXIT_FAILURE); - } - if (tmp) { - dataLen += tmpLen; - } - } - - if (dataLen > (remainderBufLen - (128))) return 0; - } - - dataLen += snprintf(pstr + dataLen, 2, ")"); - - verbosePrint("%s() LN%d, dataLen:%" PRId64 "\n", __func__, __LINE__, - dataLen); - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return strlen(recBuf); -} - -static int64_t generateData(char *recBuf, char *data_type, int32_t *data_length, - int64_t timestamp) { - memset(recBuf, 0, MAX_DATA_SIZE); - char *pstr = recBuf; - pstr += sprintf(pstr, "(%" PRId64 "", timestamp); - - int columnCount = g_args.columnCount; - - bool b; - char *s; - for (int i = 0; i < columnCount; i++) { - switch (data_type[i]) { - case TSDB_DATA_TYPE_TINYINT: - pstr += sprintf(pstr, ",%d", rand_tinyint()); - break; - - case TSDB_DATA_TYPE_SMALLINT: - pstr += sprintf(pstr, ",%d", rand_smallint()); - break; - - case TSDB_DATA_TYPE_INT: - pstr += sprintf(pstr, ",%d", rand_int()); - break; - - case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, ",%" PRId64 "", rand_bigint()); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, ",%" PRId64 "", rand_bigint()); - break; - - case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, ",%10.4f", rand_float()); - break; - - case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, ",%20.8f", rand_double()); - break; - - case TSDB_DATA_TYPE_BOOL: - b = rand_bool() & 1; - pstr += sprintf(pstr, ",%s", b ? 
"true" : "false"); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - s = calloc(1, data_length[i] + 1); - if (NULL == s) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - rand_string(s, data_length[i]); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - break; - - case TSDB_DATA_TYPE_UTINYINT: - pstr += sprintf(pstr, ",%d", rand_utinyint()); - break; - - case TSDB_DATA_TYPE_USMALLINT: - pstr += sprintf(pstr, ",%d", rand_usmallint()); - break; - - case TSDB_DATA_TYPE_UINT: - pstr += sprintf(pstr, ",%d", rand_uint()); - break; - - case TSDB_DATA_TYPE_UBIGINT: - pstr += sprintf(pstr, ",%" PRId64 "", rand_ubigint()); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint("Unknown data type %d\n", data_type[i]); - return -1; - } - - if (strlen(recBuf) > MAX_DATA_SIZE) { - errorPrint("%s", "column length too long, abort\n"); - return -1; - } - } - - pstr += sprintf(pstr, ")"); - - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return (int32_t)strlen(recBuf); -} - -static int generateSampleFromRand(char *sampleDataBuf, uint64_t lenOfOneRow, - int columnCount, StrColumn *columns) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - - char *buff = calloc(lenOfOneRow, 1); - if (NULL == buff) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - for (int i = 0; i < MAX_SAMPLES; i++) { - uint64_t pos = 0; - memset(buff, 0, lenOfOneRow); - - for (int c = 0; c < columnCount; c++) { - char *tmp = NULL; - - uint32_t dataLen; - char data_type = - (columns) ? (columns[c].data_type) : g_args.data_type[c]; - - switch (data_type) { - case TSDB_DATA_TYPE_BINARY: - dataLen = (columns) ? columns[c].dataLen : g_args.binwidth; - rand_string(data, dataLen); - pos += sprintf(buff + pos, "%s,", data); - break; - - case TSDB_DATA_TYPE_NCHAR: - dataLen = (columns) ? 
columns[c].dataLen : g_args.binwidth; - rand_string(data, dataLen - 1); - pos += sprintf(buff + pos, "%s,", data); - break; - - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (c == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - break; - - case TSDB_DATA_TYPE_UINT: - pos += sprintf(buff + pos, "%s,", rand_uint_str()); - break; - - case TSDB_DATA_TYPE_BIGINT: - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - break; - - case TSDB_DATA_TYPE_UBIGINT: - pos += sprintf(buff + pos, "%s,", rand_ubigint_str()); - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (c == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - break; - - case TSDB_DATA_TYPE_DOUBLE: - pos += sprintf(buff + pos, "%s,", rand_double_str()); - break; - - case TSDB_DATA_TYPE_SMALLINT: - pos += sprintf(buff + pos, "%s,", rand_smallint_str()); - break; - - case TSDB_DATA_TYPE_USMALLINT: - pos += sprintf(buff + pos, "%s,", rand_usmallint_str()); - break; - - case TSDB_DATA_TYPE_TINYINT: - pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); - break; - - case TSDB_DATA_TYPE_UTINYINT: - pos += sprintf(buff + pos, "%s,", rand_utinyint_str()); - break; - - case TSDB_DATA_TYPE_BOOL: - pos += sprintf(buff + pos, "%s,", rand_bool_str()); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint( - "%s() LN%d, Unknown data type %s\n", __func__, __LINE__, - (columns) ? (columns[c].dataType) : g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - *(buff + pos - 1) = 0; - memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos); - } - - free(buff); - return 0; -} - -static int generateSampleFromRandForNtb() { - return generateSampleFromRand(g_sampleDataBuf, g_args.lenOfOneRow, - g_args.columnCount, NULL); -} - -static int generateSampleFromRandForStb(SSuperTable *stbInfo) { - return generateSampleFromRand(stbInfo->sampleDataBuf, stbInfo->lenOfOneRow, - stbInfo->columnCount, stbInfo->columns); -} - -int prepareSampleForNtb() { - g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1); - if (NULL == g_sampleDataBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - return generateSampleFromRandForNtb(); -} - -int prepareSampleForStb(SSuperTable *stbInfo) { - stbInfo->sampleDataBuf = calloc(stbInfo->lenOfOneRow * MAX_SAMPLES, 1); - if (NULL == stbInfo->sampleDataBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - int ret; - if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) { - if (stbInfo->useSampleTs) { - getAndSetRowsFromCsvFile(stbInfo); - } - ret = generateSampleFromCsvForStb(stbInfo); - } else { - ret = generateSampleFromRandForStb(stbInfo); - } - - if (0 != ret) { - errorPrint("read sample from %s file failed.\n", stbInfo->sampleFile); - tmfree(stbInfo->sampleDataBuf); - stbInfo->sampleDataBuf = NULL; - return -1; - } - - return 0; -} - -int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, int disorderRatio, - int disorderRange) { - int64_t randTail = timeStampStep * seq; - if (disorderRatio > 0) { - int rand_num = taosRandom() % 100; - if (rand_num < disorderRatio) { - randTail = (randTail + (taosRandom() % disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %" PRId64 "\n", randTail); - } - } - - return 
randTail; -} - -static int32_t generateDataTailWithoutStb( - uint32_t batch, char *buffer, int64_t remainderBufLen, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, - /* int64_t *pSamplePos, */ int64_t *dataLen) { - uint64_t len = 0; - char * pstr = buffer; - - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); - - int32_t k = 0; - for (k = 0; k < batch;) { - char *data = pstr; - memset(data, 0, MAX_DATA_SIZE); - - int64_t retLen = 0; - - char * data_type = g_args.data_type; - int32_t *data_length = g_args.data_length; - - if (g_args.disorderRatio) { - retLen = - generateData(data, data_type, data_length, - startTime + getTSRandTail(g_args.timestamp_step, k, - g_args.disorderRatio, - g_args.disorderRange)); - } else { - retLen = generateData(data, data_type, data_length, - startTime + g_args.timestamp_step * k); - } - - if (len > remainderBufLen) break; - - pstr += retLen; - k++; - len += retLen; - remainderBufLen -= retLen; - - verbosePrint("%s() LN%d len=%" PRIu64 " k=%d \nbuffer=%s\n", __func__, - __LINE__, len, k, buffer); - - recordFrom++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; -} - -static int32_t generateStbDataTail(SSuperTable *stbInfo, uint32_t batch, - char *buffer, int64_t remainderBufLen, - int64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos, - int64_t *dataLen) { - uint64_t len = 0; - - char *pstr = buffer; - - bool tsRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - tsRand = true; - } else { - tsRand = false; - } - verbosePrint("%s() LN%d batch=%u buflen=%" PRId64 "\n", __func__, __LINE__, - batch, remainderBufLen); - - int32_t k; - for (k = 0; k < batch;) { - char *data = pstr; - - int64_t lenOfRow = 0; - - if (tsRand) { - if (stbInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData( - stbInfo, data, remainderBufLen, - startTime + getTSRandTail(stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange)); - } else { - lenOfRow = - generateStbRowData(stbInfo, data, remainderBufLen, - startTime + stbInfo->timeStampStep * k); - } - } else { - lenOfRow = getRowDataFromSample( - data, - (remainderBufLen < MAX_DATA_SIZE) ? 
remainderBufLen - : MAX_DATA_SIZE, - startTime + stbInfo->timeStampStep * k, stbInfo, pSamplePos); - } - - if (lenOfRow == 0) { - data[0] = '\0'; - break; - } - if ((lenOfRow + 1) > remainderBufLen) { - break; - } - - pstr += lenOfRow; - k++; - len += lenOfRow; - remainderBufLen -= lenOfRow; - - verbosePrint("%s() LN%d len=%" PRIu64 " k=%u \nbuffer=%s\n", __func__, - __LINE__, len, k, buffer); - - recordFrom++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; -} - -static int generateSQLHeadWithoutStb(char *tableName, char *dbName, - char *buffer, int remainderBufLen) { - int len; - - char headBuf[HEAD_BUFF_LEN]; - - len = snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s values", dbName, tableName); - - if (len > remainderBufLen) return -1; - - tstrncpy(buffer, headBuf, len + 1); - - return len; -} - -static int generateStbSQLHead(SSuperTable *stbInfo, char *tableName, - int64_t tableSeq, char *dbName, char *buffer, - int remainderBufLen) { - int len; - - char headBuf[HEAD_BUFF_LEN]; - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char *tagsValBuf = (char *)calloc(TSDB_MAX_SQL_LEN + 1, 1); - if (NULL == tagsValBuf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - if (0 == stbInfo->tagSource) { - if (generateTagValuesForStb(stbInfo, tableSeq, tagsValBuf)) { - tmfree(tagsValBuf); - return -1; - } - } else { - snprintf( - tagsValBuf, TSDB_MAX_SQL_LEN, "(%s)", - stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * - (tableSeq % stbInfo->tagSampleCount)); - } - - len = - snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s using %s.%s TAGS%s values", - dbName, tableName, dbName, stbInfo->stbName, tagsValBuf); - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { - len = - snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s values", dbName, tableName); - } else { - len = - snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s values", dbName, tableName); - } - - if (len > remainderBufLen) return -1; - - tstrncpy(buffer, headBuf, len + 1); - - return len; -} - -int32_t generateStbInterlaceData(threadInfo *pThreadInfo, char *tableName, - uint32_t batchPerTbl, uint64_t i, - uint32_t batchPerTblTimes, uint64_t tableSeq, - char *buffer, int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen) { - char *pstr = buffer; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int headLen = - generateStbSQLHead(stbInfo, tableName, tableSeq, pThreadInfo->db_name, - pstr, (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%" PRIu64 " buffer:\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, i, buffer); - - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen = 0; - - verbosePrint("[%d] %s() LN%d i=%" PRIu64 - " batchPerTblTimes=%u batchPerTbl = %u\n", - pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, - batchPerTbl); - - if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } - - int32_t k = generateStbDataTail(stbInfo, batchPerTbl, pstr, - *pRemainderBufLen, insertRows, 0, startTime, - &(pThreadInfo->samplePos), &dataLen); - - if (k == batchPerTbl) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint( - "%s() LN%d, generated data tail: %u, not equal batch per table: " - "%u\n", - __func__, __LINE__, k, batchPerTbl); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; -} - -int64_t generateInterlaceDataWithoutStb(char *tableName, uint32_t 
batch, - uint64_t tableSeq, char *dbName, - char *buffer, int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen) { - char *pstr = buffer; - - int headLen = generateSQLHeadWithoutStb(tableName, dbName, pstr, - (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen = 0; - - int32_t k = generateDataTailWithoutStb(batch, pstr, *pRemainderBufLen, - insertRows, 0, startTime, &dataLen); - - if (k == batch) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint( - "%s() LN%d, generated data tail: %d, not equal batch per table: " - "%u\n", - __func__, __LINE__, k, batch); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; -} - -static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, char data_type, - int32_t dataLen, int32_t timePrec, - char *value) { - int32_t * bind_int; - uint32_t *bind_uint; - int64_t * bind_bigint; - uint64_t *bind_ubigint; - float * bind_float; - double * bind_double; - int8_t * bind_bool; - int64_t * bind_ts2; - int16_t * bind_smallint; - uint16_t *bind_usmallint; - int8_t * bind_tinyint; - uint8_t * bind_utinyint; - - switch (data_type) { - case TSDB_DATA_TYPE_BINARY: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary; - - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - bind_binary = calloc(1, strlen(value) + 1); - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - bind_binary = calloc(1, dataLen + 1); - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } - - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_NCHAR: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar; - - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - bind_nchar = calloc(1, strlen(value) + 1); - strncpy(bind_nchar, value, strlen(value)); - } else { - bind_nchar = calloc(1, dataLen + 1); - rand_string(bind_nchar, dataLen); - } - - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_INT: - bind_int = calloc(1, sizeof(int32_t)); - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UINT: - bind_uint = malloc(sizeof(uint32_t)); - - if (value) { - *bind_uint = atoi(value); - } else { - *bind_uint = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_UINT; - bind->buffer_length = sizeof(uint32_t); - bind->buffer = bind_uint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_BIGINT: - bind_bigint = malloc(sizeof(int64_t)); - - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UBIGINT: - bind_ubigint = malloc(sizeof(uint64_t)); - - if (value) { - 
*bind_ubigint = atoll(value); - } else { - *bind_ubigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UBIGINT; - bind->buffer_length = sizeof(uint64_t); - bind->buffer = bind_ubigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_FLOAT: - bind_float = malloc(sizeof(float)); - - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_DOUBLE: - bind_double = malloc(sizeof(double)); - - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_SMALLINT: - bind_smallint = malloc(sizeof(int16_t)); - - if (value) { - *bind_smallint = (int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_USMALLINT: - bind_usmallint = malloc(sizeof(uint16_t)); - - if (value) { - *bind_usmallint = (uint16_t)atoi(value); - } else { - *bind_usmallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(uint16_t); - bind->buffer = bind_usmallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_TINYINT: - bind_tinyint = malloc(sizeof(int8_t)); - - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UTINYINT: - bind_utinyint = malloc(sizeof(uint8_t)); - - if (value) { - *bind_utinyint = (int8_t)atoi(value); - } else { - *bind_utinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UTINYINT; - bind->buffer_length = sizeof(uint8_t); - bind->buffer = bind_utinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_BOOL: - bind_bool = malloc(sizeof(int8_t)); - - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; - } else { - *bind_bool = false; - } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - bind_ts2 = malloc(sizeof(int64_t)); - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while (value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; - } - i++; - } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != - taosParseTime(value, &tmpEpoch, (int32_t)strlen(value), - timePrec, 0)) { - free(bind_ts2); - errorPrint("Input %s, time format error!\n", value); - return -1; - } - *bind_ts2 = tmpEpoch; - } else { - *bind_ts2 = atoll(value); - } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = 
sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint("Not support data type: %d\n", data_type); - return -1; - } - - return 0; -} - -int32_t prepareStmtWithoutStb(threadInfo *pThreadInfo, char *tableName, - uint32_t batch, int64_t insertRows, - int64_t recordFrom, int64_t startTime) { - TAOS_STMT *stmt = pThreadInfo->stmt; - int ret = taos_stmt_set_tbname(stmt, tableName); - if (ret != 0) { - errorPrint( - "failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: " - "%s\n", - tableName, ret, taos_stmt_errstr(stmt)); - return ret; - } - - char *data_type = g_args.data_type; - - char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1)); - if (bindArray == NULL) { - errorPrint("Failed to allocate %d bind params\n", - (g_args.columnCount + 1)); - return -1; - } - - int32_t k = 0; - for (k = 0; k < batch;) { - /* columnCount + 1 (ts) */ - - TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - - int64_t *bind_ts = pThreadInfo->bind_ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - - if (g_args.disorderRatio) { - *bind_ts = startTime + getTSRandTail(g_args.timestamp_step, k, - g_args.disorderRatio, - g_args.disorderRange); - } else { - *bind_ts = startTime + g_args.timestamp_step * k; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - for (int i = 0; i < g_args.columnCount; i++) { - bind = (TAOS_BIND *)((char *)bindArray + - (sizeof(TAOS_BIND) * (i + 1))); - if (-1 == - prepareStmtBindArrayByType(bind, data_type[i], g_args.binwidth, - pThreadInfo->time_precision, NULL)) { - free(bindArray); - return -1; - } - } - if (taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { - errorPrint("taos_stmt_bind_param() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - break; - } - // if msg > 3MB, break - if (taos_stmt_add_batch(stmt)) { - errorPrint("taos_stmt_add_batch() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - break; - } - - k++; - recordFrom++; - if (recordFrom >= insertRows) { - break; - } - } - - free(bindArray); - return k; -} - -int32_t prepareStbStmtBindTag(char *bindArray, SSuperTable *stbInfo, - char *tagsVal, int32_t timePrec) { - TAOS_BIND *tag; - - for (int t = 0; t < stbInfo->tagCount; t++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if (prepareStmtBindArrayByType(tag, stbInfo->tags[t].data_type, - stbInfo->tags[t].dataLen, timePrec, - NULL)) { - return -1; - } - } - - return 0; -} - -int parseSamplefileToStmtBatch(SSuperTable *stbInfo) { - int32_t columnCount = (stbInfo) ? stbInfo->columnCount : g_args.columnCount; - char * sampleBindBatchArray = NULL; - - if (stbInfo) { - stbInfo->sampleBindBatchArray = - calloc(1, sizeof(uintptr_t *) * columnCount); - sampleBindBatchArray = stbInfo->sampleBindBatchArray; - } else { - g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); - sampleBindBatchArray = g_sampleBindBatchArray; - } - - for (int c = 0; c < columnCount; c++) { - char data_type = - (stbInfo) ? 
stbInfo->columns[c].data_type : g_args.data_type[c]; - - char *tmpP = NULL; - - switch (data_type) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - tmpP = calloc(1, sizeof(int32_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - tmpP = calloc(1, sizeof(char) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BOOL: - tmpP = calloc(1, sizeof(char) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_FLOAT: - tmpP = calloc(1, sizeof(float) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmpP = calloc(1, sizeof(double) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = calloc( - 1, MAX_SAMPLES * (((stbInfo) ? stbInfo->columns[c].dataLen - : g_args.binwidth) + - 1)); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); - *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) = - (uintptr_t)tmpP; - break; - - default: - errorPrint("Unknown data type: %s\n", - (stbInfo) ? stbInfo->columns[c].dataType - : g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - char *sampleDataBuf = (stbInfo) ? stbInfo->sampleDataBuf : g_sampleDataBuf; - int64_t lenOfOneRow = (stbInfo) ? stbInfo->lenOfOneRow : g_args.lenOfOneRow; - - for (int i = 0; i < MAX_SAMPLES; i++) { - int cursor = 0; - - for (int c = 0; c < columnCount; c++) { - char data_type = - (stbInfo) ? 
stbInfo->columns[c].data_type : g_args.data_type[c]; - char *restStr = sampleDataBuf + lenOfOneRow * i + cursor; - int lengthOfRest = (int)strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index++) { - if (restStr[index] == ',') { - break; - } - } - - char *tmpStr = calloc(1, index + 1); - if (NULL == tmpStr) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - strncpy(tmpStr, restStr, index); - cursor += index + 1; // skip ',' too - char *tmpP; - - switch (data_type) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *((int32_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int32_t) * i)) = atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_FLOAT: - *(float *)(((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(float) * i)) = (float)atof(tmpStr); - break; - - case TSDB_DATA_TYPE_DOUBLE: - *(double *)(((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(double) * i)) = atof(tmpStr); - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *((int8_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int8_t) * i)) = (int8_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *((int16_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int16_t) * i)) = (int16_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *((int64_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int64_t) * i)) = (int64_t)atol(tmpStr); - break; - - case TSDB_DATA_TYPE_BOOL: - *((int8_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int8_t) * i)) = (int8_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - *((int64_t *)((uintptr_t) * - (uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c) + - sizeof(int64_t) * i)) = (int64_t)atol(tmpStr); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = (char *)(*(uintptr_t *)(sampleBindBatchArray + - sizeof(char *) * c)); - strcpy(tmpP + i * (((stbInfo) ? stbInfo->columns[c].dataLen - : g_args.binwidth)), - tmpStr); - break; - - default: - break; - } - - free(tmpStr); - } - } - - return 0; -} - -static int parseSampleToStmtBatchForThread(threadInfo * pThreadInfo, - SSuperTable *stbInfo, - uint32_t timePrec, uint32_t batch) { - uint32_t columnCount = - (stbInfo) ? 
stbInfo->columnCount : g_args.columnCount; - - pThreadInfo->bind_ts_array = calloc(1, sizeof(int64_t) * batch); - if (NULL == pThreadInfo->bind_ts_array) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - pThreadInfo->bindParams = - calloc(1, sizeof(TAOS_MULTI_BIND) * (columnCount + 1)); - if (NULL == pThreadInfo->bindParams) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - pThreadInfo->is_null = calloc(1, batch); - if (NULL == pThreadInfo->is_null) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - return 0; -} - -int parseStbSampleToStmtBatchForThread(threadInfo * pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec, - uint32_t batch) { - return parseSampleToStmtBatchForThread(pThreadInfo, stbInfo, timePrec, - batch); -} - -int parseNtbSampleToStmtBatchForThread(threadInfo *pThreadInfo, - uint32_t timePrec, uint32_t batch) { - return parseSampleToStmtBatchForThread(pThreadInfo, NULL, timePrec, batch); -} - -int32_t generateStbProgressiveData(SSuperTable *stbInfo, char *tableName, - int64_t tableSeq, char *dbName, char *buffer, - int64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen) { - char *pstr = buffer; - - memset(pstr, 0, *pRemainderBufLen); - - int64_t headLen = generateStbSQLHead(stbInfo, tableName, tableSeq, dbName, - buffer, (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen; - - return generateStbDataTail(stbInfo, g_args.reqPerReq, pstr, - *pRemainderBufLen, insertRows, recordFrom, - startTime, pSamplePos, &dataLen); -} - -int32_t generateProgressiveDataWithoutStb( - char *tableName, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pRemainderBufLen) { - char *pstr = buffer; - - memset(buffer, 0, *pRemainderBufLen); - - int64_t headLen = generateSQLHeadWithoutStb( - tableName, pThreadInfo->db_name, buffer, (int)(*pRemainderBufLen)); - - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen; - - return generateDataTailWithoutStb(g_args.reqPerReq, pstr, *pRemainderBufLen, - insertRows, recordFrom, startTime, - /*pSamplePos, */ &dataLen); -} - -int32_t generateSmlConstPart(char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq) { - int64_t dataLen = 0; - uint64_t length = stbInfo->lenOfOneRow; - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL) { - dataLen += - snprintf(sml + dataLen, length - dataLen, "%s,id=%s%" PRIu64 "", - stbInfo->stbName, stbInfo->childTblPrefix, - tbSeq + pThreadInfo->start_table_from); - } else if (stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - dataLen += snprintf(sml + dataLen, length - dataLen, "id=%s%" PRIu64 "", - stbInfo->childTblPrefix, - tbSeq + pThreadInfo->start_table_from); - } else { - errorPrint("unsupport schemaless protocol (%d)\n", - stbInfo->lineProtocol); - return -1; - } - - for (int j = 0; j < stbInfo->tagCount; j++) { - tstrncpy(sml + dataLen, - (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL) ? 
"," : " ", - 2); - dataLen += 1; - switch (stbInfo->tags[j].data_type) { - case TSDB_DATA_TYPE_TIMESTAMP: - errorPrint("Does not support data type %s as tag\n", - stbInfo->tags[j].dataType); - return -1; - case TSDB_DATA_TYPE_BOOL: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_bool_str()); - break; - case TSDB_DATA_TYPE_TINYINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_tinyint_str()); - break; - case TSDB_DATA_TYPE_UTINYINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_utinyint_str()); - break; - case TSDB_DATA_TYPE_SMALLINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_smallint_str()); - break; - case TSDB_DATA_TYPE_USMALLINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_usmallint_str()); - break; - case TSDB_DATA_TYPE_INT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_int_str()); - break; - case TSDB_DATA_TYPE_UINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_uint_str()); - break; - case TSDB_DATA_TYPE_BIGINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_bigint_str()); - break; - case TSDB_DATA_TYPE_UBIGINT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_ubigint_str()); - break; - case TSDB_DATA_TYPE_FLOAT: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_float_str()); - break; - case TSDB_DATA_TYPE_DOUBLE: - dataLen += snprintf(sml + dataLen, length - dataLen, "t%d=%s", - j, rand_double_str()); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->tags[j].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = (char *)calloc(stbInfo->tags[j].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->tags[j].dataLen); - dataLen += - snprintf(sml + dataLen, length - dataLen, "t%d=%s", j, buf); - tmfree(buf); - break; - - default: - errorPrint("Unsupport data type %s\n", - stbInfo->tags[j].dataType); - return -1; - } - } - return 0; -} - -int32_t generateSmlMutablePart(char *line, char *sml, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int64_t timestamp) { - int dataLen = 0; - uint64_t buffer = stbInfo->lenOfOneRow; - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL) { - dataLen = snprintf(line, buffer, "%s ", sml); - for (uint32_t c = 0; c < stbInfo->columnCount; c++) { - if (c != 0) { - tstrncpy(line + dataLen, ",", 2); - dataLen += 1; - } - switch (stbInfo->columns[c].data_type) { - case TSDB_DATA_TYPE_TIMESTAMP: - errorPrint("Does not support data type %s as tag\n", - stbInfo->columns[c].dataType); - return -1; - case TSDB_DATA_TYPE_BOOL: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%s", c, rand_bool_str()); - break; - case TSDB_DATA_TYPE_TINYINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si8", c, rand_tinyint_str()); - break; - case TSDB_DATA_TYPE_UTINYINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su8", c, rand_utinyint_str()); - break; - case TSDB_DATA_TYPE_SMALLINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si16", c, rand_smallint_str()); - break; - case TSDB_DATA_TYPE_USMALLINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su16", c, rand_usmallint_str()); - break; - case 
TSDB_DATA_TYPE_INT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si32", c, rand_int_str()); - break; - case TSDB_DATA_TYPE_UINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su32", c, rand_uint_str()); - break; - case TSDB_DATA_TYPE_BIGINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%si64", c, rand_bigint_str()); - break; - case TSDB_DATA_TYPE_UBIGINT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%su64", c, rand_ubigint_str()); - break; - case TSDB_DATA_TYPE_FLOAT: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%sf32", c, rand_float_str()); - break; - case TSDB_DATA_TYPE_DOUBLE: - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=%sf64", c, rand_double_str()); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->columns[c].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( - "binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = - (char *)calloc(stbInfo->columns[c].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[c].dataLen); - if (stbInfo->columns[c].data_type == - TSDB_DATA_TYPE_BINARY) { - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=\"%s\"", c, buf); - } else { - dataLen += snprintf(line + dataLen, buffer - dataLen, - "c%d=L\"%s\"", c, buf); - } - tmfree(buf); - break; - default: - errorPrint("Unsupport data type %s\n", - stbInfo->columns[c].dataType); - return -1; - } - } - dataLen += snprintf(line + dataLen, buffer - dataLen, " %" PRId64 "", - timestamp); - return 0; - } else if (stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - switch (stbInfo->columns[0].data_type) { - case TSDB_DATA_TYPE_BOOL: - snprintf(line, buffer, "%s %" PRId64 " %s %s", stbInfo->stbName, - timestamp, rand_bool_str(), sml); - break; - case TSDB_DATA_TYPE_TINYINT: - snprintf(line, buffer, "%s %" PRId64 " %si8 %s", - stbInfo->stbName, timestamp, rand_tinyint_str(), sml); - break; - case TSDB_DATA_TYPE_UTINYINT: - snprintf(line, buffer, "%s %" PRId64 " %su8 %s", - stbInfo->stbName, timestamp, rand_utinyint_str(), sml); - break; - case TSDB_DATA_TYPE_SMALLINT: - snprintf(line, buffer, "%s %" PRId64 " %si16 %s", - stbInfo->stbName, timestamp, rand_smallint_str(), sml); - break; - case TSDB_DATA_TYPE_USMALLINT: - snprintf(line, buffer, "%s %" PRId64 " %su16 %s", - stbInfo->stbName, timestamp, rand_usmallint_str(), - sml); - break; - case TSDB_DATA_TYPE_INT: - snprintf(line, buffer, "%s %" PRId64 " %si32 %s", - stbInfo->stbName, timestamp, rand_int_str(), sml); - break; - case TSDB_DATA_TYPE_UINT: - snprintf(line, buffer, "%s %" PRId64 " %su32 %s", - stbInfo->stbName, timestamp, rand_uint_str(), sml); - break; - case TSDB_DATA_TYPE_BIGINT: - snprintf(line, buffer, "%s %" PRId64 " %si64 %s", - stbInfo->stbName, timestamp, rand_bigint_str(), sml); - break; - case TSDB_DATA_TYPE_UBIGINT: - snprintf(line, buffer, "%s %" PRId64 " %su64 %s", - stbInfo->stbName, timestamp, rand_ubigint_str(), sml); - break; - case TSDB_DATA_TYPE_FLOAT: - snprintf(line, buffer, "%s %" PRId64 " %sf32 %s", - stbInfo->stbName, timestamp, rand_float_str(), sml); - break; - case TSDB_DATA_TYPE_DOUBLE: - snprintf(line, buffer, "%s %" PRId64 " %sf64 %s", - stbInfo->stbName, timestamp, rand_double_str(), sml); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->columns[0].dataLen > TSDB_MAX_BINARY_LEN) { - 
errorPrint("binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = (char *)calloc(stbInfo->columns[0].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->columns[0].dataLen); - if (stbInfo->columns[0].data_type == TSDB_DATA_TYPE_BINARY) { - snprintf(line, buffer, "%s %" PRId64 " \"%s\" %s", - stbInfo->stbName, timestamp, buf, sml); - } else { - snprintf(line, buffer, "%s %" PRId64 " L\"%s\" %s", - stbInfo->stbName, timestamp, buf, sml); - } - tmfree(buf); - break; - default: - errorPrint("Unsupport data type %s\n", - stbInfo->columns[0].dataType); - return -1; - } - return 0; - } else { - errorPrint("unsupport schemaless protocol(%d)\n", - stbInfo->lineProtocol); - return -1; - } -} - -int32_t generateSmlJsonTags(cJSON *tagsList, SSuperTable *stbInfo, - threadInfo *pThreadInfo, int tbSeq) { - cJSON *tags = cJSON_CreateObject(); - char * tbName = calloc(1, TSDB_TABLE_NAME_LEN); - assert(tbName); - snprintf(tbName, TSDB_TABLE_NAME_LEN, "%s%" PRIu64 "", - stbInfo->childTblPrefix, tbSeq + pThreadInfo->start_table_from); - cJSON_AddStringToObject(tags, "id", tbName); - char *tagName = calloc(1, TSDB_MAX_TAGS); - assert(tagName); - for (int i = 0; i < stbInfo->tagCount; i++) { - cJSON *tag = cJSON_CreateObject(); - snprintf(tagName, TSDB_MAX_TAGS, "t%d", i); - switch (stbInfo->tags[i].data_type) { - case TSDB_DATA_TYPE_BOOL: - cJSON_AddNumberToObject(tag, "value", rand_bool()); - cJSON_AddStringToObject(tag, "type", "bool"); - break; - case TSDB_DATA_TYPE_TINYINT: - cJSON_AddNumberToObject(tag, "value", rand_tinyint()); - cJSON_AddStringToObject(tag, "type", "tinyint"); - break; - case TSDB_DATA_TYPE_SMALLINT: - cJSON_AddNumberToObject(tag, "value", rand_smallint()); - cJSON_AddStringToObject(tag, "type", "smallint"); - break; - case TSDB_DATA_TYPE_INT: - cJSON_AddNumberToObject(tag, "value", rand_int()); - cJSON_AddStringToObject(tag, "type", "int"); - break; - case TSDB_DATA_TYPE_BIGINT: - cJSON_AddNumberToObject(tag, "value", (double)rand_bigint()); - cJSON_AddStringToObject(tag, "type", "bigint"); - break; - case TSDB_DATA_TYPE_FLOAT: - cJSON_AddNumberToObject(tag, "value", rand_float()); - cJSON_AddStringToObject(tag, "type", "float"); - break; - case TSDB_DATA_TYPE_DOUBLE: - cJSON_AddNumberToObject(tag, "value", rand_double()); - cJSON_AddStringToObject(tag, "type", "double"); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint("binary or nchar length overflow, maxsize:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *buf = (char *)calloc(stbInfo->tags[i].dataLen + 1, 1); - if (NULL == buf) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - rand_string(buf, stbInfo->tags[i].dataLen); - if (stbInfo->tags[i].data_type == TSDB_DATA_TYPE_BINARY) { - cJSON_AddStringToObject(tag, "value", buf); - cJSON_AddStringToObject(tag, "type", "binary"); - } else { - cJSON_AddStringToObject(tag, "value", buf); - cJSON_AddStringToObject(tag, "type", "nchar"); - } - tmfree(buf); - break; - default: - errorPrint( - "unsupport data type (%s) for schemaless json protocol\n", - stbInfo->tags[i].dataType); - return -1; - } - cJSON_AddItemToObject(tags, tagName, tag); - } - cJSON_AddItemToArray(tagsList, tags); - tmfree(tagName); - tmfree(tbName); - return 0; -} - -int32_t generateSmlJsonCols(cJSON *array, cJSON *tag, SSuperTable *stbInfo, - threadInfo 
*pThreadInfo, int64_t timestamp) {
-    cJSON *record = cJSON_CreateObject();
-    cJSON *ts = cJSON_CreateObject();
-    cJSON_AddNumberToObject(ts, "value", (double)timestamp);
-    if (pThreadInfo->time_precision == TSDB_TIME_PRECISION_MILLI) {
-        cJSON_AddStringToObject(ts, "type", "ms");
-    } else if (pThreadInfo->time_precision == TSDB_TIME_PRECISION_MICRO) {
-        cJSON_AddStringToObject(ts, "type", "us");
-    } else if (pThreadInfo->time_precision == TSDB_TIME_PRECISION_NANO) {
-        cJSON_AddStringToObject(ts, "type", "ns");
-    } else {
-        errorPrint("unsupport time precision %d\n",
-                   pThreadInfo->time_precision);
-        return -1;
-    }
-    cJSON *value = cJSON_CreateObject();
-    switch (stbInfo->columns[0].data_type) {
-        case TSDB_DATA_TYPE_BOOL:
-            cJSON_AddNumberToObject(value, "value", rand_bool());
-            cJSON_AddStringToObject(value, "type", "bool");
-            break;
-        case TSDB_DATA_TYPE_TINYINT:
-            cJSON_AddNumberToObject(value, "value", rand_tinyint());
-            cJSON_AddStringToObject(value, "type", "tinyint");
-            break;
-        case TSDB_DATA_TYPE_SMALLINT:
-            cJSON_AddNumberToObject(value, "value", rand_smallint());
-            cJSON_AddStringToObject(value, "type", "smallint");
-            break;
-        case TSDB_DATA_TYPE_INT:
-            cJSON_AddNumberToObject(value, "value", rand_int());
-            cJSON_AddStringToObject(value, "type", "int");
-            break;
-        case TSDB_DATA_TYPE_BIGINT:
-            cJSON_AddNumberToObject(value, "value", (double)rand_bigint());
-            cJSON_AddStringToObject(value, "type", "bigint");
-            break;
-        case TSDB_DATA_TYPE_FLOAT:
-            cJSON_AddNumberToObject(value, "value", rand_float());
-            cJSON_AddStringToObject(value, "type", "float");
-            break;
-        case TSDB_DATA_TYPE_DOUBLE:
-            cJSON_AddNumberToObject(value, "value", rand_double());
-            cJSON_AddStringToObject(value, "type", "double");
-            break;
-        case TSDB_DATA_TYPE_BINARY:
-        case TSDB_DATA_TYPE_NCHAR:
-            if (stbInfo->columns[0].dataLen > TSDB_MAX_BINARY_LEN) {
-                errorPrint("binary or nchar length overflow, maxsize:%u\n",
-                           (uint32_t)TSDB_MAX_BINARY_LEN);
-                return -1;
-            }
-            char *buf = (char *)calloc(stbInfo->columns[0].dataLen + 1, 1);
-            if (NULL == buf) {
-                errorPrint("%s", "failed to allocate memory\n");
-                return -1;
-            }
-            rand_string(buf, stbInfo->columns[0].dataLen);
-            if (stbInfo->columns[0].data_type == TSDB_DATA_TYPE_BINARY) {
-                cJSON_AddStringToObject(value, "value", buf);
-                cJSON_AddStringToObject(value, "type", "binary");
-            } else {
-                cJSON_AddStringToObject(value, "value", buf);
-                cJSON_AddStringToObject(value, "type", "nchar");
-            }
-            break;
-        default:
-            errorPrint(
-                "unsupport data type (%s) for schemaless json protocol\n",
-                stbInfo->columns[0].dataType);
-            return -1;
-    }
-    cJSON_AddItemToObject(record, "timestamp", ts);
-    cJSON_AddItemToObject(record, "value", value);
-    cJSON_AddItemToObject(record, "tags", tag);
-    cJSON_AddStringToObject(record, "metric", stbInfo->stbName);
-    cJSON_AddItemToArray(array, record);
-    return 0;
-}
diff --git a/src/kit/taosdemo/src/demoInsert.c b/src/kit/taosdemo/src/demoInsert.c
deleted file mode 100644
index 8209524dc4c0dd30982ab062531fd7ff55403338..0000000000000000000000000000000000000000
--- a/src/kit/taosdemo/src/demoInsert.c
+++ /dev/null
@@ -1,3510 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "cJSON.h" -#include "demo.h" -#include "demoData.h" - -static int calcRowLen(SSuperTable *superTbls) { - int colIndex; - int lenOfOneRow = 0; - - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char *dataType = superTbls->columns[colIndex].dataType; - - switch (superTbls->columns[colIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - default: - errorPrint("get error data type : %s\n", dataType); - exit(EXIT_FAILURE); - } - if (superTbls->iface == SML_IFACE) { - lenOfOneRow += SML_LINE_SQL_SYNTAX_OFFSET; - } - } - - superTbls->lenOfOneRow = lenOfOneRow + TIMESTAMP_BUFF_LEN; // timestamp - - int tagIndex; - int lenOfTagOfOneRow = 0; - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char *dataType = superTbls->tags[tagIndex].dataType; - switch (superTbls->tags[tagIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - break; - case TSDB_DATA_TYPE_NCHAR: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + INT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_BOOL: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + BOOL_BUFF_LEN; - break; - case TSDB_DATA_TYPE_FLOAT: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_DOUBLE: - lenOfTagOfOneRow += - superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; - break; - default: - errorPrint("get error tag type : %s\n", dataType); - exit(EXIT_FAILURE); - } - if (superTbls->iface == SML_IFACE) { - lenOfOneRow += SML_LINE_SQL_SYNTAX_OFFSET; - } - } - - if (superTbls->iface == SML_IFACE) { - lenOfTagOfOneRow += - 2 * TSDB_TABLE_NAME_LEN * 2 + SML_LINE_SQL_SYNTAX_OFFSET; - superTbls->lenOfOneRow += lenOfTagOfOneRow; - } - - 
superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - - return 0; -} - -static int getSuperTableFromServer(TAOS *taos, char *dbName, - SSuperTable *superTbls) { - char command[SQL_BUFF_LEN] = "\0"; - TAOS_RES *res; - TAOS_ROW row = NULL; - int count = 0; - - // get schema use cmd: describe superTblName; - snprintf(command, SQL_BUFF_LEN, "describe %s.%s", dbName, - superTbls->stbName); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printf("failed to run command %s, reason: %s\n", command, - taos_errstr(res)); - taos_free_result(res); - return -1; - } - - int tagIndex = 0; - int columnIndex = 0; - TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { - if (0 == count) { - count++; - continue; - } - - if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { - tstrncpy(superTbls->tags[tagIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT", strlen("INT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT", strlen("TINYINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT", strlen("SMALLINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT", strlen("BIGINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "FLOAT", strlen("FLOAT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "DOUBLE", strlen("DOUBLE"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BINARY", strlen("BINARY"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "NCHAR", strlen("NCHAR"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BOOL", strlen("BOOL"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT UNSIGNED", - strlen("TINYINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UTINYINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "UTINYINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT UNSIGNED", - strlen("SMALLINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_USMALLINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "USMALLINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT UNSIGNED", strlen("INT UNSIGNED"))) { - 
superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "UINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UBIGINT; - tstrncpy(superTbls->tags[tagIndex].dataType, "UBIGINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL; - } - superTbls->tags[tagIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->tags[tagIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(NOTE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + - 1); - if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - tstrncpy(superTbls->tags[tagIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } - tagIndex++; - } else { - tstrncpy(superTbls->columns[columnIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - - if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT", strlen("INT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT", strlen("TINYINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT", strlen("SMALLINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT", strlen("BIGINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_BIGINT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "FLOAT", strlen("FLOAT"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_FLOAT; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "DOUBLE", strlen("DOUBLE"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_DOUBLE; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BINARY", strlen("BINARY"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_BINARY; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "NCHAR", strlen("NCHAR"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_NCHAR; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BOOL", strlen("BOOL"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT UNSIGNED", - strlen("TINYINT 
UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_UTINYINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "UTINYINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT UNSIGNED", - strlen("SMALLINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_USMALLINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "USMALLINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == - strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT UNSIGNED", strlen("INT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "UINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else if (0 == strncasecmp( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = - TSDB_DATA_TYPE_UBIGINT; - tstrncpy(superTbls->columns[columnIndex].dataType, "UBIGINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } else { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL; - } - superTbls->columns[columnIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->columns[columnIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(NOTE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + - 1); - - if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "UNSIGNED") == NULL) { - tstrncpy(superTbls->columns[columnIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + - 1); - } - - columnIndex++; - } - count++; - } - - superTbls->columnCount = columnIndex; - superTbls->tagCount = tagIndex; - taos_free_result(res); - - calcRowLen(superTbls); - return 0; -} - -static int createSuperTable(TAOS *taos, char *dbName, SSuperTable *superTbl, - char *command) { - char cols[COL_BUFFER_LEN] = "\0"; - int len = 0; - - int lenOfOneRow = 0; - - if (superTbl->columnCount == 0) { - errorPrint("super table column count is %d\n", superTbl->columnCount); - return -1; - } - - for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - switch (superTbl->columns[colIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s(%d)", - colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s(%d)", - colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (colIndex == 1)) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", VOLTAGE INT"); - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "INT"); - } - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT"); - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT"); - lenOfOneRow += 
SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TINYINT"); - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BOOL"); - lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (colIndex == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", CURRENT FLOAT"); - } else if (colIndex == 2) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", PHASE FLOAT"); - } - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "FLOAT"); - } - - lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "DOUBLE"); - lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TIMESTAMP"); - lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UTINYINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TINYINT UNSIGNED"); - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_USMALLINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT UNSIGNED"); - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "INT UNSIGNED"); - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UBIGINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT UNSIGNED"); - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - default: - taos_close(taos); - errorPrint("config error data type : %s\n", - superTbl->columns[colIndex].dataType); - return -1; - } - } - - superTbl->lenOfOneRow = lenOfOneRow + TIMESTAMP_BUFF_LEN; // timestamp - - // save for creating child table - superTbl->colsOfCreateChildTable = - (char *)calloc(len + TIMESTAMP_BUFF_LEN, 1); - if (NULL == superTbl->colsOfCreateChildTable) { - taos_close(taos); - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - snprintf(superTbl->colsOfCreateChildTable, len + TIMESTAMP_BUFF_LEN, - "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - superTbl->colsOfCreateChildTable); - - if (superTbl->tagCount == 0) { - errorPrint("super table tag count is %d\n", superTbl->tagCount); - return -1; - } - - char tags[TSDB_MAX_TAGS_LEN] = "\0"; - int tagIndex; - len = 0; - - int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); - for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { - char *dataType = superTbl->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - if ((g_args.demo_mode) && (tagIndex == 1)) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "location BINARY(%d),", - superTbl->tags[tagIndex].dataLen); - } else { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s(%d),", tagIndex, "BINARY", - superTbl->tags[tagIndex].dataLen); - } - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += - snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s(%d),", - tagIndex, "NCHAR", superTbl->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - if 
((g_args.demo_mode) && (tagIndex == 0)) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "groupId INT, "); - } else { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "INT"); - } - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "BIGINT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "SMALLINT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "TINYINT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "BOOL"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "FLOAT"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "DOUBLE"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "UTINYINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "TINYINT UNSIGNED"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "USMALLINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "SMALLINT UNSIGNED"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "UINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "INT UNSIGNED"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; - } else if (strcasecmp(dataType, "UBIGINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "BIGINT UNSIGNED"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "T%d %s,", - tagIndex, "TIMESTAMP"); - lenOfTagOfOneRow += - superTbl->tags[tagIndex].dataLen + TIMESTAMP_BUFF_LEN; - } else { - taos_close(taos); - errorPrint("config error tag type : %s\n", dataType); - return -1; - } - } - - len -= 1; - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); - - superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; - - snprintf(command, BUFFER_SIZE, - superTbl->escapeChar - ? 
"CREATE TABLE IF NOT EXISTS %s.`%s` (ts TIMESTAMP%s) TAGS %s" - : "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s", - dbName, superTbl->stbName, cols, tags); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint("create supertable %s failed!\n\n", superTbl->stbName); - return -1; - } - - debugPrint("create supertable %s success!\n\n", superTbl->stbName); - return 0; -} - -int createDatabasesAndStables(char *command) { - TAOS *taos = NULL; - int ret = 0; - taos = - taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - return -1; - } - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].drop) { - sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - return -1; - } - - int dataLen = 0; - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - "CREATE DATABASE IF NOT EXISTS %s", - g_Dbs.db[i].dbName); - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " BLOCKS %d", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " CACHE %d", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " DAYS %d", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " KEEP %d", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.quorum > 1) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " QUORUM %d", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " REPLICA %d", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " UPDATE %d", g_Dbs.db[i].dbCfg.update); - } - // if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { - // dataLen += snprintf(command + dataLen, - // BUFFER_SIZE - dataLen, "tables %d ", - // g_Dbs.db[i].dbCfg.maxtablesPerVnode); - //} - if (g_Dbs.db[i].dbCfg.minRows > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " MINROWS %d", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " MAXROWS %d", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " COMP %d", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " wal %d", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.cacheLast > 0) { - dataLen += - snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " CACHELAST %d", g_Dbs.db[i].dbCfg.cacheLast); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " FSYNC %d", g_Dbs.db[i].dbCfg.fsync); - } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - dataLen += - snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); - } - - if (0 != queryDbExec(taos, 
command, NO_INSERT_TYPE, false)) { - taos_close(taos); - errorPrint("\ncreate database %s failed!\n\n", - g_Dbs.db[i].dbName); - return -1; - } - printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); - } - - debugPrint("%s() LN%d supertbl count:%" PRIu64 "\n", __func__, __LINE__, - g_Dbs.db[i].superTblCount); - - int validStbCount = 0; - - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (g_Dbs.db[i].superTbls[j].iface == SML_IFACE) { - goto skip; - } - - sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].stbName); - ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); - - if ((ret != 0) || (g_Dbs.db[i].drop)) { - char *cmd = calloc(1, BUFFER_SIZE); - if (NULL == cmd) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - ret = createSuperTable(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j], cmd); - tmfree(cmd); - - if (0 != ret) { - tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - errorPrint("create super table %" PRIu64 " failed!\n\n", j); - continue; - } - } else { - ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); - if (0 != ret) { - errorPrint("\nget super table %s.%s info failed!\n\n", - g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].stbName); - continue; - } - } - skip: - validStbCount++; - } - g_Dbs.db[i].superTblCount = validStbCount; - } - - taos_close(taos); - return 0; -} - -static void *createTable(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int32_t* code = calloc(1, sizeof(int32_t)); - *code = -1; - setThreadName("createTable"); - - uint64_t lastPrintTime = taosGetTimestampMs(); - - int buff_len = BUFFER_SIZE; - - pThreadInfo->buffer = calloc(1, buff_len); - if (NULL == pThreadInfo->buffer) { - errorPrint("%s", "failed to allocate memory\n"); - goto create_table_end; - } - - int len = 0; - int batchNum = 0; - - verbosePrint("%s() LN%d: Creating table from %" PRIu64 " to %" PRIu64 "\n", - __func__, __LINE__, pThreadInfo->start_table_from, - pThreadInfo->end_table_to); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(pThreadInfo->buffer, buff_len, - g_args.escapeChar - ? "CREATE TABLE IF NOT EXISTS %s.`%s%" PRIu64 "` %s;" - : "CREATE TABLE IF NOT EXISTS %s.%s%" PRIu64 " %s;", - pThreadInfo->db_name, g_args.tb_prefix, i, - pThreadInfo->cols); - batchNum++; - } else { - if (stbInfo == NULL) { - errorPrint( - "%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); - goto create_table_end; - } else { - if (0 == len) { - batchNum = 0; - memset(pThreadInfo->buffer, 0, buff_len); - len += snprintf(pThreadInfo->buffer + len, buff_len - len, - "CREATE TABLE "); - } - - char *tagsValBuf = (char *)calloc(TSDB_MAX_SQL_LEN + 1, 1); - if (NULL == tagsValBuf) { - errorPrint("%s", "failed to allocate memory\n"); - goto create_table_end; - } - - if (0 == stbInfo->tagSource) { - if (generateTagValuesForStb(stbInfo, i, tagsValBuf)) { - tmfree(tagsValBuf); - goto create_table_end; - } - } else { - snprintf(tagsValBuf, TSDB_MAX_SQL_LEN, "(%s)", - stbInfo->tagDataBuf + - stbInfo->lenOfTagOfOneRow * - (i % stbInfo->tagSampleCount)); - } - len += snprintf( - pThreadInfo->buffer + len, buff_len - len, - stbInfo->escapeChar ? 
"if not exists %s.`%s%" PRIu64 - "` using %s.`%s` tags %s " - : "if not exists %s.%s%" PRIu64 - " using %s.%s tags %s ", - pThreadInfo->db_name, stbInfo->childTblPrefix, i, - pThreadInfo->db_name, stbInfo->stbName, tagsValBuf); - tmfree(tagsValBuf); - batchNum++; - if ((batchNum < stbInfo->batchCreateTableNum) && - ((buff_len - len) >= - (stbInfo->lenOfTagOfOneRow + EXTRA_SQL_LEN))) { - continue; - } - } - } - - len = 0; - - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint("queryDbExec() failed. buffer:\n%s\n", - pThreadInfo->buffer); - goto create_table_end; - return NULL; - } - pThreadInfo->tables_created += batchNum; - uint64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > PRINT_STAT_INTERVAL) { - printf("thread[%d] already create %" PRIu64 " - %" PRIu64 - " tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); - lastPrintTime = currentPrintTime; - } - } - - if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint("queryDbExec() failed. buffer:\n%s\n", - pThreadInfo->buffer); - goto create_table_end; - } - pThreadInfo->tables_created += batchNum; - } - *code = 0; - create_table_end: - tmfree(pThreadInfo->buffer); - return code; -} - -int startMultiThreadCreateChildTable(char *cols, int threads, - uint64_t tableFrom, int64_t ntables, - char *db_name, SSuperTable *stbInfo) { - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - if (NULL == pids) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - if (NULL == infos) { - errorPrint("%s", "failed to allocate memory\n"); - tmfree(pids); - return -1; - } - - if (threads < 1) { - threads = 1; - } - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - b = ntables % threads; - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = (int)i; - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->stbInfo = stbInfo; - verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); - pThreadInfo->taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, - db_name, g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint("failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - free(pids); - free(infos); - return -1; - } - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? a + 1 : a; - pThreadInfo->end_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->use_metric = true; - pThreadInfo->cols = cols; - pThreadInfo->minDelay = UINT64_MAX; - pThreadInfo->tables_created = 0; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - } - - for (int i = 0; i < threads; i++) { - void* result; - pthread_join(pids[i], &result); - if (*(int32_t*)result) { - g_fail = true; - } - tmfree(result); - } - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - - g_actualChildTables += pThreadInfo->tables_created; - } - - free(pids); - free(infos); - if (g_fail) { - return -1; - } - - return 0; -} - -int createChildTables() { - int32_t code = 0; - fprintf(stderr, "creating %" PRId64 " table(s) with %d thread(s)\n\n", - g_totalChildTables, g_Dbs.threadCountForCreateTbl); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "creating %" PRId64 " table(s) with %d thread(s)\n\n", - g_totalChildTables, g_Dbs.threadCountForCreateTbl); - } - double start = (double)taosGetTimestampMs(); - char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; - int len; - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - // with super table - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) || - (TBL_ALREADY_EXISTS == - g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } - verbosePrint( - "%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - uint64_t startFrom = 0; - - verbosePrint("%s() LN%d: create %" PRId64 - " child tables from %" PRIu64 "\n", - __func__, __LINE__, g_totalChildTables, - startFrom); - - code = startMultiThreadCreateChildTable( - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, - g_Dbs.threadCountForCreateTbl, startFrom, - g_Dbs.db[i].superTbls[j].childTblCount, - g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); - if (code) { - errorPrint( - "%s() LN%d, startMultiThreadCreateChildTable() " - "failed for db %d stable %d\n", - __func__, __LINE__, i, j); - return code; - } - } - } - } else { - // normal table - len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); - for (int j = 0; j < g_args.columnCount; j++) { - if ((strcasecmp(g_args.dataType[j], "BINARY") == 0) || - (strcasecmp(g_args.dataType[j], "NCHAR") == 0)) { - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s(%d)", j, g_args.dataType[j], - g_args.binwidth); - } else { - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s", j, g_args.dataType[j]); - } - len = (int)strlen(tblColsBuf); - } - - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); - - verbosePrint("%s() LN%d: dbName: %s num of tb: %" PRId64 - " schema: %s\n", - __func__, __LINE__, g_Dbs.db[i].dbName, g_args.ntables, - tblColsBuf); - code = startMultiThreadCreateChildTable( - tblColsBuf, g_Dbs.threadCountForCreateTbl, 0, g_args.ntables, - g_Dbs.db[i].dbName, NULL); - if (code) { - errorPrint( - "%s() LN%d, startMultiThreadCreateChildTable() " - "failed\n", - __func__, __LINE__); - return code; - } - } - } - double end = (double)taosGetTimestampMs(); - fprintf(stderr, - "\nSpent %.4f seconds to create %" PRId64 - " table(s) with %d thread(s), actual %" PRId64 - " table(s) created\n\n", - (end - start) / 1000.0, g_totalChildTables, - g_Dbs.threadCountForCreateTbl, g_actualChildTables); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "\nSpent %.4f 
seconds to create %" PRId64 - " table(s) with %d thread(s), actual %" PRId64 - " table(s) created\n\n", - (end - start) / 1000.0, g_totalChildTables, - g_Dbs.threadCountForCreateTbl, g_actualChildTables); - } - return code; -} - -void postFreeResource() { - tmfclose(g_fpOfInsertResult); - tmfree(g_dupstr); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - - for (int c = 0; c < g_Dbs.db[i].superTbls[j].columnCount; c++) { - if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) { - tmfree((char *)((uintptr_t) * - (uintptr_t *)(g_Dbs.db[i] - .superTbls[j] - .sampleBindBatchArray + - sizeof(char *) * c))); - } - } - tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray); - - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - tmfree(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } - } - tmfree(g_Dbs.db[i].superTbls); - } - tmfree(g_Dbs.db); - tmfree(g_randbool_buff); - tmfree(g_randint_buff); - tmfree(g_rand_voltage_buff); - tmfree(g_randbigint_buff); - tmfree(g_randsmallint_buff); - tmfree(g_randtinyint_buff); - tmfree(g_randfloat_buff); - tmfree(g_rand_current_buff); - tmfree(g_rand_phase_buff); - tmfree(g_randdouble_buff); - tmfree(g_randuint_buff); - tmfree(g_randutinyint_buff); - tmfree(g_randusmallint_buff); - tmfree(g_randubigint_buff); - tmfree(g_randint); - tmfree(g_randuint); - tmfree(g_randbigint); - tmfree(g_randubigint); - tmfree(g_randfloat); - tmfree(g_randdouble); - tmfree(g_sampleDataBuf); - - for (int l = 0; l < g_args.columnCount; l++) { - if (g_sampleBindBatchArray) { - tmfree((char *)((uintptr_t) * (uintptr_t *)(g_sampleBindBatchArray + - sizeof(char *) * l))); - } - } - tmfree(g_sampleBindBatchArray); -} - -static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { - int32_t affectedRows; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_RES * res; - int32_t code; - uint16_t iface; - if (stbInfo) - iface = stbInfo->iface; - else { - if (g_args.iface == INTERFACE_BUT) - iface = TAOSC_IFACE; - else - iface = g_args.iface; - } - - debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, - (iface == TAOSC_IFACE) ? "taosc" - : (iface == REST_IFACE) ? 
"rest" - : "stmt"); - - switch (iface) { - case TAOSC_IFACE: - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, - __LINE__, pThreadInfo->buffer); - - affectedRows = queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - INSERT_TYPE, false); - break; - - case REST_IFACE: - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, - __LINE__, pThreadInfo->buffer); - - if (0 != postProceSql(g_Dbs.host, g_Dbs.port, pThreadInfo->buffer, - pThreadInfo)) { - affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", - pThreadInfo->threadID); - } else { - affectedRows = k; - } - break; - - case STMT_IFACE: - debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, - pThreadInfo->stmt); - if (0 != taos_stmt_execute(pThreadInfo->stmt)) { - errorPrint( - "%s() LN%d, failied to execute insert statement. reason: " - "%s\n", - __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); - - fprintf(stderr, - "\n\033[31m === Please reduce batch number if WAL size " - "exceeds limit. ===\033[0m\n\n"); - exit(EXIT_FAILURE); - } - affectedRows = k; - break; - case SML_IFACE: - res = taos_schemaless_insert( - pThreadInfo->taos, pThreadInfo->lines, - stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL ? 0 : k, - stbInfo->lineProtocol, stbInfo->tsPrecision); - code = taos_errno(res); - affectedRows = taos_affected_rows(res); - if (code != TSDB_CODE_SUCCESS) { - errorPrint( - "%s() LN%d, failed to execute schemaless insert. reason: " - "%s\n", - __func__, __LINE__, taos_errstr(res)); - exit(EXIT_FAILURE); - } - break; - default: - errorPrint("Unknown insert mode: %d\n", stbInfo->iface); - affectedRows = 0; - } - - return affectedRows; -} - -static void getTableName(char *pTblName, threadInfo *pThreadInfo, - uint64_t tableSeq) { - SSuperTable *stbInfo = pThreadInfo->stbInfo; - if (stbInfo) { - if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { - if (stbInfo->childTblLimit > 0) { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - stbInfo->escapeChar ? "`%s`" : "%s", - stbInfo->childTblName + - (tableSeq - stbInfo->childTblOffset) * - TSDB_TABLE_NAME_LEN); - } else { - verbosePrint("[%d] %s() LN%d: from=%" PRIu64 " count=%" PRId64 - " seq=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, tableSeq); - snprintf( - pTblName, TSDB_TABLE_NAME_LEN, - stbInfo->escapeChar ? "`%s`" : "%s", - stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); - } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - stbInfo->escapeChar ? "`%s%" PRIu64 "`" : "%s%" PRIu64 "", - stbInfo->childTblPrefix, tableSeq); - } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - g_args.escapeChar ? 
"`%s%" PRIu64 "`" : "%s%" PRIu64 "", - g_args.tb_prefix, tableSeq); - } -} - -static int execStbBindParamBatch(threadInfo *pThreadInfo, char *tableName, - int64_t tableSeq, uint32_t batch, - uint64_t insertRows, uint64_t recordFrom, - int64_t startTime, int64_t *pSamplePos) { - TAOS_STMT *stmt = pThreadInfo->stmt; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - uint32_t columnCount = pThreadInfo->stbInfo->columnCount; - - uint32_t thisBatch = (uint32_t)(MAX_SAMPLES - (*pSamplePos)); - - if (thisBatch > batch) { - thisBatch = batch; - } - verbosePrint("%s() LN%d, batch=%d pos=%" PRId64 " thisBatch=%d\n", __func__, - __LINE__, batch, *pSamplePos, thisBatch); - - memset(pThreadInfo->bindParams, 0, - (sizeof(TAOS_MULTI_BIND) * (columnCount + 1))); - memset(pThreadInfo->is_null, 0, thisBatch); - - for (int c = 0; c < columnCount + 1; c++) { - TAOS_MULTI_BIND *param = - (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + - sizeof(TAOS_MULTI_BIND) * c); - - char data_type; - - if (c == 0) { - data_type = TSDB_DATA_TYPE_TIMESTAMP; - param->buffer_length = sizeof(int64_t); - param->buffer = pThreadInfo->bind_ts_array; - - } else { - data_type = stbInfo->columns[c - 1].data_type; - - char *tmpP; - - switch (data_type) { - case TSDB_DATA_TYPE_BINARY: - param->buffer_length = stbInfo->columns[c - 1].dataLen; - - tmpP = - (char *)((uintptr_t) * - (uintptr_t *)(stbInfo->sampleBindBatchArray + - sizeof(char *) * (c - 1))); - - verbosePrint("%s() LN%d, tmpP=%p pos=%" PRId64 - " width=%" PRIxPTR " position=%" PRId64 "\n", - __func__, __LINE__, tmpP, *pSamplePos, - param->buffer_length, - (*pSamplePos) * param->buffer_length); - - param->buffer = - (void *)(tmpP + *pSamplePos * param->buffer_length); - break; - - case TSDB_DATA_TYPE_NCHAR: - param->buffer_length = stbInfo->columns[c - 1].dataLen; - - tmpP = - (char *)((uintptr_t) * - (uintptr_t *)(stbInfo->sampleBindBatchArray + - sizeof(char *) * (c - 1))); - - verbosePrint("%s() LN%d, tmpP=%p pos=%" PRId64 - " width=%" PRIxPTR " position=%" PRId64 "\n", - __func__, __LINE__, tmpP, *pSamplePos, - param->buffer_length, - (*pSamplePos) * param->buffer_length); - - param->buffer = - (void *)(tmpP + *pSamplePos * param->buffer_length); - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - param->buffer_length = sizeof(int32_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - param->buffer_length = sizeof(int8_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - param->buffer_length = sizeof(int16_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - param->buffer_length = sizeof(int64_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_BOOL: - param->buffer_length = sizeof(int8_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - 
sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_FLOAT: - param->buffer_length = sizeof(float); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - param->buffer_length = sizeof(double); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - param->buffer_length = sizeof(int64_t); - param->buffer = - (void *)((uintptr_t) * - (uintptr_t *)(stbInfo - ->sampleBindBatchArray + - sizeof(char *) * (c - 1)) + - stbInfo->columns[c - 1].dataLen * - (*pSamplePos)); - break; - - default: - errorPrint("wrong data type: %d\n", data_type); - return -1; - } - } - - param->buffer_type = data_type; - param->length = calloc(1, sizeof(int32_t) * thisBatch); - if (param->length == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - for (int b = 0; b < thisBatch; b++) { - if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) { - param->length[b] = (int32_t)strlen( - (char *)param->buffer + b * stbInfo->columns[c].dataLen); - } else { - param->length[b] = (int32_t)param->buffer_length; - } - } - param->is_null = pThreadInfo->is_null; - param->num = thisBatch; - } - - uint32_t k; - for (k = 0; k < thisBatch;) { - /* columnCount + 1 (ts) */ - if (stbInfo->disorderRatio) { - *(pThreadInfo->bind_ts_array + k) = - startTime + getTSRandTail(stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *(pThreadInfo->bind_ts_array + k) = - startTime + stbInfo->timeStampStep * k; - } - - debugPrint("%s() LN%d, k=%d ts=%" PRId64 "\n", __func__, __LINE__, k, - *(pThreadInfo->bind_ts_array + k)); - k++; - recordFrom++; - - (*pSamplePos)++; - if ((*pSamplePos) == MAX_SAMPLES) { - *pSamplePos = 0; - } - - if (recordFrom >= insertRows) { - break; - } - } - - if (taos_stmt_bind_param_batch( - stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams)) { - errorPrint("taos_stmt_bind_param_batch() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - - for (int c = 0; c < stbInfo->columnCount + 1; c++) { - TAOS_MULTI_BIND *param = - (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + - sizeof(TAOS_MULTI_BIND) * c); - free(param->length); - } - - // if msg > 3MB, break - if (taos_stmt_add_batch(stmt)) { - errorPrint("taos_stmt_add_batch() failed! 
reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - return k; -} - -int32_t prepareStbStmt(threadInfo *pThreadInfo, char *tableName, - int64_t tableSeq, uint32_t batch, uint64_t insertRows, - uint64_t recordFrom, int64_t startTime, - int64_t *pSamplePos) { - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_STMT * stmt = pThreadInfo->stmt; - - char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); - if (NULL == tagsArray) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - char *tagsValBuf = (char *)calloc(TSDB_MAX_SQL_LEN + 1, 1); - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - if (0 == stbInfo->tagSource) { - if (generateTagValuesForStb(stbInfo, tableSeq, tagsValBuf)) { - tmfree(tagsValBuf); - return -1; - } - } else { - snprintf( - tagsValBuf, TSDB_MAX_SQL_LEN, "(%s)", - stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * - (tableSeq % stbInfo->tagSampleCount)); - } - - if (prepareStbStmtBindTag(tagsArray, stbInfo, tagsValBuf, - pThreadInfo->time_precision)) { - tmfree(tagsValBuf); - tmfree(tagsArray); - return -1; - } - - if (taos_stmt_set_tbname_tags(stmt, tableName, - (TAOS_BIND *)tagsArray)) { - errorPrint("taos_stmt_set_tbname_tags() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - - } else { - if (taos_stmt_set_tbname(stmt, tableName)) { - errorPrint("taos_stmt_set_tbname() failed! reason: %s\n", - taos_stmt_errstr(stmt)); - return -1; - } - } - tmfree(tagsValBuf); - tmfree(tagsArray); - return execStbBindParamBatch(pThreadInfo, tableName, tableSeq, batch, - insertRows, recordFrom, startTime, pSamplePos); -} - -// stmt sync write interlace data -static void *syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, - uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); - int32_t* code = calloc(1, sizeof (int32_t)); - *code = -1; - int64_t insertRows; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%" PRIu64 " ntables=%" PRId64 - " insertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - uint64_t timesInterlace = (insertRows / interlaceRows) + 1; - uint32_t precalcBatch = interlaceRows; - - if (precalcBatch > g_args.reqPerReq) precalcBatch = g_args.reqPerReq; - - if (precalcBatch > MAX_SAMPLES) precalcBatch = MAX_SAMPLES; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime; - - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - pThreadInfo->samplePos = 0; - - for (int64_t interlace = 0; interlace < timesInterlace; interlace++) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - int64_t generated = 0; - int64_t samplePos; - - for (; tableSeq < pThreadInfo->start_table_from + 
pThreadInfo->ntables; - tableSeq++) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - goto free_of_interlace_stmt; - } - - samplePos = pThreadInfo->samplePos; - startTime = pThreadInfo->start_time + - interlace * interlaceRows * timeStampStep; - uint64_t remainRecPerTbl = insertRows - interlaceRows * interlace; - uint64_t recPerTbl = 0; - - uint64_t remainPerInterlace; - if (remainRecPerTbl > interlaceRows) { - remainPerInterlace = interlaceRows; - } else { - remainPerInterlace = remainRecPerTbl; - } - - while (remainPerInterlace > 0) { - uint32_t batch; - if (remainPerInterlace > precalcBatch) { - batch = precalcBatch; - } else { - batch = (uint32_t)remainPerInterlace; - } - debugPrint( - "[%d] %s() LN%d, tableName:%s, batch:%d startTime:%" PRId64 - "\n", - pThreadInfo->threadID, __func__, __LINE__, tableName, batch, - startTime); - - if (stbInfo) { - generated = - prepareStbStmt(pThreadInfo, tableName, tableSeq, batch, - insertRows, 0, startTime, &samplePos); - } else { - generated = prepareStmtWithoutStb( - pThreadInfo, tableName, batch, insertRows, - interlaceRows * interlace + recPerTbl, startTime); - } - - debugPrint("[%d] %s() LN%d, generated records is %" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - generated); - if (generated < 0) { - errorPrint( - "[%d] %s() LN%d, generated records is %" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace_stmt; - } else if (generated == 0) { - break; - } - - recPerTbl += generated; - remainPerInterlace -= generated; - pThreadInfo->totalInsertRows += generated; - - verbosePrint("[%d] %s() LN%d totalInsertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->totalInsertRows); - - startTs = taosGetTimestampUs(); - - int64_t affectedRows = - execInsert(pThreadInfo, (uint32_t)generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint( - "%s() LN%d, insert execution time is %10.2f ms\n", __func__, - __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) - pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) - pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (generated != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert() insert %" PRId64 - ", affected rows: %" PRId64 "\n\n", - pThreadInfo->threadID, __func__, __LINE__, - generated, affectedRows); - goto free_of_interlace_stmt; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, - currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRIu64 - ", affected rows: %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - startTime += (generated * timeStampStep); - } - } - pThreadInfo->samplePos = samplePos; - - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { 
- // turn to first table - tableSeq = pThreadInfo->start_table_from; - - flagSleep = true; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st)) { - uint64_t sleepTime = insert_interval - (et - st); - performancePrint("%s() LN%d sleep: %" PRId64 - " ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep((int32_t)sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - *code = 0; - printStatPerThread(pThreadInfo); -free_of_interlace_stmt: - return code; -} - -void *syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, - __func__, __LINE__); - int32_t* code = calloc(1, sizeof (int32_t)); - *code = -1; - int64_t insertRows; - uint64_t maxSqlLen; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - maxSqlLen = stbInfo->maxSqlLen; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - maxSqlLen = g_args.max_sql_len; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%" PRIu64 " ntables=%" PRId64 - " insertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - if (interlaceRows > g_args.reqPerReq) interlaceRows = g_args.reqPerReq; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = g_args.reqPerReq / interlaceRows; - } else { - batchPerTblTimes = 1; - } - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_of_interlace; - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - while (pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - // generate data - memset(pThreadInfo->buffer, 0, maxSqlLen); - uint64_t remainderBufLen = maxSqlLen; - - char *pstr = pThreadInfo->buffer; - - int len = - snprintf(pstr, strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - pstr += len; - remainderBufLen -= len; - - uint32_t recOfBatch = 0; - - int32_t generated; - for (uint64_t i = 0; i < batchPerTblTimes; i++) { - char tableName[TSDB_TABLE_NAME_LEN]; - - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - goto free_of_interlace; - } - - uint64_t oldRemainderLen = remainderBufLen; - - if (stbInfo) { - generated = generateStbInterlaceData( - pThreadInfo, tableName, batchPerTbl, i, batchPerTblTimes, - 
tableSeq, pstr, insertRows, startTime, &remainderBufLen); - } else { - generated = (int32_t)generateInterlaceDataWithoutStb( - tableName, batchPerTbl, tableSeq, pThreadInfo->db_name, - pstr, insertRows, startTime, &remainderBufLen); - } - - debugPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, - generated); - goto free_of_interlace; - } else if (generated == 0) { - break; - } - - tableSeq++; - recOfBatch += batchPerTbl; - - pstr += (oldRemainderLen - remainderBufLen); - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, - recOfBatch); - - if (tableSeq == - pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time + - generatedRecPerTbl * timeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) break; - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = (uint32_t)remainRows; - - if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) - break; - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%" PRId64 - " insertRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) break; - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%" PRIu64 - "\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - - startTs = taosGetTimestampUs(); - - if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", - pThreadInfo->threadID, __func__, __LINE__, batchPerTbl); - if (batchPerTbl > 0) { - errorPrint( - "\tIf the batch is %d, the length of the SQL to insert a " - "row must be less then %" PRId64 "\n", - batchPerTbl, maxSqlLen / batchPerTbl); - } - errorPrint("\tPlease check if the buffer length(%" PRId64 - ") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); - goto free_of_interlace; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint( - "[%d] %s() LN%d execInsert insert %d, affected rows: %" PRId64 - "\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - affectedRows, pThreadInfo->buffer); - goto free_of_interlace; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = 
currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRIu64 - ", affected rows: %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st)) { - uint64_t sleepTime = insert_interval - (et - st); - performancePrint("%s() LN%d sleep: %" PRId64 - " ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep((int32_t)sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - *code = 0; - printStatPerThread(pThreadInfo); -free_of_interlace: - tmfree(pThreadInfo->buffer); - return code; -} - -static void *syncWriteInterlaceSml(threadInfo *pThreadInfo, - uint32_t interlaceRows) { - int32_t* code = calloc(1, sizeof (int32_t)); - *code = -1; - debugPrint("[%d] %s() LN%d: ### interlace schemaless write\n", - pThreadInfo->threadID, __func__, __LINE__); - int64_t insertRows; - uint64_t maxSqlLen; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - maxSqlLen = stbInfo->maxSqlLen; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - maxSqlLen = g_args.max_sql_len; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%" PRIu64 " ntables=%" PRId64 - " insertRows=%" PRIu64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - if (interlaceRows > g_args.reqPerReq) interlaceRows = g_args.reqPerReq; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = g_args.reqPerReq / interlaceRows; - } else { - batchPerTblTimes = 1; - } - - char **smlList; - cJSON *tagsList; - cJSON *jsonArray; - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL || - stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - smlList = (char **)calloc(pThreadInfo->ntables, sizeof(char *)); - if (NULL == smlList) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_of_interlace_sml; - } - - for (int t = 0; t < pThreadInfo->ntables; t++) { - char *sml = (char *)calloc(1, stbInfo->lenOfOneRow); - if (NULL == sml) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_interlace_sml; - } - if (generateSmlConstPart(sml, stbInfo, pThreadInfo, t)) { - goto free_smlheadlist_interlace_sml; - } - smlList[t] = sml; - } - - pThreadInfo->lines = calloc(g_args.reqPerReq, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_interlace_sml; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - pThreadInfo->lines[i] = calloc(1, stbInfo->lenOfOneRow); - if (NULL == pThreadInfo->lines[i]) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_lines_interlace_sml; - } - } - } else { - jsonArray = cJSON_CreateArray(); - tagsList = cJSON_CreateArray(); - for (int t = 0; t < pThreadInfo->ntables; t++) { - if (generateSmlJsonTags(tagsList, stbInfo, pThreadInfo, t)) { - goto 
free_json_interlace_sml; - } - } - - pThreadInfo->lines = (char **)calloc(1, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_json_interlace_sml; - } - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - while (pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - // generate data - - uint32_t recOfBatch = 0; - - for (uint64_t i = 0; i < batchPerTblTimes; i++) { - int64_t timestamp = startTime; - for (int j = recOfBatch; j < recOfBatch + batchPerTbl; j++) { - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - cJSON *tag = cJSON_Duplicate( - cJSON_GetArrayItem( - tagsList, - (int)(tableSeq - pThreadInfo->start_table_from)), - true); - if (generateSmlJsonCols(jsonArray, tag, stbInfo, - pThreadInfo, timestamp)) { - goto free_json_interlace_sml; - } - } else { - if (generateSmlMutablePart( - pThreadInfo->lines[j], - smlList[tableSeq - pThreadInfo->start_table_from], - stbInfo, pThreadInfo, timestamp)) { - goto free_lines_interlace_sml; - } - } - - timestamp += timeStampStep; - } - tableSeq++; - recOfBatch += batchPerTbl; - - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, - recOfBatch); - - if (tableSeq == - pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time + - generatedRecPerTbl * timeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) { - break; - } - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) { - batchPerTbl = (uint32_t)remainRows; - } - - if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) { - break; - } - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%" PRId64 - " insertRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) { - break; - } - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%" PRIu64 - "\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - pThreadInfo->lines[0] = cJSON_Print(jsonArray); - } - - startTs = taosGetTimestampUs(); - - if (recOfBatch == 0) { - errorPrint("Failed to insert records of batch %d\n", batchPerTbl); - if (batchPerTbl > 0) { - errorPrint( - "\tIf the batch is %d, the length of the SQL to insert a " - "row must be less then %" PRId64 "\n", - batchPerTbl, maxSqlLen / batchPerTbl); - } - errorPrint("\tPlease check if the buffer length(%" PRId64 - ") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); 
- goto free_lines_interlace_sml; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines[0]); - cJSON_Delete(jsonArray); - jsonArray = cJSON_CreateArray(); - } - - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%" PRId64 "\n", - pThreadInfo->threadID, __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint("execInsert insert %d, affected rows: %" PRId64 "\n%s\n", - recOfBatch, affectedRows, pThreadInfo->buffer); - goto free_lines_interlace_sml; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRIu64 - ", affected rows: %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st)) { - uint64_t sleepTime = insert_interval - (et - st); - performancePrint("%s() LN%d sleep: %" PRId64 - " ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep((int32_t)sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - - *code = 0; - printStatPerThread(pThreadInfo); - free_of_interlace_sml: - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines); - free_json_interlace_sml: - if (jsonArray != NULL) { - cJSON_Delete(jsonArray); - } - if (tagsList != NULL) { - cJSON_Delete(tagsList); - } - } else { - free_lines_interlace_sml: - for (int index = 0; index < g_args.reqPerReq; index++) { - tmfree(pThreadInfo->lines[index]); - } - tmfree(pThreadInfo->lines); - free_smlheadlist_interlace_sml: - for (int index = 0; index < pThreadInfo->ntables; index++) { - tmfree(smlList[index]); - } - tmfree(smlList); - } - return code; -} - -void *syncWriteProgressiveStmt(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__); - int32_t* code = calloc(1, sizeof (int32_t)); - *code = -1; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int64_t timeStampStep = - stbInfo ? stbInfo->timeStampStep : g_args.timestamp_step; - int64_t insertRows = (stbInfo) ? 
stbInfo->insertRows : g_args.insertRows; - verbosePrint("%s() LN%d insertRows=%" PRId64 "\n", __func__, __LINE__, - insertRows); - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; tableSeq++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%" PRId64 " tableName=%s\n", - __func__, __LINE__, pThreadInfo->threadID, tableSeq, - tableName); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - goto free_of_stmt_progressive; - } - - // measure prepare + insert - startTs = taosGetTimestampUs(); - - int32_t generated; - if (stbInfo) { - generated = prepareStbStmt( - pThreadInfo, tableName, tableSeq, - (uint32_t)((g_args.reqPerReq > stbInfo->insertRows) - ? stbInfo->insertRows - : g_args.reqPerReq), - insertRows, i, start_time, &(pThreadInfo->samplePos)); - } else { - generated = prepareStmtWithoutStb(pThreadInfo, tableName, - g_args.reqPerReq, insertRows, - i, start_time); - } - - verbosePrint("[%d] %s() LN%d generated=%d\n", pThreadInfo->threadID, - __func__, __LINE__, generated); - - if (generated > 0) - i += generated; - else - goto free_of_stmt_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - // only measure insert - // startTs = taosGetTimestampUs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint("affected rows: %d\n", affectedRows); - goto free_of_stmt_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRId64 - ", affected rows: %" PRId64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) break; - } // insertRows - - if ((g_args.verbose_print) && (tableSeq == pThreadInfo->ntables - 1) && - (stbInfo) && - (0 == - strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%" PRId64 "\n", __func__, - __LINE__, pThreadInfo->samplePos); - } - } // tableSeq - - if (percentComplete < 100) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); 
- } - *code = 0; - printStatPerThread(pThreadInfo); -free_of_stmt_progressive: - tmfree(pThreadInfo->buffer); - return code; -} - -void *syncWriteProgressive(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - int32_t* code = calloc(1, sizeof (int32_t)); - *code = -1; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - uint64_t maxSqlLen = stbInfo ? stbInfo->maxSqlLen : g_args.max_sql_len; - int64_t timeStampStep = - stbInfo ? stbInfo->timeStampStep : g_args.timestamp_step; - int64_t insertRows = (stbInfo) ? stbInfo->insertRows : g_args.insertRows; - verbosePrint("%s() LN%d insertRows=%" PRId64 "\n", __func__, __LINE__, - insertRows); - - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_of_progressive; - } - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; tableSeq++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%" PRId64 " tableName=%s\n", - __func__, __LINE__, pThreadInfo->threadID, tableSeq, - tableName); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - goto free_of_progressive; - } - - int64_t remainderBufLen = maxSqlLen - 2000; - char * pstr = pThreadInfo->buffer; - - int len = snprintf(pstr, strlen(STR_INSERT_INTO) + 1, "%s", - STR_INSERT_INTO); - - pstr += len; - remainderBufLen -= len; - - // measure prepare + insert - startTs = taosGetTimestampUs(); - - int32_t generated; - if (stbInfo) { - if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmt( - pThreadInfo, tableName, tableSeq, - (uint32_t)((g_args.reqPerReq > stbInfo->insertRows) - ? 
stbInfo->insertRows - : g_args.reqPerReq), - insertRows, i, start_time, &(pThreadInfo->samplePos)); - } else { - generated = generateStbProgressiveData( - stbInfo, tableName, tableSeq, pThreadInfo->db_name, - pstr, insertRows, i, start_time, - &(pThreadInfo->samplePos), &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { - generated = prepareStmtWithoutStb( - pThreadInfo, tableName, g_args.reqPerReq, insertRows, i, - start_time); - } else { - generated = generateProgressiveDataWithoutStb( - tableName, - /* tableSeq, */ - pThreadInfo, pstr, insertRows, i, start_time, - /* &(pThreadInfo->samplePos), */ - &remainderBufLen); - } - } - - verbosePrint("[%d] %s() LN%d generated=%d\n", pThreadInfo->threadID, - __func__, __LINE__, generated); - - if (generated > 0) - i += generated; - else - goto free_of_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - // only measure insert - // startTs = taosGetTimestampUs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint("affected rows: %d\n", affectedRows); - goto free_of_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = - (int)(pThreadInfo->totalAffectedRows * 100 / totalRows); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRId64 - ", affected rows: %" PRId64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) break; - } // insertRows - - if ((g_args.verbose_print) && (tableSeq == pThreadInfo->ntables - 1) && - (stbInfo) && - (0 == - strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%" PRId64 "\n", __func__, - __LINE__, pThreadInfo->samplePos); - } - } // tableSeq - - if (percentComplete < 100) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - } - *code = 0; - printStatPerThread(pThreadInfo); -free_of_progressive: - tmfree(pThreadInfo->buffer); - return code; -} - -void *syncWriteProgressiveSml(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### sml progressive write\n", __func__, __LINE__); - int32_t * code = calloc(1, sizeof (int32_t)); - *code = -1; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int64_t timeStampStep = stbInfo->timeStampStep; - int64_t insertRows = stbInfo->insertRows; - verbosePrint("%s() LN%d insertRows=%" PRId64 "\n", __func__, __LINE__, - insertRows); - - uint64_t lastPrintTime = taosGetTimestampMs(); - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - char **smlList; - cJSON *tagsList; - cJSON *jsonArray; - - if (insertRows < g_args.reqPerReq) { - g_args.reqPerReq = 
(uint32_t)insertRows; - } - - if (stbInfo->lineProtocol == TSDB_SML_LINE_PROTOCOL || - stbInfo->lineProtocol == TSDB_SML_TELNET_PROTOCOL) { - smlList = (char **)calloc(pThreadInfo->ntables, sizeof(char *)); - if (NULL == smlList) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_of_progressive_sml; - } - for (int t = 0; t < pThreadInfo->ntables; t++) { - char *sml = (char *)calloc(1, stbInfo->lenOfOneRow); - if (NULL == sml) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_progressive_sml; - } - if (generateSmlConstPart(sml, stbInfo, pThreadInfo, t)) { - goto free_smlheadlist_progressive_sml; - } - smlList[t] = sml; - } - - pThreadInfo->lines = (char **)calloc(g_args.reqPerReq, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_smlheadlist_progressive_sml; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - pThreadInfo->lines[i] = (char *)calloc(1, stbInfo->lenOfOneRow); - if (NULL == pThreadInfo->lines[i]) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_lines_progressive_sml; - } - } - } else { - jsonArray = cJSON_CreateArray(); - tagsList = cJSON_CreateArray(); - for (int t = 0; t < pThreadInfo->ntables; t++) { - if (generateSmlJsonTags(tagsList, stbInfo, pThreadInfo, t)) { - goto free_json_progressive_sml; - } - } - - pThreadInfo->lines = (char **)calloc(1, sizeof(char *)); - if (NULL == pThreadInfo->lines) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_json_progressive_sml; - } - } - int currentPercent = 0; - int percentComplete = 0; - - for (uint64_t i = 0; i < pThreadInfo->ntables; i++) { - int64_t timestamp = pThreadInfo->start_time; - for (uint64_t j = 0; j < insertRows;) { - for (int k = 0; k < g_args.reqPerReq; k++) { - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - cJSON *tag = cJSON_Duplicate( - cJSON_GetArrayItem(tagsList, (int)i), true); - if (generateSmlJsonCols(jsonArray, tag, stbInfo, - pThreadInfo, timestamp)) { - goto free_json_progressive_sml; - } - } else { - if (generateSmlMutablePart(pThreadInfo->lines[k], - smlList[i], stbInfo, - pThreadInfo, timestamp)) { - goto free_lines_progressive_sml; - } - } - timestamp += timeStampStep; - j++; - if (j == insertRows) { - break; - } - } - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - pThreadInfo->lines[0] = cJSON_Print(jsonArray); - } - uint64_t startTs = taosGetTimestampUs(); - int32_t affectedRows = execInsert(pThreadInfo, g_args.reqPerReq); - uint64_t endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines[0]); - cJSON_Delete(jsonArray); - jsonArray = cJSON_CreateArray(); - } - - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - affectedRows); - - if (delay > pThreadInfo->maxDelay) { - pThreadInfo->maxDelay = delay; - } - if (delay < pThreadInfo->minDelay) { - pThreadInfo->minDelay = delay; - } - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - pThreadInfo->totalAffectedRows += affectedRows; - pThreadInfo->totalInsertRows += g_args.reqPerReq; - currentPercent = (int)(pThreadInfo->totalAffectedRows * 100 / - (insertRows * pThreadInfo->ntables)); - if (currentPercent > percentComplete) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - - int64_t 
currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf("thread[%d] has currently inserted rows: %" PRId64 - ", affected rows: %" PRId64 "\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (j == insertRows) { - break; - } - } - } - - *code = 0; - free_of_progressive_sml: - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - tmfree(pThreadInfo->lines); - free_json_progressive_sml: - if (jsonArray != NULL) { - cJSON_Delete(jsonArray); - } - if (tagsList != NULL) { - cJSON_Delete(tagsList); - } - } else { - free_lines_progressive_sml: - for (int index = 0; index < g_args.reqPerReq; index++) { - tmfree(pThreadInfo->lines[index]); - } - tmfree(pThreadInfo->lines); - free_smlheadlist_progressive_sml: - for (int index = 0; index < pThreadInfo->ntables; index++) { - tmfree(smlList[index]); - } - tmfree(smlList); - } - return code; -} - -void *syncWrite(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - setThreadName("syncWrite"); - - uint32_t interlaceRows = 0; - - if (stbInfo) { - if (stbInfo->interlaceRows < stbInfo->insertRows) - interlaceRows = stbInfo->interlaceRows; - } else { - if (g_args.interlaceRows < g_args.insertRows) - interlaceRows = g_args.interlaceRows; - } - - if (interlaceRows > 0) { - // interlace mode - if (stbInfo) { - if (STMT_IFACE == stbInfo->iface) { - return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); - } else if (SML_IFACE == stbInfo->iface) { - return syncWriteInterlaceSml(pThreadInfo, interlaceRows); - } else { - return syncWriteInterlace(pThreadInfo, interlaceRows); - } - } - } else { - // progressive mode - if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) || - (STMT_IFACE == g_args.iface)) { - return syncWriteProgressiveStmt(pThreadInfo); - } else if (((stbInfo) && (SML_IFACE == stbInfo->iface)) || - (SML_IFACE == g_args.iface)) { - return syncWriteProgressiveSml(pThreadInfo); - } else { - return syncWriteProgressive(pThreadInfo); - } - } - - return NULL; -} - -void callBack(void *param, TAOS_RES *res, int code) { - threadInfo * pThreadInfo = (threadInfo *)param; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - int insert_interval = - (int)(stbInfo ? 
stbInfo->insertInterval : g_args.insert_interval); - if (insert_interval) { - pThreadInfo->et = taosGetTimestampMs(); - if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { - taosMsleep(insert_interval - - (int32_t)(pThreadInfo->et - pThreadInfo->st)); // ms - } - } - - char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); - char data[MAX_DATA_SIZE]; - char *pstr = buffer; - pstr += sprintf(pstr, "INSERT INTO %s.%s%" PRId64 " VALUES", - pThreadInfo->db_name, pThreadInfo->tb_prefix, - pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - if (pThreadInfo->counter >= g_args.reqPerReq) { - pThreadInfo->start_table_from++; - pThreadInfo->counter = 0; - } - if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { - tsem_post(&pThreadInfo->lock_sem); - free(buffer); - taos_free_result(res); - return; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->stbInfo->disorderRatio && - rand_num < pThreadInfo->stbInfo->disorderRatio) { - int64_t d = - pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, d); - } else { - generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, - pThreadInfo->lastTs += 1000); - } - pstr += sprintf(pstr, "%s", data); - pThreadInfo->counter++; - - if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - break; - } - } - - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); - free(buffer); - - taos_free_result(res); -} - -void *asyncWrite(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - setThreadName("asyncWrite"); - - pThreadInfo->st = 0; - pThreadInfo->et = 0; - pThreadInfo->lastTs = pThreadInfo->start_time; - - int insert_interval = - (int)(stbInfo ? 
stbInfo->insertInterval : g_args.insert_interval); - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); - - tsem_wait(&(pThreadInfo->lock_sem)); - - return NULL; -} - -int startMultiThreadInsertData(int threads, char *db_name, char *precision, - SSuperTable *stbInfo) { - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - } - - if (0 != precision[0]) { - if (0 == strncasecmp(precision, "ms", 2)) { - timePrec = TSDB_TIME_PRECISION_MILLI; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - } - } else if (0 == strncasecmp(precision, "us", 2)) { - timePrec = TSDB_TIME_PRECISION_MICRO; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_MICRO_SECONDS; - } - } else if (0 == strncasecmp(precision, "ns", 2)) { - timePrec = TSDB_TIME_PRECISION_NANO; - if (stbInfo) { - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_NANO_SECONDS; - } - } else { - errorPrint("Not support precision: %s\n", precision); - return -1; - } - } - if (stbInfo) { - if (stbInfo->iface == SML_IFACE) { - if (stbInfo->lineProtocol != TSDB_SML_LINE_PROTOCOL) { - if (stbInfo->columnCount != 1) { - errorPrint( - "Schemaless telnet/json protocol can only have 1 " - "column " - "instead of %d\n", - stbInfo->columnCount); - return -1; - } - stbInfo->tsPrecision = TSDB_SML_TIMESTAMP_NOT_CONFIGURED; - } - if (stbInfo->lineProtocol != TSDB_SML_JSON_PROTOCOL) { - calcRowLen(stbInfo); - } - } - } - - int64_t startTime; - if (stbInfo) { - if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(timePrec); - } else { - if (TSDB_CODE_SUCCESS != - taosParseTime(stbInfo->startTimestamp, &startTime, - (int32_t)strlen(stbInfo->startTimestamp), - timePrec, 0)) { - errorPrint("failed to parse time %s\n", - stbInfo->startTimestamp); - return -1; - } - } - } else { - startTime = DEFAULT_START_TIME; - } - debugPrint("%s() LN%d, startTime= %" PRId64 "\n", __func__, __LINE__, - startTime); - - // read sample data from file first - int ret; - if (stbInfo && stbInfo->iface != SML_IFACE) { - ret = prepareSampleForStb(stbInfo); - } else { - ret = prepareSampleForNtb(); - } - - if (ret) { - errorPrint("%s", "prepare sample data for stable failed!\n"); - return -1; - } - - TAOS *taos0 = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, - g_Dbs.port); - if (NULL == taos0) { - errorPrint("connect to taosd fail , reason: %s\n", taos_errstr(NULL)); - return -1; - } - - int64_t ntables = 0; - uint64_t tableFrom = 0; - - if (stbInfo) { - if (stbInfo->iface != SML_IFACE) { - int64_t limit; - uint64_t offset; - - if ((NULL != g_args.sqlFile) && - (stbInfo->childTblExists == TBL_NO_EXISTS) && - ((stbInfo->childTblOffset != 0) || - (stbInfo->childTblLimit >= 0))) { - printf( - "WARNING: offset and limit will not be used since the " - "child tables not exists!\n"); - } - - if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((stbInfo->childTblLimit < 0) || - ((stbInfo->childTblOffset + stbInfo->childTblLimit) > - (stbInfo->childTblCount))) { - if (stbInfo->childTblCount < stbInfo->childTblOffset) { - printf( - "WARNING: offset will not be used since the child " - "tables count is less then offset!\n"); - - stbInfo->childTblOffset = 0; - } - stbInfo->childTblLimit = - stbInfo->childTblCount - stbInfo->childTblOffset; - } - - offset = stbInfo->childTblOffset; - limit = stbInfo->childTblLimit; - } else { - limit = 
stbInfo->childTblCount; - offset = 0; - } - - ntables = limit; - tableFrom = offset; - - if ((stbInfo->childTblExists != TBL_NO_EXISTS) && - ((stbInfo->childTblOffset + stbInfo->childTblLimit) > - stbInfo->childTblCount)) { - printf( - "WARNING: specified offset + limit > child table count!\n"); - prompt(); - } - - if ((stbInfo->childTblExists != TBL_NO_EXISTS) && - (0 == stbInfo->childTblLimit)) { - printf( - "WARNING: specified limit = 0, which cannot find table " - "name to insert or query! \n"); - prompt(); - } - - stbInfo->childTblName = - (char *)calloc(1, limit * TSDB_TABLE_NAME_LEN); - if (NULL == stbInfo->childTblName) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - - int64_t childTblCount; - getChildNameOfSuperTableWithLimitAndOffset( - taos0, db_name, stbInfo->stbName, &stbInfo->childTblName, - &childTblCount, limit, offset, stbInfo->escapeChar); - ntables = childTblCount; - } else { - ntables = stbInfo->childTblCount; - } - } else { - ntables = g_args.ntables; - tableFrom = 0; - } - - taos_close(taos0); - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - if (g_args.iface == REST_IFACE || - ((stbInfo) && (stbInfo->iface == REST_IFACE))) { - if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != - 0) { - errorPrint("%s\n", "convert host to server address"); - return -1; - } - } - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - if (pids == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return -1; - } - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - if (infos == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - tmfree(pids); - return -1; - } - - char *stmtBuffer = calloc(1, BUFFER_SIZE); - if (stmtBuffer == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - tmfree(pids); - tmfree(infos); - return -1; - } - - uint32_t interlaceRows = 0; - uint32_t batch; - - if (stbInfo) { - if (stbInfo->interlaceRows < stbInfo->insertRows) - interlaceRows = stbInfo->interlaceRows; - } else { - if (g_args.interlaceRows < g_args.insertRows) - interlaceRows = g_args.interlaceRows; - } - - if (interlaceRows > 0) { - batch = interlaceRows; - } else { - batch = (uint32_t)((g_args.reqPerReq > g_args.insertRows) - ? g_args.insertRows - : g_args.reqPerReq); - } - - if ((g_args.iface == STMT_IFACE) || - ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { - char *pstr = stmtBuffer; - - if ((stbInfo) && (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - stbInfo->stbName); - for (int tag = 0; tag < (stbInfo->tagCount - 1); tag++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - int columnCount = (stbInfo) ? 
stbInfo->columnCount : g_args.columnCount; - - for (int col = 0; col < columnCount; col++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ")"); - - debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); - parseSamplefileToStmtBatch(stbInfo); - } - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->time_precision = timePrec; - pThreadInfo->stbInfo = stbInfo; - - pThreadInfo->start_time = startTime; - pThreadInfo->minDelay = UINT64_MAX; - - if ((NULL == stbInfo) || (stbInfo->iface != REST_IFACE)) { - // t_info->taos = taos; - pThreadInfo->taos = taos_connect( - g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - if (NULL == pThreadInfo->taos) { - free(infos); - errorPrint( - "connect to server fail from insert sub " - "thread,reason:%s\n ", - taos_errstr(NULL)); - return -1; - } - - if ((g_args.iface == STMT_IFACE) || - ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { - pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); - if (NULL == pThreadInfo->stmt) { - free(pids); - free(infos); - errorPrint("taos_stmt_init() failed, reason: %s\n", - taos_errstr(NULL)); - return -1; - } - - if (0 != taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0)) { - free(pids); - free(infos); - free(stmtBuffer); - errorPrint( - "failed to execute taos_stmt_prepare. return 0x%x. " - "reason: %s\n", - ret, taos_stmt_errstr(pThreadInfo->stmt)); - return -1; - } - pThreadInfo->bind_ts = malloc(sizeof(int64_t)); - - if (stbInfo) { - parseStbSampleToStmtBatchForThread(pThreadInfo, stbInfo, - timePrec, batch); - - } else { - parseNtbSampleToStmtBatchForThread(pThreadInfo, timePrec, - batch); - } - } - } else { - pThreadInfo->taos = NULL; - } - - /* if ((NULL == stbInfo) - || (0 == stbInfo->multiThreadWriteOneTbl)) { - */ - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? a + 1 : a; - pThreadInfo->end_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - /* } else { - pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = stbInfo->childTblCount; - pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % - 10000 - rand_tinyint(); - } - */ - if (g_args.iface == REST_IFACE || - ((stbInfo) && (stbInfo->iface == REST_IFACE))) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint("Could not create socket : %d", WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, - sockfd); - errorPrint("%s\n", "failed to create socket"); - return -1; - } - - int retConn = connect(sockfd, (struct sockaddr *)&(g_Dbs.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, - retConn); - if (retConn < 0) { - errorPrint("%s\n", "failed to connect"); - return -1; - } - pThreadInfo->sockfd = sockfd; - } - - tsem_init(&(pThreadInfo->lock_sem), 0, 0); - if (ASYNC_MODE == g_Dbs.asyncMode) { - pthread_create(pids + i, NULL, asyncWrite, pThreadInfo); - } else { - pthread_create(pids + i, NULL, syncWrite, pThreadInfo); - } - } - - free(stmtBuffer); - - int64_t start = taosGetTimestampUs(); - - for (int i = 0; i < threads; i++) { - void* result; - pthread_join(pids[i], &result); - if (*(int32_t*)result){ - g_fail = true; - } - tmfree(result); - } - - uint64_t totalDelay = 0; - uint64_t maxDelay = 0; - uint64_t minDelay = UINT64_MAX; - uint64_t cntDelay = 0; - double avgDelay = 0; - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - - tsem_destroy(&(pThreadInfo->lock_sem)); - taos_close(pThreadInfo->taos); - - if (pThreadInfo->stmt) { - taos_stmt_close(pThreadInfo->stmt); - } - - tmfree((char *)pThreadInfo->bind_ts); - - tmfree((char *)pThreadInfo->bind_ts_array); - tmfree(pThreadInfo->bindParams); - tmfree(pThreadInfo->is_null); - if (g_args.iface == REST_IFACE || - ((stbInfo) && (stbInfo->iface == REST_IFACE))) { -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - - debugPrint("%s() LN%d, [%d] totalInsert=%" PRIu64 - " totalAffected=%" PRIu64 "\n", - __func__, __LINE__, pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - if (stbInfo) { - stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; - } else { - g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; - g_args.totalInsertRows += pThreadInfo->totalInsertRows; - } - - totalDelay += pThreadInfo->totalDelay; - cntDelay += pThreadInfo->cntDelay; - if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; - if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; - } - - free(pids); - free(infos); - - if (g_fail){ - return -1; - } - - if (cntDelay == 0) cntDelay = 1; - avgDelay = (double)totalDelay / cntDelay; - - int64_t end = taosGetTimestampUs(); - int64_t t = end - start; - if (0 == t) t = 1; - - double tInMs = (double)t / 1000000.0; - - if (stbInfo) { - fprintf(stderr, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s.%s. 
%.2f records/second\n\n", - tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->stbName, - (double)(stbInfo->totalInsertRows / tInMs)); - - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->stbName, - (double)(stbInfo->totalInsertRows / tInMs)); - } - } else { - fprintf(stderr, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s %.2f records/second\n\n", - tInMs, g_args.totalInsertRows, g_args.totalAffectedRows, - threads, db_name, (double)(g_args.totalInsertRows / tInMs)); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %" PRIu64 - ", affected rows: %" PRIu64 - " with %d thread(s) into %s %.2f records/second\n\n", - tInMs, g_args.totalInsertRows, g_args.totalAffectedRows, - threads, db_name, (double)(g_args.totalInsertRows / tInMs)); - } - } - - if (minDelay != UINT64_MAX) { - fprintf(stderr, - "insert delay, avg: %10.2fms, max: %10.2fms, min: %10.2fms\n\n", - (double)avgDelay / 1000.0, (double)maxDelay / 1000.0, - (double)minDelay / 1000.0); - - if (g_fpOfInsertResult) { - fprintf( - g_fpOfInsertResult, - "insert delay, avg:%10.2fms, max: %10.2fms, min: %10.2fms\n\n", - (double)avgDelay / 1000.0, (double)maxDelay / 1000.0, - (double)minDelay / 1000.0); - } - } - - // taos_close(taos); - - return 0; -} - -int insertTestProcess() { - int32_t code = -1; - char * cmdBuffer = calloc(1, BUFFER_SIZE); - if (NULL == cmdBuffer) { - errorPrint("%s", "failed to allocate memory\n"); - goto end_insert_process; - } - - printfInsertMeta(); - - debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); - g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); - if (NULL == g_fpOfInsertResult) { - errorPrint("failed to open %s for save result\n", g_Dbs.resultFile); - goto end_insert_process; - } - - if (g_fpOfInsertResult) { - printfInsertMetaToFile(g_fpOfInsertResult); - } - - prompt(); - - if (init_rand_data()) { - goto end_insert_process; - } - - // create database and super tables - - if (createDatabasesAndStables(cmdBuffer)) { - goto end_insert_process; - } - - // pretreatment - if (prepareSampleData()) { - goto end_insert_process; - } - - if (g_args.iface != SML_IFACE && g_totalChildTables > 0) { - if (createChildTables()) { - goto end_insert_process; - } - } - // create sub threads for inserting data - // start = taosGetTimestampMs(); - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable *stbInfo = &g_Dbs.db[i].superTbls[j]; - - if (stbInfo && (stbInfo->insertRows > 0)) { - if (startMultiThreadInsertData( - g_Dbs.threadCount, g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, stbInfo)) { - goto end_insert_process; - } - } - } - } - } else { - if (SML_IFACE == g_args.iface) { - code = -1; - errorPrint("%s\n", "Schemaless insertion must include stable"); - goto end_insert_process; - } else { - if (startMultiThreadInsertData( - g_Dbs.threadCount, g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, NULL)) { - goto end_insert_process; - } - } - } - } - code = 0; -end_insert_process: - tmfree(cmdBuffer); - return code; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoJsonOpt.c 
b/src/kit/taosdemo/src/demoJsonOpt.c deleted file mode 100644 index b8d75ccacac07d225788946611d521b8b79a5c10..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoJsonOpt.c +++ /dev/null @@ -1,1796 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "cJSON.h" -#include "demo.h" - -int getColumnAndTagTypeFromInsertJsonFile(cJSON * stbInfo, - SSuperTable *superTbls) { - int32_t code = -1; - - // columns - cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); - if (columns && columns->type != cJSON_Array) { - errorPrint("%s", "failed to read json, columns not found\n"); - goto PARSE_OVER; - } else if (NULL == columns) { - superTbls->columnCount = 0; - superTbls->tagCount = 0; - return 0; - } - - int columnSize = cJSON_GetArraySize(columns); - if ((columnSize + 1 /* ts */) > TSDB_MAX_COLUMNS) { - errorPrint( - "failed to read json, column size overflow, max column size is " - "%d\n", - TSDB_MAX_COLUMNS); - goto PARSE_OVER; - } - - int count = 1; - int index = 0; - StrColumn columnCase; - - // superTbls->columnCount = columnSize; - for (int k = 0; k < columnSize; ++k) { - cJSON *column = cJSON_GetArrayItem(columns, k); - if (column == NULL) continue; - - count = 1; - cJSON *countObj = cJSON_GetObjectItem(column, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = (int)countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String || - dataType->valuestring == NULL) { - errorPrint("%s", "failed to read json, column type not found\n"); - goto PARSE_OVER; - } - // tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, - // DATATYPE_BUFF_LEN); - tstrncpy(columnCase.dataType, dataType->valuestring, - min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); - - cJSON *dataLen = cJSON_GetObjectItem(column, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = (uint32_t)dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to read json, column len not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - columnCase.dataLen = SMALL_BUFF_LEN; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, - min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); - - superTbls->columns[index].dataLen = columnCase.dataLen; - index++; - } - } - - if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { - errorPrint( - "failed to read json, column size overflow, allowed max column " - "size is %d\n", - MAX_NUM_COLUMNS); - goto PARSE_OVER; - } - - superTbls->columnCount = index; - - for (int c = 0; c < superTbls->columnCount; c++) { - if (0 == - 
strncasecmp(superTbls->columns[c].dataType, "INT", strlen("INT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "TINYINT", - strlen("TINYINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "SMALLINT", - strlen("SMALLINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "BIGINT", - strlen("BIGINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "FLOAT", - strlen("FLOAT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "DOUBLE", - strlen("DOUBLE"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "BINARY", - strlen("BINARY"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "NCHAR", - strlen("NCHAR"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "BOOL", - strlen("BOOL"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "TIMESTAMP", - strlen("TIMESTAMP"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "UTINYINT", - strlen("UTINYINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "USMALLINT", - strlen("USMALLINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "UINT", - strlen("UINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, "UBIGINT", - strlen("UBIGINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UBIGINT; - } else { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL; - } - } - - count = 1; - index = 0; - // tags - cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); - if (!tags || tags->type != cJSON_Array) { - errorPrint("%s", "failed to read json, tags not found\n"); - goto PARSE_OVER; - } - - int tagSize = cJSON_GetArraySize(tags); - if (tagSize > TSDB_MAX_TAGS) { - errorPrint( - "failed to read json, tags size overflow, max tag size is %d\n", - TSDB_MAX_TAGS); - goto PARSE_OVER; - } - - // superTbls->tagCount = tagSize; - for (int k = 0; k < tagSize; ++k) { - cJSON *tag = cJSON_GetArrayItem(tags, k); - if (tag == NULL) continue; - - count = 1; - cJSON *countObj = cJSON_GetObjectItem(tag, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = (int)countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String || - dataType->valuestring == NULL) { - errorPrint("%s", "failed to read json, tag type not found\n"); - goto PARSE_OVER; - } - tstrncpy(columnCase.dataType, dataType->valuestring, - min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); - - cJSON *dataLen = 
cJSON_GetObjectItem(tag, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = (uint32_t)dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column len not found\n"); - goto PARSE_OVER; - } else { - columnCase.dataLen = 0; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); - superTbls->tags[index].dataLen = columnCase.dataLen; - index++; - } - } - - if (index > TSDB_MAX_TAGS) { - errorPrint( - "failed to read json, tags size overflow, allowed max tag count is " - "%d\n", - TSDB_MAX_TAGS); - goto PARSE_OVER; - } - - superTbls->tagCount = index; - - for (int t = 0; t < superTbls->tagCount; t++) { - if (0 == - strncasecmp(superTbls->tags[t].dataType, "INT", strlen("INT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "TINYINT", - strlen("TINYINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "SMALLINT", - strlen("SMALLINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "BIGINT", - strlen("BIGINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "FLOAT", - strlen("FLOAT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "DOUBLE", - strlen("DOUBLE"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "BINARY", - strlen("BINARY"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "NCHAR", - strlen("NCHAR"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "BOOL", - strlen("BOOL"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "TIMESTAMP", - strlen("TIMESTAMP"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "UTINYINT", - strlen("UTINYINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "USMALLINT", - strlen("USMALLINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "UINT", - strlen("UINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, "UBIGINT", - strlen("UBIGINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UBIGINT; - } else { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL; - } - } - - if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > - TSDB_MAX_COLUMNS) { - errorPrint( - "columns + tags is more than allowed max columns count: %d\n", - TSDB_MAX_COLUMNS); - goto PARSE_OVER; - } - code = 0; - -PARSE_OVER: - return code; -} - -int getMetaFromInsertJsonFile(cJSON *root) { - int32_t code = -1; - - cJSON *cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON *host = cJSON_GetObjectItem(root, "host"); - if 
(host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - errorPrint("%s", "failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON *port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_Dbs.port = (uint16_t)port->valueint; - } else if (!port) { - g_Dbs.port = DEFAULT_PORT; - } - - cJSON *user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - } - - cJSON *password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && - password->valuestring != NULL) { - tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); - } else if (!password) { - tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN); - } - - cJSON *resultfile = cJSON_GetObjectItem(root, "result_file"); - if (resultfile && resultfile->type == cJSON_String && - resultfile->valuestring != NULL) { - tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); - } else if (!resultfile) { - tstrncpy(g_Dbs.resultFile, DEFAULT_OUTPUT, MAX_FILE_NAME_LEN); - } - - cJSON *threads = cJSON_GetObjectItem(root, "thread_count"); - if (threads && threads->type == cJSON_Number) { - g_Dbs.threadCount = (uint32_t)threads->valueint; - } else if (!threads) { - g_Dbs.threadCount = DEFAULT_NTHREADS; - } else { - errorPrint("%s", "failed to read json, threads not found\n"); - goto PARSE_OVER; - } - - cJSON *threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); - if (threads2 && threads2->type == cJSON_Number) { - g_Dbs.threadCountForCreateTbl = (uint32_t)threads2->valueint; - } else if (!threads2) { - g_Dbs.threadCountForCreateTbl = DEFAULT_NTHREADS; - } else { - errorPrint("%s", "failed to read json, threads2 not found\n"); - goto PARSE_OVER; - } - - cJSON *gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); - if (gInsertInterval && gInsertInterval->type == cJSON_Number) { - if (gInsertInterval->valueint < 0) { - errorPrint("%s", - "failed to read json, insert interval input mistake\n"); - goto PARSE_OVER; - } - g_args.insert_interval = gInsertInterval->valueint; - } else if (!gInsertInterval) { - g_args.insert_interval = DEFAULT_INSERT_INTERVAL; - } else { - errorPrint("%s", - "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - cJSON *interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); - if (interlaceRows && interlaceRows->type == cJSON_Number) { - if (interlaceRows->valueint < 0) { - errorPrint("%s", - "failed to read json, interlaceRows input mistake\n"); - goto PARSE_OVER; - } - g_args.interlaceRows = (uint32_t)interlaceRows->valueint; - } else if (!interlaceRows) { - g_args.interlaceRows = - DEFAULT_INTERLACE_ROWS; // 0 means progressive mode, > 0 mean - // interlace mode. 
max value is less or equ - // num_of_records_per_req - } else { - errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); - goto PARSE_OVER; - } - - cJSON *maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - if (maxSqlLen->valueint < 0) { - errorPrint( - "%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.max_sql_len = maxSqlLen->valueint; - } else if (!maxSqlLen) { - g_args.max_sql_len = TSDB_MAX_ALLOWED_SQL_LEN; - } else { - errorPrint( - "%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); - if (numRecPerReq && numRecPerReq->type == cJSON_Number) { - if (numRecPerReq->valueint <= 0) { - errorPrint( - "%s() LN%d, failed to read json, num_of_records_per_req input " - "mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { - printf("NOTICE: number of records per request value %" PRIu64 - " > %d\n\n", - numRecPerReq->valueint, MAX_RECORDS_PER_REQ); - printf( - " number of records per request value will be set to " - "%d\n\n", - MAX_RECORDS_PER_REQ); - prompt(); - numRecPerReq->valueint = MAX_RECORDS_PER_REQ; - } - g_args.reqPerReq = (uint32_t)numRecPerReq->valueint; - } else if (!numRecPerReq) { - g_args.reqPerReq = MAX_RECORDS_PER_REQ; - } else { - errorPrint( - "%s() LN%d, failed to read json, num_of_records_per_req not " - "found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *prepareRand = cJSON_GetObjectItem(root, "prepared_rand"); - if (prepareRand && prepareRand->type == cJSON_Number) { - if (prepareRand->valueint <= 0) { - errorPrint( - "%s() LN%d, failed to read json, prepared_rand input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.prepared_rand = prepareRand->valueint; - } else if (!prepareRand) { - g_args.prepared_rand = DEFAULT_PREPARED_RAND; - } else { - errorPrint("%s", "failed to read json, prepared_rand not found\n"); - goto PARSE_OVER; - } - - cJSON *chineseOpt = cJSON_GetObjectItem(root, "chinese"); // yes, no, - if (chineseOpt && chineseOpt->type == cJSON_String && - chineseOpt->valuestring != NULL) { - if (0 == strncasecmp(chineseOpt->valuestring, "yes", 3)) { - g_args.chinese = true; - } else if (0 == strncasecmp(chineseOpt->valuestring, "no", 2)) { - g_args.chinese = false; - } else { - g_args.chinese = DEFAULT_CHINESE_OPT; - } - } else if (!chineseOpt) { - g_args.chinese = DEFAULT_CHINESE_OPT; - } else { - errorPrint( - "%s", - "failed to read json, chinese input mistake\n"); - goto PARSE_OVER; - } - - cJSON *answerPrompt = - cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == cJSON_String && - answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = DEFAULT_ANS_YES; - } - } else if (!answerPrompt) { - g_args.answer_yes = true; // default is no, mean answer_yes. 
- } else { - errorPrint( - "%s", - "failed to read json, confirm_parameter_prompt input mistake\n"); - goto PARSE_OVER; - } - - // rows per table need be less than insert batch - if (g_args.interlaceRows > g_args.reqPerReq) { - printf( - "NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlaceRows, g_args.reqPerReq); - printf( - " interlace rows value will be set to " - "num_of_records_per_req %u\n\n", - g_args.reqPerReq); - prompt(); - g_args.interlaceRows = g_args.reqPerReq; - } - - cJSON *dbs = cJSON_GetObjectItem(root, "databases"); - if (!dbs || dbs->type != cJSON_Array) { - errorPrint("%s", "failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - int dbSize = cJSON_GetArraySize(dbs); - if (dbSize > MAX_DB_COUNT) { - errorPrint( - "failed to read json, databases size overflow, max database is " - "%d\n", - MAX_DB_COUNT); - goto PARSE_OVER; - } - g_Dbs.db = calloc(1, sizeof(SDataBase) * dbSize); - assert(g_Dbs.db); - g_Dbs.dbCount = dbSize; - for (int i = 0; i < dbSize; ++i) { - cJSON *dbinfos = cJSON_GetArrayItem(dbs, i); - if (dbinfos == NULL) continue; - - // dbinfo - cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); - if (!dbinfo || dbinfo->type != cJSON_Object) { - errorPrint("%s", "failed to read json, dbinfo not found\n"); - goto PARSE_OVER; - } - - cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); - if (!dbName || dbName->type != cJSON_String || - dbName->valuestring == NULL) { - errorPrint("%s", "failed to read json, db name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); - - cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); - if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { - if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { - g_Dbs.db[i].drop = true; - } else { - g_Dbs.db[i].drop = false; - } - } else if (!drop) { - g_Dbs.db[i].drop = g_args.drop_database; - } else { - errorPrint("%s", "failed to read json, drop input mistake\n"); - goto PARSE_OVER; - } - - cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); - if (precision && precision->type == cJSON_String && - precision->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - SMALL_BUFF_LEN); - } else if (!precision) { - memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); - } else { - errorPrint("%s", "failed to read json, precision not found\n"); - goto PARSE_OVER; - } - - cJSON *update = cJSON_GetObjectItem(dbinfo, "update"); - if (update && update->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.update = (int)update->valueint; - } else if (!update) { - g_Dbs.db[i].dbCfg.update = -1; - } else { - errorPrint("%s", "failed to read json, update not found\n"); - goto PARSE_OVER; - } - - cJSON *replica = cJSON_GetObjectItem(dbinfo, "replica"); - if (replica && replica->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.replica = (int)replica->valueint; - } else if (!replica) { - g_Dbs.db[i].dbCfg.replica = -1; - } else { - errorPrint("%s", "failed to read json, replica not found\n"); - goto PARSE_OVER; - } - - cJSON *keep = cJSON_GetObjectItem(dbinfo, "keep"); - if (keep && keep->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.keep = (int)keep->valueint; - } else if (!keep) { - g_Dbs.db[i].dbCfg.keep = -1; - } else { - errorPrint("%s", "failed to read json, keep not found\n"); - goto PARSE_OVER; - } - - cJSON *days = cJSON_GetObjectItem(dbinfo, "days"); - if (days && days->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.days = (int)days->valueint; 
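
The deleted `getMetaFromInsertJsonFile()` handles every numeric `dbinfo` option (`update`, `replica`, `keep`, `days`, `cache`, `blocks`, and so on) with the same three-way cJSON check: a present numeric value is taken as-is, a missing field falls back to `-1` so the server-side default applies, and any other type aborts the parse. A minimal, self-contained sketch of that pattern (the helper name `jsonGetIntOrDefault` is illustrative and not part of taosdemo) could look like this:

```c
#include <stdio.h>
#include <stdbool.h>
#include "cJSON.h"

/* Illustrative helper (not in taosdemo): read an optional numeric field.
 * Missing field -> keep `def` (the removed code uses -1 so the server
 * default wins); wrong JSON type -> report a parse failure. */
static bool jsonGetIntOrDefault(const cJSON *obj, const char *key,
                                int def, int *out) {
    const cJSON *item = cJSON_GetObjectItem(obj, key);
    if (item == NULL) {               /* absent: use the default */
        *out = def;
        return true;
    }
    if (item->type != cJSON_Number) { /* present but not a number */
        fprintf(stderr, "failed to read json, %s has wrong type\n", key);
        return false;
    }
    *out = (int)item->valueint;
    return true;
}

int main(void) {
    cJSON *dbinfo = cJSON_Parse("{\"replica\": 3}");
    int replica = 0, days = 0;

    jsonGetIntOrDefault(dbinfo, "replica", -1, &replica); /* -> 3  */
    jsonGetIntOrDefault(dbinfo, "days",    -1, &days);    /* -> -1 */
    printf("replica=%d days=%d\n", replica, days);

    cJSON_Delete(dbinfo);
    return 0;
}
```
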
- } else if (!days) { - g_Dbs.db[i].dbCfg.days = -1; - } else { - errorPrint("%s", "failed to read json, days not found\n"); - goto PARSE_OVER; - } - - cJSON *cache = cJSON_GetObjectItem(dbinfo, "cache"); - if (cache && cache->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cache = (int)cache->valueint; - } else if (!cache) { - g_Dbs.db[i].dbCfg.cache = -1; - } else { - errorPrint("%s", "failed to read json, cache not found\n"); - goto PARSE_OVER; - } - - cJSON *blocks = cJSON_GetObjectItem(dbinfo, "blocks"); - if (blocks && blocks->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.blocks = (int)blocks->valueint; - } else if (!blocks) { - g_Dbs.db[i].dbCfg.blocks = -1; - } else { - errorPrint("%s", "failed to read json, block not found\n"); - goto PARSE_OVER; - } - - // cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, - // "maxtablesPerVnode"); if (maxtablesPerVnode && - // maxtablesPerVnode->type - // == cJSON_Number) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; - //} else if (!maxtablesPerVnode) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; - //} else { - // printf("failed to read json, maxtablesPerVnode not found"); - // goto PARSE_OVER; - //} - - cJSON *minRows = cJSON_GetObjectItem(dbinfo, "minRows"); - if (minRows && minRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.minRows = (uint32_t)minRows->valueint; - } else if (!minRows) { - g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default - } else { - errorPrint("%s", "failed to read json, minRows not found\n"); - goto PARSE_OVER; - } - - cJSON *maxRows = cJSON_GetObjectItem(dbinfo, "maxRows"); - if (maxRows && maxRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.maxRows = (uint32_t)maxRows->valueint; - } else if (!maxRows) { - g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default - } else { - errorPrint("%s", "failed to read json, maxRows not found\n"); - goto PARSE_OVER; - } - - cJSON *comp = cJSON_GetObjectItem(dbinfo, "comp"); - if (comp && comp->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.comp = (int)comp->valueint; - } else if (!comp) { - g_Dbs.db[i].dbCfg.comp = -1; - } else { - errorPrint("%s", "failed to read json, comp not found\n"); - goto PARSE_OVER; - } - - cJSON *walLevel = cJSON_GetObjectItem(dbinfo, "walLevel"); - if (walLevel && walLevel->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.walLevel = (int)walLevel->valueint; - } else if (!walLevel) { - g_Dbs.db[i].dbCfg.walLevel = -1; - } else { - errorPrint("%s", "failed to read json, walLevel not found\n"); - goto PARSE_OVER; - } - - cJSON *cacheLast = cJSON_GetObjectItem(dbinfo, "cachelast"); - if (cacheLast && cacheLast->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cacheLast = (int)cacheLast->valueint; - } else if (!cacheLast) { - g_Dbs.db[i].dbCfg.cacheLast = -1; - } else { - errorPrint("%s", "failed to read json, cacheLast not found\n"); - goto PARSE_OVER; - } - - cJSON *quorum = cJSON_GetObjectItem(dbinfo, "quorum"); - if (quorum && quorum->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.quorum = (int)quorum->valueint; - } else if (!quorum) { - g_Dbs.db[i].dbCfg.quorum = 1; - } else { - errorPrint("%s", "failed to read json, quorum input mistake"); - goto PARSE_OVER; - } - - cJSON *fsync = cJSON_GetObjectItem(dbinfo, "fsync"); - if (fsync && fsync->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.fsync = (int)fsync->valueint; - } else if (!fsync) { - g_Dbs.db[i].dbCfg.fsync = -1; - } else { - errorPrint("%s", "failed to read json, fsync input mistake\n"); - goto PARSE_OVER; - } - - // super_tables - cJSON *stables = cJSON_GetObjectItem(dbinfos, 
"super_tables"); - if (!stables || stables->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super_tables not found\n"); - goto PARSE_OVER; - } - - int stbSize = cJSON_GetArraySize(stables); - if (stbSize > MAX_SUPER_TABLE_COUNT) { - errorPrint( - "failed to read json, supertable size overflow, max supertable " - "is %d\n", - MAX_SUPER_TABLE_COUNT); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls = calloc(1, stbSize * sizeof(SSuperTable)); - assert(g_Dbs.db[i].superTbls); - g_Dbs.db[i].superTblCount = stbSize; - for (int j = 0; j < stbSize; ++j) { - cJSON *stbInfo = cJSON_GetArrayItem(stables, j); - if (stbInfo == NULL) continue; - - // dbinfo - cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); - if (!stbName || stbName->type != cJSON_String || - stbName->valuestring == NULL) { - errorPrint("%s", "failed to read json, stb name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring, - TSDB_TABLE_NAME_LEN); - - cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); - if (!prefix || prefix->type != cJSON_String || - prefix->valuestring == NULL) { - errorPrint( - "%s", "failed to read json, childtable_prefix not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, - prefix->valuestring, TBNAME_PREFIX_LEN); - - cJSON *escapeChar = - cJSON_GetObjectItem(stbInfo, "escape_character"); - if (escapeChar && escapeChar->type == cJSON_String && - escapeChar->valuestring != NULL) { - if ((0 == strncasecmp(escapeChar->valuestring, "yes", 3))) { - g_Dbs.db[i].superTbls[j].escapeChar = true; - } else if (0 == strncasecmp(escapeChar->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].escapeChar = false; - } else { - g_Dbs.db[i].superTbls[j].escapeChar = false; - } - } else if (!escapeChar) { - g_Dbs.db[i].superTbls[j].escapeChar = false; - } else { - errorPrint("%s", - "failed to read json, escape_character not found\n"); - goto PARSE_OVER; - } - - cJSON *autoCreateTbl = - cJSON_GetObjectItem(stbInfo, "auto_create_table"); - if (autoCreateTbl && autoCreateTbl->type == cJSON_String && - autoCreateTbl->valuestring != NULL) { - if ((0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) && - (TBL_ALREADY_EXISTS != - g_Dbs.db[i].superTbls[j].childTblExists)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = - AUTO_CREATE_SUBTBL; - } else if (0 == - strncasecmp(autoCreateTbl->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = - PRE_CREATE_SUBTBL; - } else { - g_Dbs.db[i].superTbls[j].autoCreateTable = - PRE_CREATE_SUBTBL; - } - } else if (!autoCreateTbl) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - errorPrint( - "%s", "failed to read json, auto_create_table not found\n"); - goto PARSE_OVER; - } - - cJSON *batchCreateTbl = - cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); - if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = - batchCreateTbl->valueint; - } else if (!batchCreateTbl) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = - DEFAULT_CREATE_BATCH; - } else { - errorPrint( - "%s", - "failed to read json, batch_create_tbl_num not found\n"); - goto PARSE_OVER; - } - - cJSON *childTblExists = - cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no - if (childTblExists && childTblExists->type == cJSON_String && - childTblExists->valuestring != NULL) { - if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) && - (g_Dbs.db[i].drop == false)) { - 
g_Dbs.db[i].superTbls[j].childTblExists = - TBL_ALREADY_EXISTS; - } else if ((0 == strncasecmp(childTblExists->valuestring, "no", - 2) || - (g_Dbs.db[i].drop == true))) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } - } else if (!childTblExists) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - errorPrint( - "%s", - "failed to read json, child_table_exists not found\n"); - goto PARSE_OVER; - } - - if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } - - cJSON *count = cJSON_GetObjectItem(stbInfo, "childtable_count"); - if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint( - "%s", - "failed to read json, childtable_count input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - - cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); - if (dataSource && dataSource->type == cJSON_String && - dataSource->valuestring != NULL) { - tstrncpy( - g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, - min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1)); - } else if (!dataSource) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", - min(SMALL_BUFF_LEN, strlen("rand") + 1)); - } else { - errorPrint("%s", - "failed to read json, data_source not found\n"); - goto PARSE_OVER; - } - - cJSON *stbIface = cJSON_GetObjectItem( - stbInfo, "insert_mode"); // taosc , rest, stmt - if (stbIface && stbIface->type == cJSON_String && - stbIface->valuestring != NULL) { - if (0 == strcasecmp(stbIface->valuestring, "taosc")) { - g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { - g_Dbs.db[i].superTbls[j].iface = REST_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { - g_Dbs.db[i].superTbls[j].iface = STMT_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "sml")) { - g_Dbs.db[i].superTbls[j].iface = SML_IFACE; - g_args.iface = SML_IFACE; - } else { - errorPrint( - "failed to read json, insert_mode %s not recognized\n", - stbIface->valuestring); - goto PARSE_OVER; - } - } else if (!stbIface) { - g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; - } else { - errorPrint("%s", - "failed to read json, insert_mode not found\n"); - goto PARSE_OVER; - } - - cJSON *stbLineProtocol = - cJSON_GetObjectItem(stbInfo, "line_protocol"); - if (stbLineProtocol && stbLineProtocol->type == cJSON_String && - stbLineProtocol->valuestring != NULL) { - if (0 == strcasecmp(stbLineProtocol->valuestring, "line")) { - g_Dbs.db[i].superTbls[j].lineProtocol = - TSDB_SML_LINE_PROTOCOL; - } else if (0 == - strcasecmp(stbLineProtocol->valuestring, "telnet")) { - g_Dbs.db[i].superTbls[j].lineProtocol = - TSDB_SML_TELNET_PROTOCOL; - } else if (0 == - strcasecmp(stbLineProtocol->valuestring, "json")) { - g_Dbs.db[i].superTbls[j].lineProtocol = - TSDB_SML_JSON_PROTOCOL; - } else { - errorPrint( - "failed to read json, line_protocol %s not " - "recognized\n", - stbLineProtocol->valuestring); - goto PARSE_OVER; - } - } else if (!stbLineProtocol) { - g_Dbs.db[i].superTbls[j].lineProtocol = TSDB_SML_LINE_PROTOCOL; - } else { - errorPrint("%s", - "failed to read json, line_protocol not found\n"); - goto PARSE_OVER; - } - - cJSON *childTbl_limit = - cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if 
((childTbl_limit) && (g_Dbs.db[i].drop != true) && - (g_Dbs.db[i].superTbls[j].childTblExists == - TBL_ALREADY_EXISTS)) { - if (childTbl_limit->type != cJSON_Number) { - errorPrint("%s", "failed to read json, childtable_limit\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblLimit = - childTbl_limit->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblLimit = - -1; // select ... limit -1 means all query result, drop = - // yes mean all table need recreate, limit value is - // invalid. - } - - cJSON *childTbl_offset = - cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if ((childTbl_offset) && (g_Dbs.db[i].drop != true) && - (g_Dbs.db[i].superTbls[j].childTblExists == - TBL_ALREADY_EXISTS)) { - if ((childTbl_offset->type != cJSON_Number) || - (0 > childTbl_offset->valueint)) { - errorPrint("%s", - "failed to read json, childtable_offset\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblOffset = - childTbl_offset->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblOffset = 0; - } - - cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); - if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, TSDB_DB_NAME_LEN); - } else if (!ts) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", - TSDB_DB_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, start_timestamp not found\n"); - goto PARSE_OVER; - } - - cJSON *timestampStep = - cJSON_GetObjectItem(stbInfo, "timestamp_step"); - if (timestampStep && timestampStep->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].timeStampStep = - timestampStep->valueint; - } else if (!timestampStep) { - g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; - } else { - errorPrint("%s", - "failed to read json, timestamp_step not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); - if (sampleFormat && sampleFormat->type == cJSON_String && - sampleFormat->valuestring != NULL) { - tstrncpy( - g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, - min(SMALL_BUFF_LEN, strlen(sampleFormat->valuestring) + 1)); - } else if (!sampleFormat) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", - SMALL_BUFF_LEN); - } else { - errorPrint("%s", - "failed to read json, sample_format not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String && - sampleFile->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, - sampleFile->valuestring, - min(MAX_FILE_NAME_LEN, - strlen(sampleFile->valuestring) + 1)); - } else if (!sampleFile) { - memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, sample_file not found\n"); - goto PARSE_OVER; - } - - cJSON *useSampleTs = cJSON_GetObjectItem(stbInfo, "use_sample_ts"); - if (useSampleTs && useSampleTs->type == cJSON_String && - useSampleTs->valuestring != NULL) { - if (0 == strncasecmp(useSampleTs->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].useSampleTs = true; - } else if (0 == - strncasecmp(useSampleTs->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } else { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } - } else if (!useSampleTs) { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } else { - errorPrint("%s", - "failed to read json, use_sample_ts not found\n"); - goto PARSE_OVER; - } - - 
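
The string-valued flags in this super-table block (`escape_character`, `auto_create_table`, `child_table_exists`, `use_sample_ts`) are all parsed the same way: `strncasecmp` against "yes" and "no", with unrecognized values and a missing field both falling back to a default. A small sketch of that pattern, assuming a hypothetical helper name (`jsonGetYesNo` is not part of taosdemo), follows:

```c
#include <stdbool.h>
#include <strings.h>   /* strncasecmp */
#include "cJSON.h"

/* Illustrative helper (not in taosdemo): map a "yes"/"no" JSON string to a
 * bool; a missing field or an unrecognized value yields the default. */
static bool jsonGetYesNo(const cJSON *obj, const char *key, bool def) {
    const cJSON *item = cJSON_GetObjectItem(obj, key);
    if (item == NULL || item->type != cJSON_String ||
        item->valuestring == NULL) {
        return def;
    }
    if (0 == strncasecmp(item->valuestring, "yes", 3)) return true;
    if (0 == strncasecmp(item->valuestring, "no", 2))  return false;
    return def;
}
```

With such a helper the removed branches would reduce to calls like `jsonGetYesNo(stbInfo, "use_sample_ts", false)`, keeping the same "unknown value means default" behaviour as the original parsing.
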
cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); - if ((tagsFile && tagsFile->type == cJSON_String) && - (tagsFile->valuestring != NULL)) { - tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, - tagsFile->valuestring, MAX_FILE_NAME_LEN); - if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - g_Dbs.db[i].superTbls[j].tagSource = 1; - } - } else if (!tagsFile) { - memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - errorPrint("%s", "failed to read json, tags_file not found\n"); - goto PARSE_OVER; - } - - cJSON *stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) { - int32_t len = (int32_t)stbMaxSqlLen->valueint; - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < 5) { - len = 5; - } - g_Dbs.db[i].superTbls[j].maxSqlLen = len; - } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; - } else { - errorPrint("%s", - "failed to read json, stbMaxSqlLen input mistake\n"); - goto PARSE_OVER; - } - /* - cJSON *multiThreadWriteOneTbl = - cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no - , yes if (multiThreadWriteOneTbl - && multiThreadWriteOneTbl->type == cJSON_String - && multiThreadWriteOneTbl->valuestring != NULL) { - if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", - 3)) { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; } else - { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } - } else if (!multiThreadWriteOneTbl) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } else { - errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not - found\n"); goto PARSE_OVER; - } - */ - cJSON *insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); - if (insertRows && insertRows->type == cJSON_Number) { - if (insertRows->valueint < 0) { - errorPrint( - "%s", - "failed to read json, insert_rows input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; - } else if (!insertRows) { - g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; - } else { - errorPrint("%s", - "failed to read json, insert_rows input mistake\n"); - goto PARSE_OVER; - } - - cJSON *stbInterlaceRows = - cJSON_GetObjectItem(stbInfo, "interlace_rows"); - if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { - if (stbInterlaceRows->valueint < 0) { - errorPrint( - "%s", - "failed to read json, interlace rows input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].interlaceRows = - (uint32_t)stbInterlaceRows->valueint; - - if (g_Dbs.db[i].superTbls[j].interlaceRows > - g_Dbs.db[i].superTbls[j].insertRows) { - printf( - "NOTICE: db[%d].superTbl[%d]'s interlace rows value %u " - "> insert_rows %" PRId64 "\n\n", - i, j, g_Dbs.db[i].superTbls[j].interlaceRows, - g_Dbs.db[i].superTbls[j].insertRows); - printf( - " interlace rows value will be set to " - "insert_rows %" PRId64 "\n\n", - g_Dbs.db[i].superTbls[j].insertRows); - prompt(); - g_Dbs.db[i].superTbls[j].interlaceRows = - (uint32_t)g_Dbs.db[i].superTbls[j].insertRows; - } - } else if (!stbInterlaceRows) { - g_Dbs.db[i].superTbls[j].interlaceRows = - g_args.interlaceRows; // 0 means progressive mode, > 0 mean - // interlace mode. 
max value is less - // or equ num_of_records_per_req - } else { - errorPrint( - "%s", - "failed to read json, interlace rows input mistake\n"); - goto PARSE_OVER; - } - - cJSON *disorderRatio = - cJSON_GetObjectItem(stbInfo, "disorder_ratio"); - if (disorderRatio && disorderRatio->type == cJSON_Number) { - if (disorderRatio->valueint > 50) disorderRatio->valueint = 50; - - if (disorderRatio->valueint < 0) disorderRatio->valueint = 0; - - g_Dbs.db[i].superTbls[j].disorderRatio = - (int)disorderRatio->valueint; - } else if (!disorderRatio) { - g_Dbs.db[i].superTbls[j].disorderRatio = 0; - } else { - errorPrint("%s", - "failed to read json, disorderRatio not found\n"); - goto PARSE_OVER; - } - - cJSON *disorderRange = - cJSON_GetObjectItem(stbInfo, "disorder_range"); - if (disorderRange && disorderRange->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].disorderRange = - (int)disorderRange->valueint; - } else if (!disorderRange) { - g_Dbs.db[i].superTbls[j].disorderRange = DEFAULT_DISORDER_RANGE; - } else { - errorPrint("%s", - "failed to read json, disorderRange not found\n"); - goto PARSE_OVER; - } - - cJSON *insertInterval = - cJSON_GetObjectItem(stbInfo, "insert_interval"); - if (insertInterval && insertInterval->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].insertInterval = - insertInterval->valueint; - if (insertInterval->valueint < 0) { - errorPrint( - "%s", - "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - } else if (!insertInterval) { - verbosePrint( - "%s() LN%d: stable insert interval be overrode by global " - "%" PRIu64 ".\n", - __func__, __LINE__, g_args.insert_interval); - g_Dbs.db[i].superTbls[j].insertInterval = - g_args.insert_interval; - } else { - errorPrint( - "%s", - "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - if (getColumnAndTagTypeFromInsertJsonFile( - stbInfo, &g_Dbs.db[i].superTbls[j])) { - goto PARSE_OVER; - } - } - } - - code = 0; - -PARSE_OVER: - return code; -} -int getMetaFromQueryJsonFile(cJSON *root) { - int32_t code = -1; - - cJSON *cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON *host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_queryInfo.host, DEFAULT_HOST, MAX_HOSTNAME_SIZE); - } else { - errorPrint("%s", "failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON *port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_queryInfo.port = (uint16_t)port->valueint; - } else if (!port) { - g_queryInfo.port = DEFAULT_PORT; - } - - cJSON *user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - ; - } - - cJSON *password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && - password->valuestring != NULL) { - tstrncpy(g_queryInfo.password, password->valuestring, - SHELL_MAX_PASSWORD_LEN); - } else if (!password) { - tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, - SHELL_MAX_PASSWORD_LEN); - ; - } - - cJSON *answerPrompt = - cJSON_GetObjectItem(root, 
"confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == cJSON_String && - answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = false; - } else { - errorPrint("%s", - "failed to read json, confirm_parameter_prompt not found\n"); - goto PARSE_OVER; - } - - cJSON *gQueryTimes = cJSON_GetObjectItem(root, "query_times"); - if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint <= 0) { - errorPrint("%s", - "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - g_args.query_times = gQueryTimes->valueint; - } else if (!gQueryTimes) { - g_args.query_times = DEFAULT_QUERY_TIME; - } else { - errorPrint("%s", "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - - cJSON *dbs = cJSON_GetObjectItem(root, "databases"); - if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); - } else if (!dbs) { - errorPrint("%s", "failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - cJSON *queryMode = cJSON_GetObjectItem(root, "query_mode"); - if (queryMode && queryMode->type == cJSON_String && - queryMode->valuestring != NULL) { - tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, - min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1)); - } else if (!queryMode) { - tstrncpy(g_queryInfo.queryMode, "taosc", - min(SMALL_BUFF_LEN, strlen("taosc") + 1)); - } else { - errorPrint("%s", "failed to read json, query_mode not found\n"); - goto PARSE_OVER; - } - - // specified_table_query - cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!specifiedQuery) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedQuery->type != cJSON_Object) { - errorPrint("%s", "failed to read json, super_table_query not found\n"); - goto PARSE_OVER; - } else { - cJSON *queryInterval = - cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (queryInterval && queryInterval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.queryInterval = - queryInterval->valueint; - } else if (!queryInterval) { - g_queryInfo.specifiedQueryInfo.queryInterval = 0; - } - - cJSON *specifiedQueryTimes = - cJSON_GetObjectItem(specifiedQuery, "query_times"); - if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint <= 0) { - errorPrint("failed to read json, query_times: %" PRId64 - ", need be a valid (>0) number\n", - specifiedQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.specifiedQueryInfo.queryTimes = - specifiedQueryTimes->valueint; - } else if (!specifiedQueryTimes) { - g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint( - "%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - if (concurrent->valueint <= 0) { - errorPrint( - "query sqlCount %d or concurrent %d is not correct.\n", - g_queryInfo.specifiedQueryInfo.sqlCount, - g_queryInfo.specifiedQueryInfo.concurrent); - goto PARSE_OVER; - } - 
g_queryInfo.specifiedQueryInfo.concurrent = - (uint32_t)concurrent->valueint; - } else if (!concurrent) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - } - - cJSON *specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String && - specifiedAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s", - "failed to read json, async mode input error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON *interval = cJSON_GetObjectItem(specifiedQuery, "interval"); - if (interval && interval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.subscribeInterval = - interval->valueint; - } else if (!interval) { - // printf("failed to read json, subscribe interval no found\n"); - // goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.subscribeInterval = - DEFAULT_SUB_INTERVAL; - } - - cJSON *restart = cJSON_GetObjectItem(specifiedQuery, "restart"); - if (restart && restart->type == cJSON_String && - restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = false; - } else { - errorPrint("%s", - "failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - - cJSON *keepProgress = - cJSON_GetObjectItem(specifiedQuery, "keepProgress"); - if (keepProgress && keepProgress->type == cJSON_String && - keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } else { - errorPrint( - "%s", - "failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON *specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); - if (!specifiedSqls) { - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(specifiedSqls); - if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent > - MAX_QUERY_SQL_COUNT) { - errorPrint( - "failed to read json, query sql(%d) * concurrent(%d) " - "overflow, max is %d\n", - superSqlSize, g_queryInfo.specifiedQueryInfo.concurrent, - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON *sql = cJSON_GetArrayItem(specifiedSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || - sqlStr->valuestring == NULL) { - errorPrint("%s", "failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, BUFFER_SIZE); - - // default value is -1, which mean infinite loop - 
g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - cJSON *endAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); - if (endAfterConsume && endAfterConsume->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = - (int)endAfterConsume->valueint; - } - if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - cJSON *resubAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); - if ((resubAfterConsume) && - (resubAfterConsume->type == cJSON_Number) && - (resubAfterConsume->valueint >= 0)) { - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = - (int)resubAfterConsume->valueint; - } - - if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if ((NULL != result) && (result->type == cJSON_String) && - (result->valuestring != NULL)) { - tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.specifiedQueryInfo.result[j], 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, super query result file " - "not found\n"); - goto PARSE_OVER; - } - } - } - } - - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.threadCnt = 1; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { - errorPrint("%s", "failed to read json, sub_table_query not found\n"); - code = 0; - goto PARSE_OVER; - } else { - cJSON *subrate = cJSON_GetObjectItem(superQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; - } else if (!subrate) { - g_queryInfo.superQueryInfo.queryInterval = 0; - } - - cJSON *superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); - if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint <= 0) { - errorPrint("failed to read json, query_times: %" PRId64 - ", need be a valid (>0) number\n", - superQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; - } else if (!superQueryTimes) { - g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint("%s", - "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - - cJSON *threads = cJSON_GetObjectItem(superQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - if (threads->valueint <= 0) { - errorPrint("%s", - "failed to read json, threads input mistake\n"); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.threadCnt = (uint32_t)threads->valueint; - } else if (!threads) { - g_queryInfo.superQueryInfo.threadCnt = DEFAULT_NTHREADS; - } - - // cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, - // "childtable_count"); if (subTblCnt && subTblCnt->type == - // cJSON_Number) - // { - // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.superQueryInfo.childTblCount = 0; - //} - - cJSON *stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && - stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring, - 
TSDB_TABLE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, super table name input error\n"); - goto PARSE_OVER; - } - - cJSON *superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); - if (superAsyncMode && superAsyncMode->type == cJSON_String && - superAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s", - "failed to read json, async mode input error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON *superInterval = cJSON_GetObjectItem(superQuery, "interval"); - if (superInterval && superInterval->type == cJSON_Number) { - if (superInterval->valueint < 0) { - errorPrint("%s", - "failed to read json, interval input mistake\n"); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.subscribeInterval = - superInterval->valueint; - } else if (!superInterval) { - // printf("failed to read json, subscribe interval no found\n"); - // goto PARSE_OVER; - g_queryInfo.superQueryInfo.subscribeInterval = - DEFAULT_QUERY_INTERVAL; - } - - cJSON *subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && - subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = false; - } else { - errorPrint("%s", - "failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } - - cJSON *superkeepProgress = - cJSON_GetObjectItem(superQuery, "keepProgress"); - if (superkeepProgress && superkeepProgress->type == cJSON_String && - superkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } else { - errorPrint("%s", - "failed to read json, subscribe super table " - "keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; - cJSON *superEndAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superEndAfterConsume && - superEndAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - (int)superEndAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.endAfterConsume < -1) - g_queryInfo.superQueryInfo.endAfterConsume = -1; - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - cJSON *superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); - if ((superResubAfterConsume) && - (superResubAfterConsume->type == cJSON_Number) && - (superResubAfterConsume->valueint >= 0)) { - g_queryInfo.superQueryInfo.resubAfterConsume = - (int)superResubAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - - // supert table sqls - cJSON *superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { - 
g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint( - "failed to read json, query sql size overflow, max is %d\n", - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON *sql = cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || - sqlStr->valuestring == NULL) { - errorPrint("%s", "failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - BUFFER_SIZE); - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && - result->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.superQueryInfo.result[j], 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, sub query result file not " - "found\n"); - goto PARSE_OVER; - } - } - } - } - - code = 0; - -PARSE_OVER: - return code; -} - -int getInfoFromJsonFile(char *file) { - debugPrint("%s %d %s\n", __func__, __LINE__, file); - int32_t code = -1; - FILE * fp = fopen(file, "r"); - if (!fp) { - errorPrint("failed to read %s, reason:%s\n", file, strerror(errno)); - return code; - } - - int maxLen = MAX_JSON_BUFF; - char *content = calloc(1, maxLen + 1); - int len = (int)fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - errorPrint("failed to read %s, content is null", file); - return code; - } - - content[len] = 0; - cJSON *root = cJSON_Parse(content); - if (root == NULL) { - errorPrint("failed to cjson parse %s, invalid json format\n", file); - goto PARSE_OVER; - } - - cJSON *filetype = cJSON_GetObjectItem(root, "filetype"); - if (filetype && filetype->type == cJSON_String && - filetype->valuestring != NULL) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_args.test_mode = INSERT_TEST; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_args.test_mode = QUERY_TEST; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_args.test_mode = SUBSCRIBE_TEST; - } else { - errorPrint("%s", "failed to read json, filetype not support\n"); - goto PARSE_OVER; - } - } else if (!filetype) { - g_args.test_mode = INSERT_TEST; - } else { - errorPrint("%s", "failed to read json, filetype not found\n"); - goto PARSE_OVER; - } - - if (INSERT_TEST == g_args.test_mode) { - memset(&g_Dbs, 0, sizeof(SDbs)); - g_Dbs.use_metric = g_args.use_metric; - code = getMetaFromInsertJsonFile(root); - } else if ((QUERY_TEST == g_args.test_mode) || - (SUBSCRIBE_TEST == g_args.test_mode)) { - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - code = getMetaFromQueryJsonFile(root); - } else { - errorPrint("%s", - "input json file type error! 
please input correct file " - "type: insert or query or subscribe\n"); - goto PARSE_OVER; - } -PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return code; -} - -int testMetaFile() { - if (INSERT_TEST == g_args.test_mode) { - if (g_Dbs.cfgDir[0]) { - taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); - } - return insertTestProcess(); - - } else if (QUERY_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) { - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - } - return queryTestProcess(); - - } else if (SUBSCRIBE_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) { - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - } - return subscribeTestProcess(); - } else { - errorPrint("unsupport test mode (%d)\n", g_args.test_mode); - return -1; - } - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoMain.c b/src/kit/taosdemo/src/demoMain.c deleted file mode 100644 index d5e9467b223718338d280dce9b8582dd3de00cd6..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoMain.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#include "demo.h" -int64_t g_totalChildTables = DEFAULT_CHILDTABLES; -int64_t g_actualChildTables = 0; -FILE * g_fpOfInsertResult = NULL; -char * g_dupstr = NULL; -SDbs g_Dbs; -SQueryMetaInfo g_queryInfo; -bool g_fail = false; - -SArguments g_args = { - DEFAULT_METAFILE, // metaFile - DEFAULT_TEST_MODE, // test_mode - DEFAULT_HOST, // host - DEFAULT_PORT, // port - DEFAULT_IFACE, // iface - TSDB_DEFAULT_USER, // user - TSDB_DEFAULT_PASS, // password - DEFAULT_DATABASE, // database - DEFAULT_REPLICA, // replica - DEFAULT_TB_PREFIX, // tb_prefix - DEFAULT_ESCAPE_CHAR, // escapeChar - DEFAULT_SQLFILE, // sqlFile - DEFAULT_USE_METRIC, // use_metric - DEFAULT_DROP_DB, // drop_database - DEFAULT_AGGR_FUNC, // aggr_func - DEFAULT_DEBUG, // debug_print - DEFAULT_VERBOSE, // verbose_print - DEFAULT_PERF_STAT, // performance statistic print - DEFAULT_ANS_YES, // answer_yes; - DEFAULT_OUTPUT, // output_file - DEFAULT_SYNC_MODE, // mode : sync or async - DEFAULT_DATA_TYPE, // data_type - DEFAULT_DATATYPE, // dataType - DEFAULT_DATALENGTH, // data_length - DEFAULT_BINWIDTH, // binwidth - DEFAULT_COL_COUNT, // columnCount, timestamp + float + int + float - DEFAULT_LEN_ONE_ROW, // lenOfOneRow - DEFAULT_NTHREADS, // nthreads - DEFAULT_INSERT_INTERVAL, // insert_interval - DEFAULT_TIMESTAMP_STEP, // timestamp_step - DEFAULT_QUERY_TIME, // query_times - DEFAULT_PREPARED_RAND, // prepared_rand - DEFAULT_INTERLACE_ROWS, // interlaceRows; - DEFAULT_REQ_PER_REQ, // reqPerReq - TSDB_MAX_ALLOWED_SQL_LEN, // max_sql_len - DEFAULT_CHILDTABLES, // ntables - DEFAULT_INSERT_ROWS, // insertRows - DEFAULT_ABORT, // abort - DEFAULT_RATIO, // disorderRatio - DEFAULT_DISORDER_RANGE, // disorderRange - DEFAULT_METHOD_DEL, // method_of_delete - DEFAULT_TOTAL_INSERT, // totalInsertRows; - DEFAULT_TOTAL_AFFECT, // totalAffectedRows; - DEFAULT_DEMO_MODE, // 
demo_mode; - DEFAULT_CHINESE_OPT // chinese -}; - -int main(int argc, char *argv[]) { - if (parse_args(argc, argv)) { - exit(EXIT_FAILURE); - } - debugPrint("meta file: %s\n", g_args.metaFile); - - if (g_args.metaFile) { - g_totalChildTables = 0; - if (getInfoFromJsonFile(g_args.metaFile)) { - exit(EXIT_FAILURE); - } - if (testMetaFile()) { - exit(EXIT_FAILURE); - } - } else { - memset(&g_Dbs, 0, sizeof(SDbs)); - g_Dbs.db = calloc(1, sizeof(SDataBase)); - if (NULL == g_Dbs.db) { - errorPrint("%s", "failed to allocate memory\n"); - } - - g_Dbs.db[0].superTbls = calloc(1, sizeof(SSuperTable)); - if (NULL == g_Dbs.db[0].superTbls) { - errorPrint("%s", "failed to allocate memory\n"); - } - - setParaFromArg(); - - if (NULL != g_args.sqlFile) { - TAOS *qtaos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, - g_Dbs.db[0].dbName, g_Dbs.port); - querySqlFile(qtaos, g_args.sqlFile); - taos_close(qtaos); - } else { - testCmdLine(); - } - } - postFreeResource(); - - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoOutput.c b/src/kit/taosdemo/src/demoOutput.c deleted file mode 100644 index c253967f8fe270dfbacd3f9dccbfd74bde4487c1..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoOutput.c +++ /dev/null @@ -1,1056 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#include "demo.h" - -#define SHOW_PARSE_RESULT_START() \ - do { \ - if (g_args.metaFile) \ - printf( \ - "\033[1m\033[40;32m================ %s parse result START " \ - "================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -#define SHOW_PARSE_RESULT_END() \ - do { \ - if (g_args.metaFile) \ - printf( \ - "\033[1m\033[40;32m================ %s parse result " \ - "END================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -#define SHOW_PARSE_RESULT_START_TO_FILE(fp) \ - do { \ - if (g_args.metaFile) \ - fprintf(fp, \ - "\033[1m\033[40;32m================ %s parse result " \ - "START ================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -#define SHOW_PARSE_RESULT_END_TO_FILE(fp) \ - do { \ - if (g_args.metaFile) \ - fprintf(fp, \ - "\033[1m\033[40;32m================ %s parse result " \ - "END================\033[0m\n", \ - g_args.metaFile); \ - } while (0) - -int getDbFromServer(TAOS *taos, SDbInfo **dbInfos) { - TAOS_RES *res; - TAOS_ROW row = NULL; - int count = 0; - - res = taos_query(taos, "show databases;"); - int32_t code = taos_errno(res); - - if (code != 0) { - errorPrint("failed to run , reason: %s\n", - taos_errstr(res)); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - while ((row = taos_fetch_row(res)) != NULL) { - // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { - continue; - } - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - errorPrint("failed to allocate memory for some dbInfo[%d]\n", - count); - return -1; - } - - tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - formatTimestamp(dbInfos[count]->create_time, - *(int64_t *)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], - TSDB_TIME_PRECISION_MILLI); - dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = - *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], - fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); - - count++; - if (count > MAX_DATABASE_COUNT) { - errorPrint("The database count overflow than %d\n", - MAX_DATABASE_COUNT); - break; - } - } - - return count; -} - -void xDumpFieldToFile(FILE *fp, const char *val, TAOS_FIELD *field, - 
int32_t length, int precision) { - if (val == NULL) { - fprintf(fp, "%s", TSDB_DATA_NULL_STR); - return; - } - - char buf[TSDB_MAX_BYTES_PER_ROW]; - switch (field->type) { - case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((int8_t *)val))) == 1) ? 1 : 0)); - break; - - case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", *((int8_t *)val)); - break; - - case TSDB_DATA_TYPE_UTINYINT: - fprintf(fp, "%d", *((uint8_t *)val)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", *((int16_t *)val)); - break; - - case TSDB_DATA_TYPE_USMALLINT: - fprintf(fp, "%d", *((uint16_t *)val)); - break; - - case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int32_t *)val)); - break; - - case TSDB_DATA_TYPE_UINT: - fprintf(fp, "%d", *((uint32_t *)val)); - break; - - case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64 "", *((int64_t *)val)); - break; - - case TSDB_DATA_TYPE_UBIGINT: - fprintf(fp, "%" PRId64 "", *((uint64_t *)val)); - break; - - case TSDB_DATA_TYPE_FLOAT: - fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(buf, val, length); - buf[length] = 0; - fprintf(fp, "\'%s\'", buf); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - formatTimestamp(buf, *(int64_t *)val, precision); - fprintf(fp, "'%s'", buf); - break; - - default: - break; - } -} - -int xDumpResultToFile(const char *fname, TAOS_RES *tres) { - TAOS_ROW row = taos_fetch_row(tres); - if (row == NULL) { - return 0; - } - - FILE *fp = fopen(fname, "at"); - if (fp == NULL) { - errorPrint("failed to open file: %s\n", fname); - return -1; - } - - int num_fields = taos_num_fields(tres); - TAOS_FIELD *fields = taos_fetch_fields(tres); - int precision = taos_result_precision(tres); - - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - fprintf(fp, ","); - } - fprintf(fp, "%s", fields[col].name); - } - fputc('\n', fp); - - int numOfRows = 0; - do { - int32_t *length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - if (i > 0) { - fputc(',', fp); - } - xDumpFieldToFile(fp, (const char *)row[i], fields + i, length[i], - precision); - } - fputc('\n', fp); - - numOfRows++; - row = taos_fetch_row(tres); - } while (row != NULL); - - fclose(fp); - - return numOfRows; -} - -#ifndef TAOSDEMO_COMMIT_SHA1 -#define TAOSDEMO_COMMIT_SHA1 "unknown" -#endif - -#ifndef TD_VERNUMBER -#define TD_VERNUMBER "unknown" -#endif - -#ifndef TAOSDEMO_STATUS -#define TAOSDEMO_STATUS "unknown" -#endif - -void printVersion() { - char tdengine_ver[] = TD_VERNUMBER; - char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1; - char taosdemo_status[] = TAOSDEMO_STATUS; - - if (strlen(taosdemo_status) == 0) { - printf("taosdemo version %s-%s\n", tdengine_ver, taosdemo_ver); - } else { - printf("taosdemo version %s-%s, status:%s\n", tdengine_ver, - taosdemo_ver, taosdemo_status); - } - exit(EXIT_SUCCESS); -} - -void printHelp() { - char indent[10] = " "; - printf("%s\n\n", "Usage: taosdemo [OPTION...]"); - printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t", - "The meta file to the execution procedure."); - printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t", - "The user name to use when connecting to the server."); -#ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. By default is " - "'powerdb'"); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. 
By default is '/etc/power/'."); -#elif (_TD_TQ_ == true) - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. By default is " - "'tqueue'"); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. By default is '/etc/tq/'."); -#elif (_TD_PRO_ == true) - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. By default is " - "'prodb'"); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. By default is '/etc/ProDB/'."); -#else - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server."); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory."); -#endif - printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t", - "TDengine server FQDN to connect. The default host is localhost."); - printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t", - "The TCP/IP port number to use for the connection."); - printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t", - "The interface (taosc, rest, stmt, and sml(line protocol)) taosdemo " - "uses. By default " - "use 'taosc'."); - printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t", - "Destination database. By default is 'test'."); - printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t", - "Set the replica parameters of the database, By default use 1, min: " - "1, max: 3."); - printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", - "Table prefix name. By default use 'd'."); - printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t", - "Use escape character for Both Stable and normmal table name"); - printf("%s%s%s%s\n", indent, "-C, --chinese", "\t", - "Use chinese characters as the data source for binary/nchar data"); - printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", - "The select sql file."); - printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", - "Use normal table flag."); - printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", - "Direct output to the named file. By default use './output.txt'."); - printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", - "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC."); - printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", - "The data_type of columns, By default use: FLOAT,INT,FLOAT. NCHAR " - "and BINARY can also use custom length. Eg: NCHAR(16),BINARY(8)"); - printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", - "The width of data_type 'BINARY' or 'NCHAR'. By default use ", - g_args.binwidth); - printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", - "The number of columns per record. Demo mode by default is ", - DEFAULT_DATATYPE_NUM, " (float, int, float). Max values is ", - MAX_NUM_COLUMNS); - printf("%s%s%s%s\n", indent, indent, indent, - "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify " - "column type, -l will be ignored."); - printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t", - "The number of threads. By default use ", DEFAULT_NTHREADS); - printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", - "The sleep time (ms) between insertion. By default is 0."); - printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", - "The timestamp step between insertion. 
By default is ", - DEFAULT_TIMESTAMP_STEP); - printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", - "The interlace rows of insertion. By default is ", - DEFAULT_INTERLACE_ROWS); - printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", - "The number of records per request. By default is 30000."); - printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", - "The number of tables. By default is 10000."); - printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", - "The number of records per table. By default is 10000."); - printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", - "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario."); - printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t", - "Test aggregation functions after insertion."); - printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", - "Input yes for prompt."); - printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", - "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default " - "is in order."); - printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", - "Out of order data's range. Unit is ms. By default is 1000."); - printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", "Print debug info."); - printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", "Give this help list"); - printf("%s%s%s%s\n", indent, " --usage\t", "\t\t", - "Give a short usage message"); - printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version."); - /* printf("%s%s%s%s\n", indent, "-D", indent, - "Delete database if exists. 0: no, 1: yes, default is 1"); - */ - printf( - "\nMandatory or optional arguments to long options are also mandatory or optional\n\ -for any corresponding short options.\n\ -\n\ -Report bugs to .\n"); - exit(EXIT_SUCCESS); -} - -void printfInsertMeta() { - setupForAnsiEscape(); - SHOW_PARSE_RESULT_START(); - - if (g_args.demo_mode) { - printf( - "\ntaosdemo is simulating data generated by power equipment " - "monitoring...\n\n"); - } else { - printf("\ntaosdemo is simulating random data as you request..\n\n"); - } - - if (g_args.iface != INTERFACE_BUT) { - // first time if no iface specified - printf("interface: \033[33m%s\033[0m\n", - (g_args.iface == TAOSC_IFACE) ? "taosc" - : (g_args.iface == REST_IFACE) ? "rest" - : (g_args.iface == STMT_IFACE) ? 
"stmt" - : "sml"); - } - - printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, - g_Dbs.port); - printf("user: \033[33m%s\033[0m\n", g_Dbs.user); - printf("password: \033[33m%s\033[0m\n", g_Dbs.password); - printf("configDir: \033[33m%s\033[0m\n", configDir); - printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); - printf("thread num of insert data: \033[33m%d\033[0m\n", - g_Dbs.threadCount); - printf("thread num of create table: \033[33m%d\033[0m\n", - g_Dbs.threadCountForCreateTbl); - printf("top insert interval: \033[33m%" PRIu64 "\033[0m\n", - g_args.insert_interval); - printf("number of records per req: \033[33m%u\033[0m\n", g_args.reqPerReq); - printf("max sql length: \033[33m%" PRIu64 "\033[0m\n", - g_args.max_sql_len); - printf("random prepare data: \033[33m%" PRId64 "\033[0m\n", g_args.prepared_rand); - printf("chinese: \033[33m%s\033[0m\n", g_args.chinese?"yes":"no"); - - printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - printf("database[\033[33m%d\033[0m]:\n", i); - printf(" database[%d] name: \033[33m%s\033[0m\n", i, - g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33m no\033[0m\n"); - } else { - printf(" drop: \033[33m yes\033[0m\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - printf(" blocks: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - printf(" cache: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - printf(" days: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - printf(" keep: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - printf(" replica: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - printf(" update: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - printf(" minRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - printf(" maxRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - printf(" comp: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - printf(" walLevel: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - printf(" fsync: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - printf(" quorum: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) { - printf(" precision: \033[33m%s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - } else { - printf("\033[1m\033[40;31m precision error: %s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - } - } - - if (g_args.use_metric) { - printf(" super table count: \033[33m%" PRIu64 "\033[0m\n", - g_Dbs.db[i].superTblCount); - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%" PRIu64 "\033[0m]:\n", j); - - printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].stbName); - - if (PRE_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", - "no"); - } else if (AUTO_CREATE_SUBTBL == - 
g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", - "yes"); - } else { - printf(" autoCreateTable: \033[33m%s\033[0m\n", - "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", - "no"); - } else if (TBL_ALREADY_EXISTS == - g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", - "yes"); - } else { - printf(" childTblExists: \033[33m%s\033[0m\n", - "error"); - } - - printf(" childTblCount: \033[33m%" PRId64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblCount); - printf(" childTblPrefix: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - printf(" dataSource: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].dataSource); - printf(" iface: \033[33m%s\033[0m\n", - (g_Dbs.db[i].superTbls[j].iface == TAOSC_IFACE) ? "taosc" - : (g_Dbs.db[i].superTbls[j].iface == REST_IFACE) ? "rest" - : (g_Dbs.db[i].superTbls[j].iface == STMT_IFACE) - ? "stmt" - : "sml"); - if (g_Dbs.db[i].superTbls[j].iface == SML_IFACE) { - printf(" lineProtocol: \033[33m%s\033[0m\n", - (g_Dbs.db[i].superTbls[j].lineProtocol == - TSDB_SML_LINE_PROTOCOL) - ? "line" - : (g_Dbs.db[i].superTbls[j].lineProtocol == - TSDB_SML_TELNET_PROTOCOL) - ? "telnet" - : "json"); - } - - if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%" PRId64 - "\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblLimit); - } - if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { - printf(" childTblOffset: \033[33m%" PRIu64 - "\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblOffset); - } - printf(" insertRows: \033[33m%" PRId64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].insertRows); - /* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); - }else { - printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); - } - */ - printf(" interlaceRows: \033[33m%u\033[0m\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - printf(" stable insert interval: \033[33m%" PRIu64 - "\033[0m\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - - printf(" disorderRange: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRange); - printf(" disorderRatio: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%" PRIu64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - printf(" timeStampStep: \033[33m%" PRId64 "\033[0m\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - printf(" startTimestamp: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - printf(" sampleFormat: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - printf(" sampleFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFile); - printf(" useSampleTs: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].useSampleTs - ? 
"yes (warning: disorderRange/disorderRatio is " - "disabled)" - : "no"); - printf(" tagsFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].columns[k].dataType, - // g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", 6)) || - (0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", 5))) { - printf("column[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - printf("column[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - printf("\n"); - - printf(" tagCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].tags[k].dataType, - // g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == - strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) || - (0 == - strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - printf("tag[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - printf("\n"); - } - } else { - printf(" childTblCount: \033[33m%" PRId64 "\033[0m\n", - g_args.ntables); - printf(" insertRows: \033[33m%" PRId64 "\033[0m\n", - g_args.insertRows); - } - printf("\n"); - } - - SHOW_PARSE_RESULT_END(); - resetAfterAnsiEscape(); -} - -void printfInsertMetaToFile(FILE *fp) { - SHOW_PARSE_RESULT_START_TO_FILE(fp); - - fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); - fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "configDir: %s\n", configDir); - fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); - fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); - fprintf(fp, "thread num of create table: %d\n", - g_Dbs.threadCountForCreateTbl); - fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq); - fprintf(fp, "max sql length: %" PRIu64 "\n", - g_args.max_sql_len); - fprintf(fp, "database count: %d\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - fprintf(fp, "database[%d]:\n", i); - fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - fprintf(fp, " drop: no\n"); - } else { - fprintf(fp, " drop: yes\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - fprintf(fp, " blocks: %d\n", - g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - fprintf(fp, " cache: %d\n", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - fprintf(fp, " days: %d\n", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - fprintf(fp, " keep: %d\n", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - fprintf(fp, " replica: %d\n", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - fprintf(fp, " update: %d\n", - g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - fprintf(fp, " minRows: %d\n", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - fprintf(fp, " maxRows: 
%d\n", - g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - fprintf(fp, " comp: %d\n", - g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - fprintf(fp, " walLevel: %d\n", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - fprintf(fp, " fsync: %d\n", - g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - fprintf(fp, " quorum: %d\n", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) || - (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - fprintf(fp, " precision: %s\n", - g_Dbs.db[i].dbCfg.precision); - } else { - fprintf(fp, " precision error: %s\n", - g_Dbs.db[i].dbCfg.precision); - } - } - - fprintf(fp, " super table count: %" PRIu64 "\n", - g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - fprintf(fp, " super table[%d]:\n", j); - - fprintf(fp, " stbName: %s\n", - g_Dbs.db[i].superTbls[j].stbName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "no"); - } else if (AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "yes"); - } else { - fprintf(fp, " autoCreateTable: %s\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "no"); - } else if (TBL_ALREADY_EXISTS == - g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "yes"); - } else { - fprintf(fp, " childTblExists: %s\n", "error"); - } - - fprintf(fp, " childTblCount: %" PRId64 "\n", - g_Dbs.db[i].superTbls[j].childTblCount); - fprintf(fp, " childTblPrefix: %s\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - fprintf(fp, " dataSource: %s\n", - g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " iface: %s\n", - (g_Dbs.db[i].superTbls[j].iface == TAOSC_IFACE) ? "taosc" - : (g_Dbs.db[i].superTbls[j].iface == REST_IFACE) ? "rest" - : (g_Dbs.db[i].superTbls[j].iface == STMT_IFACE) ? 
"stmt" - : "sml"); - fprintf(fp, " insertRows: %" PRId64 "\n", - g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %" PRIu64 "\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - /* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - fprintf(fp, " multiThreadWriteOneTbl: no\n"); - }else { - fprintf(fp, " multiThreadWriteOneTbl: yes\n"); - } - */ - fprintf(fp, " interlaceRows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - fprintf(fp, " disorderRange: %d\n", - g_Dbs.db[i].superTbls[j].disorderRange); - fprintf(fp, " disorderRatio: %d\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %" PRIu64 "\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - - fprintf(fp, " timeStampStep: %" PRId64 "\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - fprintf(fp, " sampleFormat: %s\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", - g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", - g_Dbs.db[i].superTbls[j].tagsFile); - - fprintf(fp, " columnCount: %d\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].columns[k].dataType, - // g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == - strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", strlen("binary"))) || - (0 == - strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "column[%d]:%s(%d) ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - fprintf(fp, "column[%d]:%s ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - fprintf(fp, "\n"); - - fprintf(fp, " tagCount: %d\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - // printf("dataType:%s, dataLen:%d\t", - // g_Dbs.db[i].superTbls[j].tags[k].dataType, - // g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) || - (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "tag[%d]:%s(%d) ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - fprintf(fp, "tag[%d]:%s ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - fprintf(fp, "\n"); - } - fprintf(fp, "\n"); - } - - SHOW_PARSE_RESULT_END_TO_FILE(fp); -} - -void printfQueryMeta() { - setupForAnsiEscape(); - SHOW_PARSE_RESULT_START(); - - printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, - g_queryInfo.port); - printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); - printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); - - printf("\n"); - - if ((SUBSCRIBE_TEST == g_args.test_mode) || - (QUERY_TEST == g_args.test_mode)) { - printf("specified table query info: \n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.sqlCount); - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { - printf("specified tbl query times:\n"); - printf(" \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryTimes); - printf("query interval: \033[33m%" PRIu64 " ms\033[0m\n", 
- g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%" PRIu64 "\033[0m\n", - g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.concurrent); - printf( - "mod: \033[33m%s\033[0m\n", - (g_queryInfo.specifiedQueryInfo.asyncMode) ? "async" : "sync"); - printf("interval: \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, - g_queryInfo.specifiedQueryInfo.sql[i]); - } - printf("\n"); - } - - printf("super table query info:\n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.sqlCount); - - if (g_queryInfo.superQueryInfo.sqlCount > 0) { - printf("query interval: \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.superQueryInfo.queryInterval); - printf("threadCnt: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%" PRId64 "\033[0m\n", - g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.stbName); - printf("stb query times:\033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.superQueryInfo.queryTimes); - - printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.superQueryInfo.asyncMode) ? "async" : "sync"); - printf("interval: \033[33m%" PRIu64 "\033[0m\n", - g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, - g_queryInfo.superQueryInfo.sql[i]); - } - printf("\n"); - } - } - - SHOW_PARSE_RESULT_END(); -} - -void printfDbInfoForQueryToFile(char *filename, SDbInfo *dbInfos, int index) { - if (filename[0] == 0) return; - - FILE *fp = fopen(filename, "at"); - if (fp == NULL) { - errorPrint("failed to open file: %s\n", filename); - return; - } - - fprintf(fp, "================ database[%d] ================\n", index); - fprintf(fp, "name: %s\n", dbInfos->name); - fprintf(fp, "created_time: %s\n", dbInfos->create_time); - fprintf(fp, "ntables: %" PRId64 "\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); - fprintf(fp, "replica: %d\n", dbInfos->replica); - fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); - fprintf(fp, "cache(MB): %d\n", dbInfos->cache); - fprintf(fp, "blocks: %d\n", dbInfos->blocks); - fprintf(fp, "minrows: %d\n", dbInfos->minrows); - fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); - fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); - fprintf(fp, "fsync: %d\n", dbInfos->fsync); - fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); - fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); - fprintf(fp, "\n"); - - fclose(fp); -} - -void printfQuerySystemInfo(TAOS *taos) { - char filename[MAX_FILE_NAME_LEN] = {0}; - char buffer[SQL_BUFF_LEN] = {0}; - TAOS_RES *res; - - time_t t; - struct tm *lt; - time(&t); - lt 
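The meta printers above (printfQueryMeta() and friends) highlight values with raw ANSI escape sequences (`\033[33m` ... `\033[0m`). A minimal standalone sketch of that output pattern; it assumes a terminal that understands ANSI codes and does not reproduce taosdemo's Windows console-mode handling:

```c
#include <stdio.h>

/* Print a label in plain text and its value highlighted in yellow,
 * mirroring the "\033[33m...\033[0m" pattern used by printfQueryMeta(). */
static void print_highlighted(const char *label, const char *value) {
    printf("%-16s \033[33m%s\033[0m\n", label, value);
}

int main(void) {
    print_highlighted("host:", "127.0.0.1:6030");   /* illustrative values */
    print_highlighted("database name:", "test");
    return 0;
}
```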
= localtime(&t); - snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", - lt->tm_year + 1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, - lt->tm_min, lt->tm_sec); - - // show variables - res = taos_query(taos, "show variables;"); - // fetchResult(res, filename); - xDumpResultToFile(filename, res); - - // show dnodes - res = taos_query(taos, "show dnodes;"); - xDumpResultToFile(filename, res); - // fetchResult(res, filename); - - // show databases - res = taos_query(taos, "show databases;"); - SDbInfo **dbInfos = - (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - return; - } - int dbCount = getDbFromServer(taos, dbInfos); - if (dbCount <= 0) { - tmfree(dbInfos); - return; - } - - for (int i = 0; i < dbCount; i++) { - // printf database info - printfDbInfoForQueryToFile(filename, dbInfos[i], i); - - // show db.vgroups - snprintf(buffer, SQL_BUFF_LEN, "show %s.vgroups;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - // show db.stables - snprintf(buffer, SQL_BUFF_LEN, "show %s.stables;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - free(dbInfos[i]); - } - - free(dbInfos); - resetAfterAnsiEscape(); -} - -void printStatPerThread(threadInfo *pThreadInfo) { - if (0 == pThreadInfo->totalDelay) pThreadInfo->totalDelay = 1; - - fprintf(stderr, - "====thread[%d] completed total inserted rows: %" PRIu64 - ", total affected rows: %" PRIu64 ". %.2f records/second====\n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows, - (double)(pThreadInfo->totalAffectedRows / - ((double)pThreadInfo->totalDelay / 1000000.0))); -} - -void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) { - pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); - if (pThreadInfo->fp == NULL) { - errorPrint( - "%s() LN%d, failed to open result file: %s, result will not save " - "to file\n", - __func__, __LINE__, pThreadInfo->filePath); - return; - } - - fprintf(pThreadInfo->fp, "%s", resultBuf); - tmfclose(pThreadInfo->fp); - pThreadInfo->fp = NULL; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoQuery.c b/src/kit/taosdemo/src/demoQuery.c deleted file mode 100644 index ffae0ff10a643c97fbb2f291ae168d7a23dce545..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoQuery.c +++ /dev/null @@ -1,465 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
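printfQuerySystemInfo() above builds its report filename from a struct tm by hand; note that `tm_mon` is zero-based, so the month printed by the "querySystemInfo-%d-%d-%d ..." format comes out one low. A sketch of the same idea using strftime, which applies the +1900/+1 adjustments itself; the exact format string here is an illustrative choice, not the original one:

```c
#include <stdio.h>
#include <time.h>

int main(void) {
    char filename[256];
    time_t t = time(NULL);
    struct tm lt;
    localtime_r(&t, &lt);   /* thread-safe variant of localtime() */

    /* strftime handles the tm_year+1900 and tm_mon+1 adjustments and
     * avoids spaces/colons, which are awkward in file names. */
    strftime(filename, sizeof(filename), "querySystemInfo-%Y-%m-%d-%H%M%S", &lt);
    printf("report file: %s\n", filename);
    return 0;
}
```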
- */ - -#include "demo.h" - -void selectAndGetResult(threadInfo *pThreadInfo, char *command) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - if (res == NULL || taos_errno(res) != 0) { - errorPrint("failed to execute sql:%s, reason:%s\n", command, - taos_errstr(res)); - taos_free_result(res); - return; - } - - fetchResult(res, pThreadInfo); - taos_free_result(res); - - } else if (0 == - strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - int retCode = postProceSql(g_queryInfo.host, g_queryInfo.port, command, - pThreadInfo); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", - pThreadInfo->threadID); - } - - } else { - errorPrint("unknown query mode: %s\n", g_queryInfo.queryMode); - } -} - -void *specifiedTableQuery(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - int32_t *code = calloc(1, sizeof (int32_t)); - *code = -1; - setThreadName("specTableQuery"); - - if (pThreadInfo->taos == NULL) { - TAOS *taos = NULL; - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - goto end_of_specified_query; - } else { - pThreadInfo->taos = taos; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint("use database %s failed!\n\n", g_queryInfo.dbName); - goto end_of_specified_query; - } - - uint64_t st = 0; - uint64_t et = 0; - - uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; - - uint64_t totalQueried = 0; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != - '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - while (queryTimes--) { - if (g_queryInfo.specifiedQueryInfo.queryInterval && - (et - st) < (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { - taosMsleep((int32_t)(g_queryInfo.specifiedQueryInfo.queryInterval - - (et - st))); // ms - } - - st = taosGetTimestampMs(); - - selectAndGetResult( - pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); - - et = taosGetTimestampMs(); - printf("=thread[%" PRId64 "] use %s complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), g_queryInfo.queryMode, - (et - st) / 1000.0); - - totalQueried++; - g_queryInfo.specifiedQueryInfo.totalQueried++; - - uint64_t currentPrintTime = taosGetTimestampMs(); - uint64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - debugPrint("%s() LN%d, endTs=%" PRIu64 " ms, startTs=%" PRIu64 - " ms\n", - __func__, __LINE__, endTs, startTs); - printf("thread[%d] has currently completed queries: %" PRIu64 - ", QPS: %10.6f\n", - pThreadInfo->threadID, totalQueried, - (double)(totalQueried / ((endTs - startTs) / 1000.0))); - lastPrintTime = currentPrintTime; - } - } - *code = 0; - end_of_specified_query: - return code; -} - -void *superTableQuery(void *sarg) { - int32_t * code = calloc(1, sizeof (int32_t)); - *code = -1; - char *sqlstr = calloc(1, BUFFER_SIZE); - if (NULL == sqlstr) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_of_super_query; - } - 
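specifiedTableQuery() above paces its loop by sleeping away whatever is left of `queryInterval` after each query. A standalone sketch of that throttle using POSIX clocks in place of the internal taosGetTimestampMs()/taosMsleep() helpers; `run_query_stub` is a hypothetical stand-in for the real query call:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

static void run_query_stub(int i) { printf("query %d\n", i); }

int main(void) {
    const uint64_t interval_ms = 500;   /* pace: at most one query per 500 ms */
    uint64_t st = 0, et = 0;

    for (int i = 0; i < 5; i++) {
        /* Sleep only for the part of the interval not already spent querying,
         * as the (et - st) < queryInterval check above does. */
        if (interval_ms && (et - st) < interval_ms) {
            usleep((useconds_t)((interval_ms - (et - st)) * 1000));
        }
        st = now_ms();
        run_query_stub(i);
        et = now_ms();
    }
    return 0;
}
```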
- threadInfo *pThreadInfo = (threadInfo *)sarg; - - setThreadName("superTableQuery"); - - if (pThreadInfo->taos == NULL) { - TAOS *taos = NULL; - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - goto free_of_super_query; - } else { - pThreadInfo->taos = taos; - } - } - - uint64_t st = 0; - uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; - - uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes; - uint64_t totalQueried = 0; - uint64_t startTs = taosGetTimestampMs(); - - uint64_t lastPrintTime = taosGetTimestampMs(); - while (queryTimes--) { - if (g_queryInfo.superQueryInfo.queryInterval && - (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { - taosMsleep((int32_t)(g_queryInfo.superQueryInfo.queryInterval - - (et - st))); // ms - // printf("========sleep duration:%"PRId64 "========inserted - // rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, - // pThreadInfo->start_table_from, pThreadInfo->end_table_to); - } - - st = taosGetTimestampMs(); - for (int i = (int)pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr, 0, BUFFER_SIZE); - replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, - i); - if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[j], - pThreadInfo->threadID); - } - selectAndGetResult(pThreadInfo, sqlstr); - - totalQueried++; - g_queryInfo.superQueryInfo.totalQueried++; - - int64_t currentPrintTime = taosGetTimestampMs(); - int64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - printf( - "thread[%d] has currently completed queries: %" PRIu64 - ", QPS: %10.3f\n", - pThreadInfo->threadID, totalQueried, - (double)(totalQueried / ((endTs - startTs) / 1000.0))); - lastPrintTime = currentPrintTime; - } - } - } - et = taosGetTimestampMs(); - printf("####thread[%" PRId64 - "] complete all sqls to allocate all sub-tables[%" PRIu64 - " - %" PRIu64 "] once queries duration:%.4fs\n\n", - taosGetSelfPthreadId(), pThreadInfo->start_table_from, - pThreadInfo->end_table_to, (double)(et - st) / 1000.0); - } - *code = 0; - free_of_super_query: - tmfree(sqlstr); - return code; -} - -int queryTestProcess() { - printfQueryMeta(); - - TAOS *taos = NULL; - taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, NULL, g_queryInfo.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.stbName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - prompt(); - - if (g_args.debug_print || g_args.verbose_print) { - printfQuerySystemInfo(taos); - } - - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - if (convertHostToServAddr(g_queryInfo.host, g_queryInfo.port, - &g_queryInfo.serv_addr) != 0) - ERROR_EXIT("convert host to server address"); - } - - pthread_t * pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from specify table - int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; - uint64_t nSqlCount = 
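superTableQuery() above calls replaceChildTblName() to swap the literal "xxxx" placeholder in each configured SQL statement for the current child table name (the helper itself lives in demoUtil.c further down). A sketch of that substitution with plain string functions; the template and table name used here are illustrative:

```c
#include <stdio.h>
#include <string.h>

/* Replace the first occurrence of the "xxxx" placeholder in an SQL template
 * with a concrete child-table name, as superTableQuery() does per table. */
static void replace_placeholder(const char *tmpl, const char *tbl,
                                char *out, size_t outlen) {
    const char *placeholder = "xxxx";
    const char *pos = strstr(tmpl, placeholder);
    if (pos == NULL) {                      /* no placeholder: copy as-is */
        snprintf(out, outlen, "%s", tmpl);
        return;
    }
    snprintf(out, outlen, "%.*s%s%s",
             (int)(pos - tmpl), tmpl,       /* text before the placeholder */
             tbl,                           /* child table name            */
             pos + strlen(placeholder));    /* text after the placeholder  */
}

int main(void) {
    char sql[256];
    replace_placeholder("select col1 from xxxx where col1 > 10;",
                        "test.d1001", sql, sizeof(sql));
    printf("%s\n", sql);   /* select col1 from test.d1001 where col1 > 10; */
    return 0;
}
```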
g_queryInfo.specifiedQueryInfo.sqlCount; - - uint64_t startTs = taosGetTimestampMs(); - - if ((nSqlCount > 0) && (nConcurrent > 0)) { - pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t)); - infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo)); - - if ((NULL == pids) || (NULL == infos)) { - taos_close(taos); - ERROR_EXIT("memory allocation failed for create threads\n"); - } - - for (uint64_t i = 0; i < nSqlCount; i++) { - for (int j = 0; j < nConcurrent; j++) { - uint64_t seq = i * nConcurrent + j; - threadInfo *pThreadInfo = infos + seq; - pThreadInfo->threadID = (int)seq; - pThreadInfo->querySeq = i; - - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(taos); - free(infos); - free(pids); - errorPrint("use database %s failed!\n\n", - g_queryInfo.dbName); - return -1; - } - } - - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint("Could not create socket : %d", - WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, - sockfd); - ERROR_EXIT("opening socket"); - } - - int retConn = connect( - sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, - __LINE__, retConn); - if (retConn < 0) { - ERROR_EXIT("connecting"); - } - pThreadInfo->sockfd = sockfd; - } - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - - pthread_create(pids + seq, NULL, specifiedTableQuery, - pThreadInfo); - } - } - } else { - g_queryInfo.specifiedQueryInfo.concurrent = 0; - } - - taos_close(taos); - - pthread_t * pidsOfSub = NULL; - threadInfo *infosOfSub = NULL; - //==== create sub threads for query from all sub table of the super table - if ((g_queryInfo.superQueryInfo.sqlCount > 0) && - (g_queryInfo.superQueryInfo.threadCnt > 0)) { - pidsOfSub = - calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = calloc( - 1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); - - if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - free(infos); - free(pids); - - ERROR_EXIT("memory allocation failed for create threads\n"); - } - - int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; - int threads = g_queryInfo.superQueryInfo.threadCnt; - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - uint64_t tableFrom = 0; - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infosOfSub + i; - pThreadInfo->threadID = i; - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? a + 1 : a; - pThreadInfo->end_table_to = - i < b ? 
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint("Could not create socket : %d", - WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, - sockfd); - ERROR_EXIT("opening socket"); - } - - int retConn = - connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, - __LINE__, retConn); - if (retConn < 0) { - ERROR_EXIT("connecting"); - } - pThreadInfo->sockfd = sockfd; - } - pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); - } - - g_queryInfo.superQueryInfo.threadCnt = threads; - } else { - g_queryInfo.superQueryInfo.threadCnt = 0; - } - - if ((nSqlCount > 0) && (nConcurrent > 0)) { - for (int i = 0; i < nConcurrent; i++) { - for (int j = 0; j < nSqlCount; j++) { - void* result; - pthread_join(pids[i * nSqlCount + j], &result); - if (*(int32_t*)result) { - g_fail = true; - } - tmfree(result); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { - threadInfo *pThreadInfo = infos + i * nSqlCount + j; -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - } - } - } - - tmfree((char *)pids); - tmfree((char *)infos); - - for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { - void* result; - pthread_join(pidsOfSub[i], &result); - if (*(int32_t*)result) { - g_fail = true; - } - tmfree(result); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { - threadInfo *pThreadInfo = infosOfSub + i; -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - } - - tmfree((char *)pidsOfSub); - tmfree((char *)infosOfSub); - - if (g_fail) { - return -1; - } - - // taos_close(taos);// workaround to use separate taos connection; - uint64_t endTs = taosGetTimestampMs(); - - uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + - g_queryInfo.superQueryInfo.totalQueried; - - fprintf(stderr, - "==== completed total queries: %" PRIu64 - ", the QPS of all threads: %10.3f====\n", - totalQueried, - (double)(totalQueried / ((endTs - startTs) / 1000.0))); - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoSubscribe.c b/src/kit/taosdemo/src/demoSubscribe.c deleted file mode 100644 index fb9800d0cdc0be90d82b72d91f0e20902c082b4f..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoSubscribe.c +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
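queryTestProcess() above splits `childTblCount` tables over the configured thread count: every thread gets `ntables / threads` tables and the first `ntables % threads` threads take one extra, so consecutive, nearly equal ranges cover all tables. A standalone sketch of that split with made-up counts:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int64_t ntables = 10;   /* e.g. child tables of one super table */
    int     threads = 4;

    int64_t a = ntables / threads;      /* base share per thread     */
    if (a < 1) {                        /* fewer tables than threads */
        threads = (int)ntables;
        a = 1;
    }
    int64_t b = (threads != 0) ? ntables % threads : 0;   /* leftover tables */

    uint64_t tableFrom = 0;
    for (int i = 0; i < threads; i++) {
        int64_t  n  = (i < b) ? a + 1 : a;               /* this thread's share */
        uint64_t to = (i < b) ? tableFrom + a : tableFrom + a - 1;
        printf("thread %d: tables [%llu, %llu] (%lld tables)\n",
               i, (unsigned long long)tableFrom, (unsigned long long)to,
               (long long)n);
        tableFrom = to + 1;             /* next thread starts right after */
    }
    return 0;
}
```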
- */ - -#include "demo.h" - -void stable_sub_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint( - "%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } - - if (param) fetchResult(res, (threadInfo *)param); - // tao_unsubscribe() will free result. -} - -void specified_sub_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, - int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint( - "%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } - - if (param) fetchResult(res, (threadInfo *)param); - // tao_unsubscribe() will free result. -} - -TAOS_SUB *subscribeImpl(QUERY_CLASS class, threadInfo *pThreadInfo, char *sql, - char *topic, bool restart, uint64_t interval) { - TAOS_SUB *tsub = NULL; - - if ((SPECIFIED_CLASS == class) && - (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { - tsub = taos_subscribe( - pThreadInfo->taos, restart, topic, sql, specified_sub_callback, - (void *)pThreadInfo, - (int)g_queryInfo.specifiedQueryInfo.subscribeInterval); - } else if ((STABLE_CLASS == class) && - (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { - tsub = - taos_subscribe(pThreadInfo->taos, restart, topic, sql, - stable_sub_callback, (void *)pThreadInfo, - (int)g_queryInfo.superQueryInfo.subscribeInterval); - } else { - tsub = taos_subscribe(pThreadInfo->taos, restart, topic, sql, NULL, - NULL, (int)interval); - } - - if (tsub == NULL) { - errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, - sql); - return NULL; - } - - return tsub; -} - -void *specifiedSubscribe(void *sarg) { - int32_t * code = calloc(1, sizeof (int32_t)); - *code = -1; - threadInfo *pThreadInfo = (threadInfo *)sarg; - // TAOS_SUB* tsub = NULL; - - setThreadName("specSub"); - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, g_queryInfo.port); - if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - goto free_of_specified_subscribe; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - goto free_of_specified_subscribe; - } - - sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - "taosdemo-subscribe-%" PRIu64 "-%d", pThreadInfo->querySeq, - pThreadInfo->threadID); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != - '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - goto free_of_specified_subscribe; - } - - // start loop to consume result - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while ((g_queryInfo.specifiedQueryInfo - .endAfterConsume[pThreadInfo->querySeq] == -1) || - 
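subscribeImpl() above wires up taos_subscribe() either with a callback (async mode) or with no callback, leaving the caller to poll taos_consume(). A minimal synchronous sketch mirroring the calls used above; the connection parameters, topic, and SQL are placeholders, and it assumes linking against the TDengine 2.x client library (-ltaos) with a reachable server:

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 6030);
    if (taos == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
    }

    /* No callback and no param: synchronous mode, results are pulled
     * explicitly with taos_consume(), as in the final branch of subscribeImpl(). */
    TAOS_SUB *tsub = taos_subscribe(taos, 1 /* restart */, "demo-topic",
                                    "select * from meters;", NULL, NULL,
                                    1000 /* poll interval, ms */);
    if (tsub == NULL) {
        fprintf(stderr, "failed to create subscription\n");
        taos_close(taos);
        return 1;
    }

    for (int i = 0; i < 5; i++) {        /* consume a few batches, then stop */
        TAOS_RES *res = taos_consume(tsub);
        if (res != NULL) {
            TAOS_ROW row;
            while ((row = taos_fetch_row(res)) != NULL) {
                /* process the newly arrived row here */
            }
        }
    }

    /* As the callbacks above note, unsubscribing releases the subscription's
     * results; 0 means progress is not kept for a later restart. */
    taos_unsubscribe(tsub, 0);
    taos_close(taos);
    return 0;
}
```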
(g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < - g_queryInfo.specifiedQueryInfo - .endAfterConsume[pThreadInfo->querySeq])) { - printf("consumed[%d]: %d, endAfterConsum[%" PRId64 "]: %d\n", - pThreadInfo->threadID, - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID], - pThreadInfo->querySeq, - g_queryInfo.specifiedQueryInfo - .endAfterConsume[pThreadInfo->querySeq]); - if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { - continue; - } - - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = - taos_consume( - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); - if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { - if (g_queryInfo.specifiedQueryInfo - .result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo - .result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - fetchResult( - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], - pThreadInfo); - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID]++; - if ((g_queryInfo.specifiedQueryInfo - .resubAfterConsume[pThreadInfo->querySeq] != -1) && - (g_queryInfo.specifiedQueryInfo - .consumed[pThreadInfo->threadID] >= - g_queryInfo.specifiedQueryInfo - .resubAfterConsume[pThreadInfo->querySeq])) { - printf("keepProgress:%d, resub specified query: %" PRIu64 "\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = - 0; - taos_unsubscribe( - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - g_queryInfo.specifiedQueryInfo - .tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo - .tsub[pThreadInfo->threadID]) { - goto free_of_specified_subscribe; - } - } - } - } - *code = 0; - taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]); - free_of_specified_subscribe: - taos_close(pThreadInfo->taos); - return code; -} - -static void *superSubscribe(void *sarg) { - int32_t * code = calloc(1, sizeof (int32_t)); - *code = -1; - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT] = {0}; - uint64_t tsubSeq; - char * subSqlStr = calloc(1, BUFFER_SIZE); - if (NULL == subSqlStr) { - errorPrint("%s", "failed to allocate memory\n"); - goto free_of_super_subscribe; - } - - setThreadName("superSub"); - - if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { - errorPrint("The table number(%" PRId64 - ") of the thread is more than max query sql count: %d\n", - pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - goto free_of_super_subscribe; - } - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, g_queryInfo.port); - if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - goto free_of_super_subscribe; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - errorPrint("use database %s failed!\n\n", 
g_queryInfo.dbName); - goto free_of_super_subscribe; - } - - char topic[32] = {0}; - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - verbosePrint("%s() LN%d, [%d], start=%" PRId64 " end=%" PRId64 - " i=%" PRIu64 "\n", - __func__, __LINE__, pThreadInfo->threadID, - pThreadInfo->start_table_from, pThreadInfo->end_table_to, - i); - sprintf(topic, "taosdemo-subscribe-%" PRIu64 "-%" PRIu64 "", i, - pThreadInfo->querySeq); - memset(subSqlStr, 0, BUFFER_SIZE); - replaceChildTblName( - g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], subSqlStr, - (int)i); - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", __func__, __LINE__, - pThreadInfo->threadID, subSqlStr); - tsub[tsubSeq] = - subscribeImpl(STABLE_CLASS, pThreadInfo, subSqlStr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval); - if (NULL == tsub[tsubSeq]) { - goto free_of_super_subscribe; - } - } - - // start loop to consume result - int consumed[MAX_QUERY_SQL_COUNT]; - for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { - consumed[i] = 0; - } - TAOS_RES *res = NULL; - - uint64_t st = 0, et = 0; - - while ( - (g_queryInfo.superQueryInfo.endAfterConsume == -1) || - (g_queryInfo.superQueryInfo.endAfterConsume > - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) { - verbosePrint("super endAfterConsume: %d, consumed: %d\n", - g_queryInfo.superQueryInfo.endAfterConsume, - consumed[pThreadInfo->end_table_to - - pThreadInfo->start_table_from]); - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { - continue; - } - - st = taosGetTimestampMs(); - performancePrint("st: %" PRIu64 " et: %" PRIu64 " st-et: %" PRIu64 - "\n", - st, et, (st - et)); - res = taos_consume(tsub[tsubSeq]); - et = taosGetTimestampMs(); - performancePrint("st: %" PRIu64 " et: %" PRIu64 " delta: %" PRIu64 - "\n", - st, et, (et - st)); - - if (res) { - if (g_queryInfo.superQueryInfo - .result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo - .result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } - consumed[tsubSeq]++; - - if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) && - (consumed[tsubSeq] >= - g_queryInfo.superQueryInfo.resubAfterConsume)) { - verbosePrint( - "%s() LN%d, keepProgress:%d, resub super table query: " - "%" PRIu64 "\n", - __func__, __LINE__, - g_queryInfo.superQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - taos_unsubscribe( - tsub[tsubSeq], - g_queryInfo.superQueryInfo.subscribeKeepProgress); - consumed[tsubSeq] = 0; - tsub[tsubSeq] = subscribeImpl( - STABLE_CLASS, pThreadInfo, subSqlStr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval); - if (NULL == tsub[tsubSeq]) { - goto free_of_super_subscribe; - } - } - } - } - } - verbosePrint( - "%s() LN%d, super endAfterConsume: %d, consumed: %d\n", __func__, - __LINE__, g_queryInfo.superQueryInfo.endAfterConsume, - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]); - taos_free_result(res); - - for (uint64_t i = 
pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - taos_unsubscribe(tsub[tsubSeq], 0); - } - *code = 0; - free_of_super_subscribe: - taos_close(pThreadInfo->taos); - tmfree(subSqlStr); - return code; -} - -int subscribeTestProcess() { - setupForAnsiEscape(); - printfQueryMeta(); - resetAfterAnsiEscape(); - - prompt(); - - TAOS *taos = NULL; - taos = - taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, - g_queryInfo.dbName, g_queryInfo.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.stbName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - taos_close(taos); // workaround to use separate taos connection; - - pthread_t * pids = NULL; - threadInfo *infos = NULL; - - pthread_t * pidsOfStable = NULL; - threadInfo *infosOfStable = NULL; - - //==== create threads for query for specified table - if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, specified query sqlCount %d.\n", __func__, - __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); - } else { - if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("specified query sqlCount %d.\n", - g_queryInfo.specifiedQueryInfo.sqlCount); - exit(EXIT_FAILURE); - } - - pids = calloc(1, g_queryInfo.specifiedQueryInfo.sqlCount * - g_queryInfo.specifiedQueryInfo.concurrent * - sizeof(pthread_t)); - if (pids == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - } - - infos = calloc(1, g_queryInfo.specifiedQueryInfo.sqlCount * - g_queryInfo.specifiedQueryInfo.concurrent * - sizeof(threadInfo)); - - if (infos == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - } - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; - j++) { - uint64_t seq = - i * g_queryInfo.specifiedQueryInfo.concurrent + j; - threadInfo *pThreadInfo = infos + seq; - pThreadInfo->threadID = (int)seq; - pThreadInfo->querySeq = i; - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - pthread_create(pids + seq, NULL, specifiedSubscribe, - pThreadInfo); - } - } - } - - //==== create threads for super table query - if (g_queryInfo.superQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, super table query sqlCount %d.\n", __func__, - __LINE__, g_queryInfo.superQueryInfo.sqlCount); - } else { - if ((g_queryInfo.superQueryInfo.sqlCount > 0) && - (g_queryInfo.superQueryInfo.threadCnt > 0)) { - pidsOfStable = calloc(1, g_queryInfo.superQueryInfo.sqlCount * - g_queryInfo.superQueryInfo.threadCnt * - sizeof(pthread_t)); - - if (pidsOfStable) { - errorPrint("%s", "failed to allocate memory\n"); - } - - infosOfStable = calloc(1, g_queryInfo.superQueryInfo.sqlCount * - g_queryInfo.superQueryInfo.threadCnt * - sizeof(threadInfo)); - - if (infosOfStable) { - errorPrint("%s", "failed to allocate memmory\n"); - } - - int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; - int threads = g_queryInfo.superQueryInfo.threadCnt; - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - uint64_t tableFrom = 0; - for 
(int j = 0; j < threads; j++) { - uint64_t seq = i * threads + j; - threadInfo *pThreadInfo = infosOfStable + seq; - pThreadInfo->threadID = (int)seq; - pThreadInfo->querySeq = i; - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = j < b ? a + 1 : a; - pThreadInfo->end_table_to = - j < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = - NULL; // workaround to use separate taos connection; - pthread_create(pidsOfStable + seq, NULL, superSubscribe, - pThreadInfo); - } - } - - g_queryInfo.superQueryInfo.threadCnt = threads; - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - for (int j = 0; j < threads; j++) { - uint64_t seq = i * threads + j; - void* result; - pthread_join(pidsOfStable[seq], &result); - if (*(int32_t*)result) { - g_fail = true; - } - tmfree(result); - } - } - } - } - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { - uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; - void* result; - pthread_join(pids[seq], &result); - if (*(int32_t*)result) { - g_fail = true; - } - tmfree(result); - } - } - - tmfree((char *)pids); - tmfree((char *)infos); - - tmfree((char *)pidsOfStable); - tmfree((char *)infosOfStable); - // taos_close(taos); - if (g_fail) { - return -1; - } - return 0; -} \ No newline at end of file diff --git a/src/kit/taosdemo/src/demoUtil.c b/src/kit/taosdemo/src/demoUtil.c deleted file mode 100644 index bae2e30f53db95df6024eee4f7c48d601b5240e3..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/src/demoUtil.c +++ /dev/null @@ -1,594 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
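The worker threads above each calloc() an int32_t status, leave it at -1 until they finish cleanly, and return it; the driver reads it back through pthread_join() and frees it, flipping a global failure flag on any non-zero code. A stripped-down sketch of that pattern with a hypothetical worker:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Worker returns a heap-allocated status code: -1 until it finishes cleanly,
 * mirroring how specifiedSubscribe()/superSubscribe() report failures. */
static void *worker(void *arg) {
    int32_t *code = calloc(1, sizeof(int32_t));
    *code = -1;

    int id = *(int *)arg;
    if (id % 2 == 0) {          /* pretend even-numbered workers succeed */
        *code = 0;
    }
    return code;
}

int main(void) {
    enum { N = 4 };
    pthread_t tids[N];
    int       ids[N];
    int       failed = 0;

    for (int i = 0; i < N; i++) {
        ids[i] = i;
        pthread_create(&tids[i], NULL, worker, &ids[i]);
    }
    for (int i = 0; i < N; i++) {
        void *result = NULL;
        pthread_join(tids[i], &result);     /* collect the heap status code */
        if (*(int32_t *)result != 0) {
            failed = 1;                     /* any failure marks the whole run */
        }
        free(result);
    }
    printf("any worker failed: %s\n", failed ? "yes" : "no");
    return 0;
}
```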
- */ - -#include "demo.h" - -void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) { - fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, - wrong_value); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorUnrecognized(char *program, char *wrong_arg) { - fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorPrintReqArg(char *program, char *wrong_arg) { - fprintf(stderr, "%s: option requires an argument -- '%s'\n", program, - wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorPrintReqArg2(char *program, char *wrong_arg) { - fprintf(stderr, "%s: option requires a number argument '-%s'\n", program, - wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void errorPrintReqArg3(char *program, char *wrong_arg) { - fprintf(stderr, "%s: option '%s' requires an argument\n", program, - wrong_arg); - fprintf( - stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -void tmfclose(FILE *fp) { - if (NULL != fp) { - fclose(fp); - } -} - -void tmfree(void *buf) { - if (NULL != buf) { - free(buf); - buf = NULL; - } -} - -void ERROR_EXIT(const char *msg) { - errorPrint("%s", msg); - exit(EXIT_FAILURE); -} - -#ifdef WINDOWS -#define _CRT_RAND_S -#include -#include - -typedef unsigned __int32 uint32_t; - -#pragma comment(lib, "ws2_32.lib") -// Some old MinGW/CYGWIN distributions don't define this: -#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING -#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 -#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING - -HANDLE g_stdoutHandle; -DWORD g_consoleMode; - -void setupForAnsiEscape(void) { - DWORD mode = 0; - g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); - - if (g_stdoutHandle == INVALID_HANDLE_VALUE) { - exit(GetLastError()); - } - - if (!GetConsoleMode(g_stdoutHandle, &mode)) { - exit(GetLastError()); - } - - g_consoleMode = mode; - - // Enable ANSI escape codes - mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; - - if (!SetConsoleMode(g_stdoutHandle, mode)) { - exit(GetLastError()); - } -} - -void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); - - // Reset console mode - if (!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { - exit(GetLastError()); - } -} - -int taosRandom() { - int number; - rand_s(&number); - - return number; -} -#else // Not windows -void setupForAnsiEscape(void) {} - -void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); -} - -#include - -int taosRandom() { return rand(); } - -#endif - -bool isStringNumber(char *input) { - int len = (int)strlen(input); - if (0 == len) { - return false; - } - - for (int i = 0; i < len; i++) { - if (!isdigit(input[i])) return false; - } - - return true; -} - -char *formatTimestamp(char *buf, int64_t val, int precision) { - time_t tt; - if (precision == TSDB_TIME_PRECISION_MICRO) { - tt = (time_t)(val / 1000000); - } - if (precision == TSDB_TIME_PRECISION_NANO) { - tt = (time_t)(val / 1000000000); - } else { - tt = (time_t)(val / 1000); - } - - /* comment out as it make testcases like select_with_tags.sim fail. - but in windows, this may cause the call to localtime crash if tt < 0, - need to find a better solution. 
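formatTimestamp() above converts an epoch value to seconds before calling localtime(); as written, the microsecond branch is a separate `if` rather than part of the `if/else` chain, so a microsecond value also falls into the final millisecond branch and is divided by 1000 instead. A small sketch of the intended branching; the precision enum here is a local stand-in for the TSDB_TIME_PRECISION_* macros:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Local stand-in for TDengine's precision codes (illustrative only). */
enum precision { PREC_MILLI, PREC_MICRO, PREC_NANO };

static time_t epoch_to_seconds(int64_t val, enum precision p) {
    if (p == PREC_MICRO) {                /* microseconds */
        return (time_t)(val / 1000000);
    } else if (p == PREC_NANO) {          /* nanoseconds  */
        return (time_t)(val / 1000000000);
    } else {                              /* milliseconds (default) */
        return (time_t)(val / 1000);
    }
}

int main(void) {
    int64_t ts_us = 1500000000000000LL;   /* same instant in microseconds */
    int64_t ts_ms = 1500000000000LL;      /* ... and in milliseconds      */

    /* Both conversions should land on the same second. */
    printf("us -> %lld, ms -> %lld\n",
           (long long)epoch_to_seconds(ts_us, PREC_MICRO),
           (long long)epoch_to_seconds(ts_ms, PREC_MILLI));
    return 0;
}
```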
- if (tt < 0) { - tt = 0; - } - */ - -#ifdef WINDOWS - if (tt < 0) tt = 0; -#endif - - struct tm *ptm = localtime(&tt); - size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); - - if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); - } else if (precision == TSDB_TIME_PRECISION_NANO) { - sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); - } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); - } - - return buf; -} - -int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName, - char * stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl, - int64_t limit, uint64_t offset, - bool escapChar) { - char command[SQL_BUFF_LEN] = "\0"; - char limitBuf[100] = "\0"; - - TAOS_RES *res; - TAOS_ROW row = NULL; - int64_t childTblCount = (limit < 0) ? DEFAULT_CHILDTABLES : limit; - int64_t count = 0; - char * childTblName = *childTblNameOfSuperTbl; - - if (childTblName == NULL) { - childTblName = (char *)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); - if (childTblName == NULL) { - errorPrint("%s", "failed to allocate memory\n"); - } - } - char *pTblName = childTblName; - - snprintf(limitBuf, 100, " limit %" PRId64 " offset %" PRIu64 "", limit, - offset); - - // get all child table name use cmd: select tbname from superTblName; - snprintf(command, SQL_BUFF_LEN, - escapChar ? "select tbname from %s.`%s` %s" - : "select tbname from %s.%s %s", - dbName, stbName, limitBuf); - - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - taos_free_result(res); - taos_close(taos); - errorPrint("failed to run command %s, reason: %s\n", command, - taos_errstr(res)); - exit(EXIT_FAILURE); - } - - while ((row = taos_fetch_row(res)) != NULL) { - int32_t *len = taos_fetch_lengths(res); - - if (0 == strlen((char *)row[0])) { - errorPrint("No.%" PRId64 " table return empty name\n", count); - exit(EXIT_FAILURE); - } - - tstrncpy(pTblName, (char *)row[0], len[0] + 1); - // printf("==== sub table name: %s\n", pTblName); - count++; - if (count >= childTblCount - 1) { - char *tmp = realloc( - childTblName, - (size_t)(childTblCount * 1.5 * TSDB_TABLE_NAME_LEN + 1)); - if (tmp != NULL) { - childTblName = tmp; - childTblCount = (int)(childTblCount * 1.5); - memset(childTblName + count * TSDB_TABLE_NAME_LEN, 0, - (size_t)((childTblCount - count) * TSDB_TABLE_NAME_LEN)); - } else { - // exit, if allocate more memory failed - tmfree(childTblName); - taos_free_result(res); - taos_close(taos); - errorPrint( - "%s() LN%d, realloc fail for save child table name of " - "%s.%s\n", - __func__, __LINE__, dbName, stbName); - exit(EXIT_FAILURE); - } - } - pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; - } - - *childTblCountOfSuperTbl = count; - *childTblNameOfSuperTbl = childTblName; - - taos_free_result(res); - return 0; -} - -int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, - char ** childTblNameOfSuperTbl, - int64_t *childTblCountOfSuperTbl) { - return getChildNameOfSuperTableWithLimitAndOffset( - taos, dbName, stbName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, - -1, 0, false); -} - -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr) { - uint16_t rest_port = port + TSDB_PORT_HTTP; - struct hostent *server = gethostbyname(host); - if ((server == NULL) || (server->h_addr == NULL)) { - errorPrint("%s", "no such host"); - return -1; - } - - debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", - server->h_name, server->h_addr, - 
(server->h_addrtype == AF_INET) ? "ipv4" : "ipv6", - server->h_length); - - memset(serv_addr, 0, sizeof(struct sockaddr_in)); - serv_addr->sin_family = AF_INET; - serv_addr->sin_port = htons(rest_port); -#ifdef WINDOWS - serv_addr->sin_addr.s_addr = inet_addr(host); -#else - memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); -#endif - return 0; -} - -void prompt() { - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } -} - -void replaceChildTblName(char *inSql, char *outSql, int tblIndex) { - char sourceString[32] = "xxxx"; - char subTblName[TSDB_TABLE_NAME_LEN]; - sprintf(subTblName, "%s.%s", g_queryInfo.dbName, - g_queryInfo.superQueryInfo.childTblName + - tblIndex * TSDB_TABLE_NAME_LEN); - - // printf("inSql: %s\n", inSql); - - char *pos = strstr(inSql, sourceString); - if (0 == pos) { - return; - } - - tstrncpy(outSql, inSql, pos - inSql + 1); - // printf("1: %s\n", outSql); - strncat(outSql, subTblName, BUFFER_SIZE - 1); - // printf("2: %s\n", outSql); - strncat(outSql, pos + strlen(sourceString), BUFFER_SIZE - 1); - // printf("3: %s\n", outSql); -} - -int isCommentLine(char *line) { - if (line == NULL) return 1; - - return regexMatch(line, "^\\s*#.*", REG_EXTENDED); -} - -int regexMatch(const char *s, const char *reg, int cflags) { - regex_t regex; - char msgbuf[100] = {0}; - - /* Compile regular expression */ - if (regcomp(®ex, reg, cflags) != 0) { - ERROR_EXIT("Fail to compile regex\n"); - } - - /* Execute regular expression */ - int reti = regexec(®ex, s, 0, NULL, 0); - if (!reti) { - regfree(®ex); - return 1; - } else if (reti == REG_NOMATCH) { - regfree(®ex); - return 0; - } else { - regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - regfree(®ex); - printf("Regex match failed: %s\n", msgbuf); - exit(EXIT_FAILURE); - } - return 0; -} - -int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { - verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - - if (code != 0) { - if (!quiet) { - errorPrint("Failed to execute <%s>, reason: %s\n", command, - taos_errstr(res)); - } - taos_free_result(res); - // taos_close(taos); - return -1; - } - - if (INSERT_TYPE == type) { - int affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; - } - - taos_free_result(res); - return 0; -} - -int postProceSql(char *host, uint16_t port, char *sqlstr, - threadInfo *pThreadInfo) { - char *req_fmt = - "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: " - "Basic %s\r\nContent-Length: %d\r\nContent-Type: " - "application/x-www-form-urlencoded\r\n\r\n%s"; - - char *url = "/rest/sql"; - - int bytes, sent, received, req_str_len, resp_len; - char * request_buf; - char response_buf[RESP_BUF_LEN]; - uint16_t rest_port = port + TSDB_PORT_HTTP; - - int req_buf_len = (int)strlen(sqlstr) + REQ_EXTRA_BUF_LEN; - - request_buf = malloc(req_buf_len); - if (NULL == request_buf) { - errorPrint("%s", "cannot allocate memory.\n"); - exit(EXIT_FAILURE); - } - - char userpass_buf[INPUT_BUF_LEN]; - int mod_table[] = {0, 2, 1}; - - static char base64[] = { - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', - 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'}; - - if 
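isCommentLine()/regexMatch() above use POSIX regcomp()/regexec() to skip `#`-prefixed lines when reading SQL files. A compact standalone version; it spells the original "^\\s*#.*" pattern as `[[:space:]]`, the portable POSIX form of `\s`:

```c
#include <regex.h>
#include <stdio.h>

/* Return 1 if the line is a "#..." comment (possibly indented), 0 otherwise,
 * following the same idea as isCommentLine()/regexMatch(). */
static int is_comment_line(const char *line) {
    regex_t re;
    if (regcomp(&re, "^[[:space:]]*#.*", REG_EXTENDED | REG_NOSUB) != 0) {
        return 0;                  /* treat a compile failure as "not a comment" */
    }
    int match = (regexec(&re, line, 0, NULL, 0) == 0);
    regfree(&re);                  /* always release the compiled pattern */
    return match;
}

int main(void) {
    printf("%d\n", is_comment_line("  # a comment"));          /* 1 */
    printf("%d\n", is_comment_line("select * from meters;"));  /* 0 */
    return 0;
}
```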
(g_args.test_mode == INSERT_TEST) { - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", g_Dbs.user, - g_Dbs.password); - } else { - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", g_queryInfo.user, - g_queryInfo.password); - } - - size_t userpass_buf_len = strlen(userpass_buf); - size_t encoded_len = 4 * ((userpass_buf_len + 2) / 3); - - char base64_buf[INPUT_BUF_LEN]; - - memset(base64_buf, 0, INPUT_BUF_LEN); - - for (int n = 0, m = 0; n < userpass_buf_len;) { - uint32_t oct_a = - n < userpass_buf_len ? (unsigned char)userpass_buf[n++] : 0; - uint32_t oct_b = - n < userpass_buf_len ? (unsigned char)userpass_buf[n++] : 0; - uint32_t oct_c = - n < userpass_buf_len ? (unsigned char)userpass_buf[n++] : 0; - uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; - - base64_buf[m++] = base64[(triple >> 3 * 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 2 * 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 1 * 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 0 * 6) & 0x3f]; - } - - for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) - base64_buf[encoded_len - 1 - l] = '='; - - debugPrint("%s() LN%d: auth string base64 encoded: %s\n", __func__, - __LINE__, base64_buf); - char *auth = base64_buf; - - int r = snprintf(request_buf, req_buf_len, req_fmt, url, host, rest_port, - auth, strlen(sqlstr), sqlstr); - if (r >= req_buf_len) { - free(request_buf); - ERROR_EXIT("too long request"); - } - verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); - - req_str_len = (int)strlen(request_buf); - sent = 0; - do { -#ifdef WINDOWS - bytes = send(pThreadInfo->sockfd, request_buf + sent, - req_str_len - sent, 0); -#else - bytes = - write(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent); -#endif - if (bytes < 0) ERROR_EXIT("writing message to socket"); - if (bytes == 0) break; - sent += bytes; - } while (sent < req_str_len); - - memset(response_buf, 0, RESP_BUF_LEN); - resp_len = sizeof(response_buf) - 1; - received = 0; - - char resEncodingChunk[] = "Encoding: chunked"; - char resHttp[] = "HTTP/1.1 "; - char resHttpOk[] = "HTTP/1.1 200 OK"; - - do { -#ifdef WINDOWS - bytes = recv(pThreadInfo->sockfd, response_buf + received, - resp_len - received, 0); -#else - bytes = read(pThreadInfo->sockfd, response_buf + received, - resp_len - received); -#endif - verbosePrint("%s() LN%d: bytes:%d\n", __func__, __LINE__, bytes); - if (bytes < 0) { - free(request_buf); - ERROR_EXIT("reading response from socket"); - } - if (bytes == 0) break; - received += bytes; - - verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - - response_buf[RESP_BUF_LEN - 1] = '\0'; - if (strlen(response_buf)) { - verbosePrint( - "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - - if (((NULL != strstr(response_buf, resEncodingChunk)) && - (NULL != strstr(response_buf, resHttp))) || - ((NULL != strstr(response_buf, resHttpOk)) && - (NULL != strstr(response_buf, "\"status\":")))) { - debugPrint( - "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - break; - } - } - } while (received < resp_len); - - if (received == resp_len) { - free(request_buf); - ERROR_EXIT("storing complete response from socket"); - } - - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(response_buf, pThreadInfo); - } - - free(request_buf); - - if (NULL == strstr(response_buf, resHttpOk)) { - 
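postProceSql() above hand-rolls base64 encoding of "user:password" for the REST interface's `Authorization: Basic` header before writing the HTTP request to a raw socket. A standalone sketch of that encoder, using the same lookup-table-and-padding approach in a simplified form:

```c
#include <stdio.h>
#include <string.h>

/* Base64-encode src into dst (dst must hold 4*((len+2)/3)+1 bytes),
 * as postProceSql() does for the "user:password" REST credential. */
static void base64_encode(const char *src, char *dst) {
    static const char tbl[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    size_t len = strlen(src);
    size_t n = 0, m = 0;

    while (n < len) {
        unsigned a = (unsigned char)src[n++];
        unsigned b = n < len ? (unsigned char)src[n++] : 0;
        unsigned c = n < len ? (unsigned char)src[n++] : 0;
        unsigned triple = (a << 16) | (b << 8) | c;

        dst[m++] = tbl[(triple >> 18) & 0x3f];
        dst[m++] = tbl[(triple >> 12) & 0x3f];
        dst[m++] = tbl[(triple >> 6) & 0x3f];
        dst[m++] = tbl[triple & 0x3f];
    }
    /* Pad the tail: 1 leftover input byte -> "==", 2 leftover bytes -> "=". */
    static const int pad[] = {0, 2, 1};
    for (int i = 0; i < pad[len % 3]; i++) dst[m - 1 - i] = '=';
    dst[m] = '\0';
}

int main(void) {
    char out[64];
    base64_encode("root:taosdata", out);   /* default credentials, for illustration */
    printf("Authorization: Basic %s\n", out);   /* cm9vdDp0YW9zZGF0YQ== */
    return 0;
}
```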
errorPrint("Response:\n%s\n", response_buf); - return -1; - } - return 0; -} - -void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo) { - TAOS_ROW row = NULL; - int num_rows = 0; - int num_fields = taos_field_count(res); - TAOS_FIELD *fields = taos_fetch_fields(res); - - char *databuf = (char *)calloc(1, FETCH_BUFFER_SIZE); - if (databuf == NULL) { - errorPrint( - "%s() LN%d, failed to malloc, warning: save result to file " - "slowly!\n", - __func__, __LINE__); - return; - } - - int64_t totalLen = 0; - - // fetch the records row by row - while ((row = taos_fetch_row(res))) { - if (totalLen >= (FETCH_BUFFER_SIZE - HEAD_BUFF_LEN * 2)) { - if (strlen(pThreadInfo->filePath) > 0) - appendResultBufToFile(databuf, pThreadInfo); - totalLen = 0; - memset(databuf, 0, FETCH_BUFFER_SIZE); - } - num_rows++; - char temp[HEAD_BUFF_LEN] = {0}; - int len = taos_print_row(temp, row, fields, num_fields); - len += sprintf(temp + len, "\n"); - // printf("query result:%s\n", temp); - memcpy(databuf + totalLen, temp, len); - totalLen += len; - verbosePrint("%s() LN%d, totalLen: %" PRId64 "\n", __func__, __LINE__, - totalLen); - } - - verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", __func__, __LINE__, - databuf, pThreadInfo->filePath); - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(databuf, pThreadInfo); - } - free(databuf); -} \ No newline at end of file diff --git a/src/kit/taosdemo/subscribe.json b/src/kit/taosdemo/subscribe.json deleted file mode 100644 index 9faf03a03d03b8baeffeb6a4397d1727dde0c594..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/subscribe.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "filetype": "subscribe", - "cfgdir": "/etc/taos", - "host": "127.0.0.1", - "port": 6030, - "user": "root", - "password": "taosdata", - "databases": "test", - "specified_table_query": { - "concurrent": 1, - "mode": "sync", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "resubAfterConsume": 10, - "sqls": [ - { - "sql": "select avg(col1) from meters where col1 > 1;", - "result": "./subscribe_res0.txt" - } - ] - }, - "super_table_query": { - "stblname": "meters", - "threads": 1, - "mode": "sync", - "interval": 1000, - "restart": "yes", - "keepProgress": "yes", - "sqls": [ - { - "sql": "select col1 from xxxx where col1 > 10;", - "result": "./subscribe_res1.txt" - } - ] - } -} diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c deleted file mode 100644 index 0a573b799ae184042f1126cb179e3909c6a2249b..0000000000000000000000000000000000000000 --- a/src/kit/taosdemo/taosdemo.c +++ /dev/null @@ -1,12273 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - - -/* - when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. 
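fetchResult() above walks a result set with taos_fetch_row() and renders each row with taos_print_row() before buffering it to disk. A minimal sketch of that loop that prints instead of buffering; connection details and the SQL are placeholders, and it assumes linking against the TDengine client (-ltaos) with a reachable server:

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 6030);
    if (taos == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
    }

    TAOS_RES *res = taos_query(taos, "select * from meters limit 10;");
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
        taos_free_result(res);
        taos_close(taos);
        return 1;
    }

    int         nfields = taos_field_count(res);
    TAOS_FIELD *fields  = taos_fetch_fields(res);
    TAOS_ROW    row;
    char        line[4096];

    /* Same row-by-row walk as fetchResult(), printing each rendered row. */
    while ((row = taos_fetch_row(res)) != NULL) {
        taos_print_row(line, row, fields, nfields);
        printf("%s\n", line);
    }

    taos_free_result(res);
    taos_close(taos);
    return 0;
}
```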
- */ - -#include -#include -#include -#define _GNU_SOURCE -#define CURL_STATICLIB - -#ifdef LINUX -#include -#include -#ifndef _ALPINE -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#else -#include -#include -#endif - -#include -#include -#include "cJSON.h" - -#include "os.h" -#include "taos.h" -#include "taoserror.h" -#include "tutil.h" - -#define REQ_EXTRA_BUF_LEN 1024 -#define RESP_BUF_LEN 4096 - -extern char configDir[]; - -#define STR_INSERT_INTO "INSERT INTO " - -#define MAX_RECORDS_PER_REQ 32766 - -#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. - -#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN -#define COND_BUF_LEN (BUFFER_SIZE - 30) -#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) - -#define MAX_USERNAME_SIZE 64 -#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html -#define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space -#define OPT_ABORT 1 /* –abort */ -#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. -#define MAX_PATH_LEN 4096 - -#define DEFAULT_START_TIME 1500000000000 - -#define MAX_PREPARED_RAND 1000000 -#define INT_BUFF_LEN 12 -#define BIGINT_BUFF_LEN 21 -#define SMALLINT_BUFF_LEN 7 -#define TINYINT_BUFF_LEN 5 -#define BOOL_BUFF_LEN 6 -#define FLOAT_BUFF_LEN 22 -#define DOUBLE_BUFF_LEN 42 -#define TIMESTAMP_BUFF_LEN 21 - -#define MAX_SAMPLES 10000 -#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp - -#define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 200 - -#define MAX_QUERY_SQL_COUNT 100 - -#define MAX_DATABASE_COUNT 256 -#define INPUT_BUF_LEN 256 - -#define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq -#define SMALL_BUFF_LEN 8 -#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3) -#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16) - -#define DEFAULT_NTHREADS 8 -#define DEFAULT_TIMESTAMP_STEP 1 -#define DEFAULT_INTERLACE_ROWS 0 -#define DEFAULT_DATATYPE_NUM 1 -#define DEFAULT_CHILDTABLES 10000 - -#define STMT_BIND_PARAM_BATCH 1 - -char* g_sampleDataBuf = NULL; -#if STMT_BIND_PARAM_BATCH == 1 - // bind param batch -char* g_sampleBindBatchArray = NULL; -#endif - -enum TEST_MODE { - INSERT_TEST, // 0 - QUERY_TEST, // 1 - SUBSCRIBE_TEST, // 2 - INVAID_TEST -}; - -typedef enum CREATE_SUB_TABLE_MOD_EN { - PRE_CREATE_SUBTBL, - AUTO_CREATE_SUBTBL, - NO_CREATE_SUBTBL -} CREATE_SUB_TABLE_MOD_EN; - -typedef enum TABLE_EXISTS_EN { - TBL_NO_EXISTS, - TBL_ALREADY_EXISTS, - TBL_EXISTS_BUTT -} TABLE_EXISTS_EN; - -enum enumSYNC_MODE { - SYNC_MODE, - ASYNC_MODE, - MODE_BUT -}; - -enum enum_TAOS_INTERFACE { - TAOSC_IFACE, - REST_IFACE, - STMT_IFACE, - INTERFACE_BUT -}; - -typedef enum enumQUERY_CLASS { - SPECIFIED_CLASS, - STABLE_CLASS, - CLASS_BUT -} QUERY_CLASS; - -typedef enum enum_PROGRESSIVE_OR_INTERLACE { - PROGRESSIVE_INSERT_MODE, - INTERLACE_INSERT_MODE, - INVALID_INSERT_MODE -} PROG_OR_INTERLACE_MODE; - -typedef enum enumQUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT -} QUERY_TYPE; - -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - 
TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- -enum _show_stables_index { - TSDB_SHOW_STABLES_NAME_INDEX, - TSDB_SHOW_STABLES_CREATED_TIME_INDEX, - TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, - TSDB_SHOW_STABLES_TID_INDEX, - TSDB_SHOW_STABLES_VGID_INDEX, - TSDB_MAX_SHOW_STABLES -}; - -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -/* Used by main to communicate with parse_opt. */ -static char *g_dupstr = NULL; - -typedef struct SArguments_S { - char *metaFile; - uint32_t test_mode; - char *host; - uint16_t port; - uint16_t iface; - char * user; - char password[SHELL_MAX_PASSWORD_LEN]; - char * database; - int replica; - char * tb_prefix; - char * sqlFile; - bool use_metric; - bool drop_database; - bool aggr_func; - bool answer_yes; - bool debug_print; - bool verbose_print; - bool performance_print; - char * output_file; - bool async_mode; - char data_type[MAX_NUM_COLUMNS+1]; - char *dataType[MAX_NUM_COLUMNS+1]; - uint32_t binwidth; - uint32_t columnCount; - uint64_t lenOfOneRow; - uint32_t nthreads; - uint64_t insert_interval; - uint64_t timestamp_step; - int64_t query_times; - int64_t prepared_rand; - uint32_t interlaceRows; - uint32_t reqPerReq; // num_of_records_per_req - uint64_t max_sql_len; - int64_t ntables; - int64_t insertRows; - int abort; - uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. according to database precision - uint32_t method_of_delete; - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - bool demo_mode; // use default column name and semi-random data -} SArguments; - -typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN]; - char data_type; - char dataType[DATATYPE_BUFF_LEN]; - uint32_t dataLen; - char note[NOTE_BUFF_LEN]; -} StrColumn; - -typedef struct SSuperTable_S { - char stbName[TSDB_TABLE_NAME_LEN]; - char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample - char childTblPrefix[TBNAME_PREFIX_LEN]; - uint16_t childTblExists; - int64_t childTblCount; - uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql - uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table - uint16_t iface; // 0: taosc, 1: rest, 2: stmt - int64_t childTblLimit; - uint64_t childTblOffset; - - // int multiThreadWriteOneTbl; // 0: no, 1: yes - uint32_t interlaceRows; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. 
according to database precision - uint64_t maxSqlLen; // - - uint64_t insertInterval; // insert interval, will override global insert interval - int64_t insertRows; - int64_t timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; - char sampleFormat[SMALL_BUFF_LEN]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN]; - char tagsFile[MAX_FILE_NAME_LEN]; - - uint32_t columnCount; - StrColumn columns[TSDB_MAX_COLUMNS]; - uint32_t tagCount; - StrColumn tags[TSDB_MAX_TAGS]; - - char* childTblName; - char* colsOfCreateChildTable; - uint64_t lenOfOneRow; - uint64_t lenOfTagOfOneRow; - - char* sampleDataBuf; - bool useSampleTs; - - uint32_t tagSource; // 0: rand, 1: tag sample - char* tagDataBuf; - uint32_t tagSampleCount; - uint32_t tagUsePos; - -#if STMT_BIND_PARAM_BATCH == 1 - // bind param batch - char *sampleBindBatchArray; -#endif - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; -} SSuperTable; - -typedef struct { - char name[TSDB_DB_NAME_LEN]; - char create_time[32]; - int64_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[64]; - int32_t cache; //MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[SMALL_BUFF_LEN]; // time resolution - int8_t update; - char status[16]; -} SDbInfo; - -typedef struct SDbCfg_S { - // int maxtablesPerVnode; - uint32_t minRows; // 0 means default - uint32_t maxRows; // 0 means default - int comp; - int walLevel; - int cacheLast; - int fsync; - int replica; - int update; - int keep; - int days; - int cache; - int blocks; - int quorum; - char precision[SMALL_BUFF_LEN]; -} SDbCfg; - -typedef struct SDataBase_S { - char dbName[TSDB_DB_NAME_LEN]; - bool drop; // 0: use exists, 1: if exists, drop then new create - SDbCfg dbCfg; - uint64_t superTblCount; - SSuperTable* superTbls; -} SDataBase; - -typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_HOSTNAME_SIZE]; - struct sockaddr_in serv_addr; - - uint16_t port; - char user[MAX_USERNAME_SIZE]; - char password[SHELL_MAX_PASSWORD_LEN]; - char resultFile[MAX_FILE_NAME_LEN]; - bool use_metric; - bool aggr_func; - bool asyncMode; - - uint32_t threadCount; - uint32_t threadCountForCreateTbl; - uint32_t dbCount; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - SDataBase* db; -} SDbs; - -typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint32_t concurrent; - int sqlCount; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - uint64_t queryTimes; - bool subscribeRestart; - int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume[MAX_QUERY_SQL_COUNT]; - int endAfterConsume[MAX_QUERY_SQL_COUNT]; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - char topic[MAX_QUERY_SQL_COUNT][32]; - int consumed[MAX_QUERY_SQL_COUNT]; - TAOS_RES* res[MAX_QUERY_SQL_COUNT]; - uint64_t totalQueried; -} SpecifiedQueryInfo; - -typedef struct SuperQueryInfo_S { - char stbName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint32_t threadCnt; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - bool subscribeRestart; - int subscribeKeepProgress; - uint64_t queryTimes; - int64_t childTblCount; - char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq - int sqlCount; - char 
sql[MAX_QUERY_SQL_COUNT][BUFFER_SIZE+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume; - int endAfterConsume; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - - char* childTblName; - uint64_t totalQueried; -} SuperQueryInfo; - -typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN]; - char host[MAX_HOSTNAME_SIZE]; - uint16_t port; - struct sockaddr_in serv_addr; - char user[MAX_USERNAME_SIZE]; - char password[SHELL_MAX_PASSWORD_LEN]; - char dbName[TSDB_DB_NAME_LEN]; - char queryMode[SMALL_BUFF_LEN]; // taosc, rest - - SpecifiedQueryInfo specifiedQueryInfo; - SuperQueryInfo superQueryInfo; - uint64_t totalQueried; -} SQueryMetaInfo; - -typedef struct SThreadInfo_S { - TAOS * taos; - TAOS_STMT *stmt; - int64_t *bind_ts; - -#if STMT_BIND_PARAM_BATCH == 1 - int64_t *bind_ts_array; - char *bindParams; - char *is_null; -#else - char* sampleBindArray; -#endif - - int threadID; - char db_name[TSDB_DB_NAME_LEN]; - uint32_t time_precision; - char filePath[4096]; - FILE *fp; - char tb_prefix[TSDB_TABLE_NAME_LEN]; - uint64_t start_table_from; - uint64_t end_table_to; - int64_t ntables; - int64_t tables_created; - uint64_t data_of_rate; - int64_t start_time; - char* cols; - bool use_metric; - SSuperTable* stbInfo; - char *buffer; // sql cmd buffer - - // for async insert - tsem_t lock_sem; - int64_t counter; - uint64_t st; - uint64_t et; - uint64_t lastTs; - - // sample data - int64_t samplePos; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - // insert delay statistics - uint64_t cntDelay; - uint64_t totalDelay; - uint64_t avgDelay; - uint64_t maxDelay; - uint64_t minDelay; - - // seq of query or subscribe - uint64_t querySeq; // sequence number of sql command - TAOS_SUB* tsub; - - int sockfd; -} threadInfo; - -#ifdef WINDOWS -#define _CRT_RAND_S - -#include -#include - -typedef unsigned __int32 uint32_t; - -#pragma comment ( lib, "ws2_32.lib" ) -// Some old MinGW/CYGWIN distributions don't define this: -#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING -#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 -#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING - -static HANDLE g_stdoutHandle; -static DWORD g_consoleMode; - -static void setupForAnsiEscape(void) { - DWORD mode = 0; - g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); - - if(g_stdoutHandle == INVALID_HANDLE_VALUE) { - exit(GetLastError()); - } - - if(!GetConsoleMode(g_stdoutHandle, &mode)) { - exit(GetLastError()); - } - - g_consoleMode = mode; - - // Enable ANSI escape codes - mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; - - if(!SetConsoleMode(g_stdoutHandle, mode)) { - exit(GetLastError()); - } -} - -static void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); - - // Reset console mode - if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { - exit(GetLastError()); - } -} - -static int taosRandom() -{ - int number; - rand_s(&number); - - return number; -} -#else // Not windows -static void setupForAnsiEscape(void) {} - -static void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); -} - -#include - -static int taosRandom() -{ - return rand(); -} - -#endif // ifdef Windows - -static void prompt(); -static int createDatabasesAndStables(); -static void createChildTables(); -static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); -static int postProceSql(char *host, uint16_t port, char* sqlstr, threadInfo *pThreadInfo); -static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, - int disorderRatio, int disorderRange); -static bool 
getInfoFromJsonFile(char* file); -static void init_rand_data(); -static int regexMatch(const char *s, const char *reg, int cflags); - -/* ************ Global variables ************ */ - -int32_t* g_randint; -uint32_t* g_randuint; -int64_t* g_randbigint; -uint64_t* g_randubigint; -float* g_randfloat; -double* g_randdouble; - -char *g_randbool_buff = NULL; -char *g_randint_buff = NULL; -char *g_randuint_buff = NULL; -char *g_rand_voltage_buff = NULL; -char *g_randbigint_buff = NULL; -char *g_randubigint_buff = NULL; -char *g_randsmallint_buff = NULL; -char *g_randusmallint_buff = NULL; -char *g_randtinyint_buff = NULL; -char *g_randutinyint_buff = NULL; -char *g_randfloat_buff = NULL; -char *g_rand_current_buff = NULL; -char *g_rand_phase_buff = NULL; -char *g_randdouble_buff = NULL; - -char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)", - "max(current)", "min(current)", "first(current)", "last(current)"}; - -char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", - "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; - -SArguments g_args = { - NULL, // metaFile - 0, // test_mode - "localhost", // host - 6030, // port - INTERFACE_BUT, // iface - "root", // user -#ifdef _TD_POWER_ - "powerdb", // password -#elif (_TD_TQ_ == true) - "tqueue", // password -#elif (_TD_PRO_ == true) - "prodb", // password -#elif (_TD_KH_ == true) - "khroot", // password -#elif (_TD_JH_ == true) - "jhdata", // password -#else - "taosdata", // password -#endif - "test", // database - 1, // replica - "d", // tb_prefix - NULL, // sqlFile - true, // use_metric - true, // drop_database - false, // aggr_func - false, // debug_print - false, // verbose_print - false, // performance statistic print - false, // answer_yes; - "./output.txt", // output_file - 0, // mode : sync or async - {TSDB_DATA_TYPE_FLOAT, - TSDB_DATA_TYPE_INT, - TSDB_DATA_TYPE_FLOAT}, - { - "FLOAT", // dataType - "INT", // dataType - "FLOAT", // dataType. demo mode has 3 columns - }, - 64, // binwidth - 4, // columnCount, timestamp + float + int + float - 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow - DEFAULT_NTHREADS,// nthreads - 0, // insert_interval - DEFAULT_TIMESTAMP_STEP, // timestamp_step - 1, // query_times - 10000, // prepared_rand - DEFAULT_INTERLACE_ROWS, // interlaceRows; - 30000, // reqPerReq - (1024*1024), // max_sql_len - DEFAULT_CHILDTABLES, // ntables - 10000, // insertRows - 0, // abort - 0, // disorderRatio - 1000, // disorderRange - 1, // method_of_delete - 0, // totalInsertRows; - 0, // totalAffectedRows; - true, // demo_mode; -}; - -static SDbs g_Dbs; -static int64_t g_totalChildTables = DEFAULT_CHILDTABLES; -static int64_t g_actualChildTables = 0; -static SQueryMetaInfo g_queryInfo; -static FILE * g_fpOfInsertResult = NULL; - -#if _MSC_VER <= 1900 -#define __func__ __FUNCTION__ -#endif - -#define debugPrint(fmt, ...) \ - do { if (g_args.debug_print || g_args.verbose_print) \ - fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) - -#define verbosePrint(fmt, ...) \ - do { if (g_args.verbose_print) \ - fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) - -#define performancePrint(fmt, ...) \ - do { if (g_args.performance_print) \ - fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) - -#define errorPrint(fmt, ...) \ - do {\ - fprintf(stderr, " \033[31m");\ - fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ - fprintf(stderr, " \033[0m");\ - } while(0) - -#define errorPrint2(fmt, ...) 
\ - do {\ - struct tm Tm, *ptm;\ - struct timeval timeSecs; \ - time_t curTime;\ - gettimeofday(&timeSecs, NULL); \ - curTime = timeSecs.tv_sec;\ - ptm = localtime_r(&curTime, &Tm);\ - fprintf(stderr, " \033[31m");\ - fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ",\ - ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\ - ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\ - taosGetSelfPthreadId());\ - fprintf(stderr, " \033[0m");\ - errorPrint(fmt, __VA_ARGS__);\ - } while(0) - -// for strncpy buffer overflow -#define min(a, b) (((a) < (b)) ? (a) : (b)) - - -/////////////////////////////////////////////////// - -static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); } - -#ifndef TAOSDEMO_COMMIT_SHA1 -#define TAOSDEMO_COMMIT_SHA1 "unknown" -#endif - -#ifndef TD_VERNUMBER -#define TD_VERNUMBER "unknown" -#endif - -#ifndef TAOSDEMO_STATUS -#define TAOSDEMO_STATUS "unknown" -#endif - -static void printVersion() { - char tdengine_ver[] = TD_VERNUMBER; - char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1; - char taosdemo_status[] = TAOSDEMO_STATUS; - - if (strlen(taosdemo_status) == 0) { - printf("taosdemo version %s-%s\n", - tdengine_ver, taosdemo_ver); - } else { - printf("taosdemo version %s-%s, status:%s\n", - tdengine_ver, taosdemo_ver, taosdemo_status); - } -} - -static void printHelp() { - char indent[10] = " "; - printf("%s\n\n", "Usage: taosdemo [OPTION...]"); - printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t", - "The meta file to the execution procedure."); - printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t", - "The user name to use when connecting to the server."); - printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server."); - printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory."); - printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t", - "TDengine server FQDN to connect. The default host is localhost."); - printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t", - "The TCP/IP port number to use for the connection."); - printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t", - "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'."); - printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t", - "Destination database. By default is 'test'."); - printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t", - "Set the replica parameters of the database, By default use 1, min: 1, max: 3."); - printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", - "Table prefix name. By default use 'd'."); - printf("%s%s%s%s\n", indent, "-E, --escape-character", "\t", - "Use escape character for Both Stable and normmal table name"); - printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", - "The select sql file."); - printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag."); - printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", - "Direct output to the named file. By default use './output.txt'."); - printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", - "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC."); - printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", - "The data_type of columns, By default use: FLOAT,INT,FLOAT. NCHAR and BINARY can also use custom length. Eg: NCHAR(16),BINARY(8)"); - printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", - "The width of data_type 'BINARY' or 'NCHAR'. 
By default use ", - g_args.binwidth); - printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", - "The number of columns per record. Demo mode by default is ", - DEFAULT_DATATYPE_NUM, - " (float, int, float). Max values is ", - MAX_NUM_COLUMNS); - printf("%s%s%s%s\n", indent, indent, indent, - "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); - printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t", - "The number of threads. By default use ", DEFAULT_NTHREADS); - printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", - "The sleep time (ms) between insertion. By default is 0."); - printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", - "The timestamp step between insertion. By default is ", - DEFAULT_TIMESTAMP_STEP); - printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", - "The interlace rows of insertion. By default is ", - DEFAULT_INTERLACE_ROWS); - printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", - "The number of records per request. By default is 30000."); - printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", - "The number of tables. By default is 10000."); - printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", - "The number of records per table. By default is 10000."); - printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", - "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario."); - printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t", - "Test aggregation functions after insertion."); - printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt."); - printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", - "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default is in order."); - printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", - "Out of order data's range. Unit is ms. By default is 1000."); - printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", - "Print debug info."); - printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", - "Give this help list"); - printf("%s%s%s%s\n", indent, " --usage\t", "\t\t", - "Give a short usage message"); - printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version."); - /* printf("%s%s%s%s\n", indent, "-D", indent, - "Delete database if exists. 
0: no, 1: yes, default is 1"); - */ - printf("\nMandatory or optional arguments to long options are also mandatory or optional\n\ -for any corresponding short options.\n\ -\n\ -Report bugs to .\n"); -} - -static bool isStringNumber(char *input) -{ - int len = strlen(input); - if (0 == len) { - return false; - } - - for (int i = 0; i < len; i++) { - if (!isdigit(input[i])) - return false; - } - - return true; -} - -static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) -{ - fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value); - fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -static void errorUnrecognized(char *program, char *wrong_arg) -{ - fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); - fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -static void errorPrintReqArg(char *program, char *wrong_arg) -{ - fprintf(stderr, - "%s: option requires an argument -- '%s'\n", - program, wrong_arg); - fprintf(stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -static void errorPrintReqArg2(char *program, char *wrong_arg) -{ - fprintf(stderr, - "%s: option requires a number argument '-%s'\n", - program, wrong_arg); - fprintf(stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -static void errorPrintReqArg3(char *program, char *wrong_arg) -{ - fprintf(stderr, - "%s: option '%s' requires an argument\n", - program, wrong_arg); - fprintf(stderr, - "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); -} - -static void parse_args(int argc, char *argv[], SArguments *arguments) { - - for (int i = 1; i < argc; i++) { - if ((0 == strncmp(argv[i], "-f", strlen("-f"))) - || (0 == strncmp(argv[i], "--file", strlen("--file")))) { - arguments->demo_mode = false; - - if (2 == strlen(argv[i])) { - if (i+1 == argc) { - errorPrintReqArg(argv[0], "f"); - exit(EXIT_FAILURE); - } - arguments->metaFile = argv[++i]; - } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) { - arguments->metaFile = (char *)(argv[i] + strlen("-f")); - } else if (strlen("--file") == strlen(argv[i])) { - if (i+1 == argc) { - errorPrintReqArg3(argv[0], "--file"); - exit(EXIT_FAILURE); - } - arguments->metaFile = argv[++i]; - } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { - arguments->metaFile = (char *)(argv[i] + strlen("--file=")); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) - || (0 == strncmp(argv[i], "--config-dir", strlen("--config-dir")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "c"); - exit(EXIT_FAILURE); - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) { - tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), TSDB_FILENAME_LEN); - } else if (strlen("--config-dir") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--config-dir"); - exit(EXIT_FAILURE); - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) { - tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) - || (0 == strncmp(argv[i], "--host", 
strlen("--host")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "h"); - exit(EXIT_FAILURE); - } - arguments->host = argv[++i]; - } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) { - arguments->host = (char *)(argv[i] + strlen("-h")); - } else if (strlen("--host") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--host"); - exit(EXIT_FAILURE); - } - arguments->host = argv[++i]; - } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { - arguments->host = (char *)(argv[i] + strlen("--host=")); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if (strcmp(argv[i], "-PP") == 0) { - arguments->performance_print = true; - } else if ((0 == strncmp(argv[i], "-P", strlen("-P"))) - || (0 == strncmp(argv[i], "--port", strlen("--port")))) { - uint64_t port; - char strPort[BIGINT_BUFF_LEN]; - - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "P"); - exit(EXIT_FAILURE); - } else if (isStringNumber(argv[i+1])) { - tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "P"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) { - if (isStringNumber((char *)(argv[i] + strlen("--port=")))) { - tstrncpy(strPort, (char *)(argv[i]+strlen("--port=")), BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) { - if (isStringNumber((char *)(argv[i] + strlen("-P")))) { - tstrncpy(strPort, (char *)(argv[i]+strlen("-P")), BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - exit(EXIT_FAILURE); - } - } else if (strlen("--port") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--port"); - exit(EXIT_FAILURE); - } else if (isStringNumber(argv[i+1])) { - tstrncpy(strPort, argv[++i], BIGINT_BUFF_LEN); - } else { - errorPrintReqArg2(argv[0], "--port"); - exit(EXIT_FAILURE); - } - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - - port = atoi(strPort); - if (port > 65535) { - errorWrongValue("taosdump", "-P or --port", strPort); - exit(EXIT_FAILURE); - } - arguments->port = (uint16_t)port; - - } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) - || (0 == strncmp(argv[i], "--interface", strlen("--interface")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "I"); - exit(EXIT_FAILURE); - } - if (0 == strcasecmp(argv[i+1], "taosc")) { - arguments->iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i+1], "rest")) { - arguments->iface = REST_IFACE; - } else if (0 == strcasecmp(argv[i+1], "stmt")) { - arguments->iface = STMT_IFACE; - } else { - errorWrongValue(argv[0], "-I", argv[i+1]); - exit(EXIT_FAILURE); - } - i++; - } else if (0 == strncmp(argv[i], "--interface=", strlen("--interface="))) { - if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "taosc")) { - arguments->iface = TAOSC_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "rest")) { - arguments->iface = REST_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "stmt")) { - arguments->iface = STMT_IFACE; - } else { - errorPrintReqArg3(argv[0], "--interface"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) { - if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) { - arguments->iface = TAOSC_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + 
strlen("-I")), "rest")) { - arguments->iface = REST_IFACE; - } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "stmt")) { - arguments->iface = STMT_IFACE; - } else { - errorWrongValue(argv[0], "-I", - (char *)(argv[i] + strlen("-I"))); - exit(EXIT_FAILURE); - } - } else if (strlen("--interface") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--interface"); - exit(EXIT_FAILURE); - } - if (0 == strcasecmp(argv[i+1], "taosc")) { - arguments->iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i+1], "rest")) { - arguments->iface = REST_IFACE; - } else if (0 == strcasecmp(argv[i+1], "stmt")) { - arguments->iface = STMT_IFACE; - } else { - errorWrongValue(argv[0], "--interface", argv[i+1]); - exit(EXIT_FAILURE); - } - i++; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) - || (0 == strncmp(argv[i], "--user", strlen("--user")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "u"); - exit(EXIT_FAILURE); - } - arguments->user = argv[++i]; - } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) { - arguments->user = (char *)(argv[i++] + strlen("-u")); - } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) { - arguments->user = (char *)(argv[i++] + strlen("--user=")); - } else if (strlen("--user") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--user"); - exit(EXIT_FAILURE); - } - arguments->user = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) - || (0 == strcmp(argv[i], "--password"))) { - if ((strlen(argv[i]) == 2) || (0 == strcmp(argv[i], "--password"))) { - printf("Enter password: "); - taosSetConsoleEcho(false); - if (scanf("%s", arguments->password) > 1) { - fprintf(stderr, "password read error!\n"); - } - taosSetConsoleEcho(true); - } else { - tstrncpy(arguments->password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); - } - } else if ((0 == strncmp(argv[i], "-o", strlen("-o"))) - || (0 == strncmp(argv[i], "--output", strlen("--output")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--output"); - exit(EXIT_FAILURE); - } - arguments->output_file = argv[++i]; - } else if (0 == strncmp(argv[i], "--output=", strlen("--output="))) { - arguments->output_file = (char *)(argv[i++] + strlen("--output=")); - } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) { - arguments->output_file = (char *)(argv[i++] + strlen("-o")); - } else if (strlen("--output") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--output"); - exit(EXIT_FAILURE); - } - arguments->output_file = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) - || (0 == strncmp(argv[i], "--sql-file", strlen("--sql-file")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "s"); - exit(EXIT_FAILURE); - } - arguments->sqlFile = argv[++i]; - } else if (0 == strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) { - arguments->sqlFile = (char *)(argv[i++] + strlen("--sql-file=")); - } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) { - arguments->sqlFile = (char *)(argv[i++] + strlen("-s")); - } else if (strlen("--sql-file") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--sql-file"); - exit(EXIT_FAILURE); - } - arguments->sqlFile = argv[++i]; - 
} else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) - || (0 == strncmp(argv[i], "--query-mode", strlen("--query-mode")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "q"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "q"); - exit(EXIT_FAILURE); - } - arguments->async_mode = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--query-mode=", strlen("--query-mode="))) { - if (isStringNumber((char *)(argv[i] + strlen("--query-mode=")))) { - arguments->async_mode = atoi((char *)(argv[i]+strlen("--query-mode="))); - } else { - errorPrintReqArg2(argv[0], "--query-mode"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) { - if (isStringNumber((char *)(argv[i] + strlen("-q")))) { - arguments->async_mode = atoi((char *)(argv[i]+strlen("-q"))); - } else { - errorPrintReqArg2(argv[0], "-q"); - exit(EXIT_FAILURE); - } - } else if (strlen("--query-mode") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--query-mode"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--query-mode"); - exit(EXIT_FAILURE); - } - arguments->async_mode = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) - || (0 == strncmp(argv[i], "--threads", strlen("--threads")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "T"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "T"); - exit(EXIT_FAILURE); - } - arguments->nthreads = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) { - if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) { - arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads="))); - } else { - errorPrintReqArg2(argv[0], "--threads"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) { - if (isStringNumber((char *)(argv[i] + strlen("-T")))) { - arguments->nthreads = atoi((char *)(argv[i]+strlen("-T"))); - } else { - errorPrintReqArg2(argv[0], "-T"); - exit(EXIT_FAILURE); - } - } else if (strlen("--threads") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--threads"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--threads"); - exit(EXIT_FAILURE); - } - arguments->nthreads = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) - || (0 == strncmp(argv[i], "--insert-interval", strlen("--insert-interval")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "i"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "i"); - exit(EXIT_FAILURE); - } - arguments->insert_interval = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--insert-interval=", strlen("--insert-interval="))) { - if (isStringNumber((char *)(argv[i] + strlen("--insert-interval=")))) { - arguments->insert_interval = atoi((char *)(argv[i]+strlen("--insert-interval="))); - } else { - errorPrintReqArg3(argv[0], "--insert-innterval"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) { - if (isStringNumber((char *)(argv[i] + strlen("-i")))) { - 
arguments->insert_interval = atoi((char *)(argv[i]+strlen("-i"))); - } else { - errorPrintReqArg3(argv[0], "-i"); - exit(EXIT_FAILURE); - } - } else if (strlen("--insert-interval")== strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--insert-interval"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--insert-interval"); - exit(EXIT_FAILURE); - } - arguments->insert_interval = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-S", strlen("-S"))) - || (0 == strncmp(argv[i], "--time-step", strlen("--time-step")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "S"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "S"); - exit(EXIT_FAILURE); - } - arguments->timestamp_step = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--time-step=", strlen("--time-step="))) { - if (isStringNumber((char *)(argv[i] + strlen("--time-step=")))) { - arguments->async_mode = atoi((char *)(argv[i]+strlen("--time-step="))); - } else { - errorPrintReqArg2(argv[0], "--time-step"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) { - if (isStringNumber((char *)(argv[i] + strlen("-S")))) { - arguments->timestamp_step = atoi((char *)(argv[i]+strlen("-S"))); - } else { - errorPrintReqArg2(argv[0], "-S"); - exit(EXIT_FAILURE); - } - } else if (strlen("--time-step") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--time-step"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--time-step"); - exit(EXIT_FAILURE); - } - arguments->timestamp_step = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-qt need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->query_times = atoi(argv[++i]); - } else if ((0 == strncmp(argv[i], "-B", strlen("-B"))) - || (0 == strncmp(argv[i], "--interlace-rows", strlen("--interlace-rows")))) { - if (strlen("-B") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "B"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "B"); - exit(EXIT_FAILURE); - } - arguments->interlaceRows = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) { - if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) { - arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); - } else { - errorPrintReqArg2(argv[0], "--interlace-rows"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { - if (isStringNumber((char *)(argv[i] + strlen("-B")))) { - arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B"))); - } else { - errorPrintReqArg2(argv[0], "-B"); - exit(EXIT_FAILURE); - } - } else if (strlen("--interlace-rows")== strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--interlace-rows"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--interlace-rows"); - exit(EXIT_FAILURE); - } - arguments->interlaceRows = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) - 
|| (0 == strncmp(argv[i], "--rec-per-req", 13))) { - if (strlen("-r") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "r"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "r"); - exit(EXIT_FAILURE); - } - arguments->reqPerReq = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) { - if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) { - arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req="))); - } else { - errorPrintReqArg2(argv[0], "--rec-per-req"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) { - if (isStringNumber((char *)(argv[i] + strlen("-r")))) { - arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r"))); - } else { - errorPrintReqArg2(argv[0], "-r"); - exit(EXIT_FAILURE); - } - } else if (strlen("--rec-per-req")== strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--rec-per-req"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--rec-per-req"); - exit(EXIT_FAILURE); - } - arguments->reqPerReq = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) - || (0 == strncmp(argv[i], "--tables", strlen("--tables")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "t"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "t"); - exit(EXIT_FAILURE); - } - arguments->ntables = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) { - if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) { - arguments->ntables = atoi((char *)(argv[i]+strlen("--tables="))); - } else { - errorPrintReqArg2(argv[0], "--tables"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) { - if (isStringNumber((char *)(argv[i] + strlen("-t")))) { - arguments->ntables = atoi((char *)(argv[i]+strlen("-t"))); - } else { - errorPrintReqArg2(argv[0], "-t"); - exit(EXIT_FAILURE); - } - } else if (strlen("--tables") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--tables"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--tables"); - exit(EXIT_FAILURE); - } - arguments->ntables = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - - g_totalChildTables = arguments->ntables; - } else if ((0 == strncmp(argv[i], "-n", strlen("-n"))) - || (0 == strncmp(argv[i], "--records", strlen("--records")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "n"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "n"); - exit(EXIT_FAILURE); - } - arguments->insertRows = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) { - if (isStringNumber((char *)(argv[i] + strlen("--records=")))) { - arguments->insertRows = atoi((char *)(argv[i]+strlen("--records="))); - } else { - errorPrintReqArg2(argv[0], "--records"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) { - if (isStringNumber((char *)(argv[i] + strlen("-n")))) { - arguments->insertRows = atoi((char *)(argv[i]+strlen("-n"))); - } else { - errorPrintReqArg2(argv[0], "-n"); - exit(EXIT_FAILURE); - } - } else if (strlen("--records") == strlen(argv[i])) { 
- if (argc == i+1) { - errorPrintReqArg3(argv[0], "--records"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--records"); - exit(EXIT_FAILURE); - } - arguments->insertRows = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) - || (0 == strncmp(argv[i], "--database", strlen("--database")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "d"); - exit(EXIT_FAILURE); - } - arguments->database = argv[++i]; - } else if (0 == strncmp(argv[i], "--database=", strlen("--database="))) { - arguments->output_file = (char *)(argv[i] + strlen("--database=")); - } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) { - arguments->output_file = (char *)(argv[i] + strlen("-d")); - } else if (strlen("--database") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--database"); - exit(EXIT_FAILURE); - } - arguments->database = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) - || (0 == strncmp(argv[i], "--columns", strlen("--columns")))) { - arguments->demo_mode = false; - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "l"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "l"); - exit(EXIT_FAILURE); - } - arguments->columnCount = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) { - if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) { - arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns="))); - } else { - errorPrintReqArg2(argv[0], "--columns"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) { - if (isStringNumber((char *)(argv[i] + strlen("-l")))) { - arguments->columnCount = atoi((char *)(argv[i]+strlen("-l"))); - } else { - errorPrintReqArg2(argv[0], "-l"); - exit(EXIT_FAILURE); - } - } else if (strlen("--columns")== strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--columns"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--columns"); - exit(EXIT_FAILURE); - } - arguments->columnCount = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - - if (arguments->columnCount > MAX_NUM_COLUMNS) { - printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS); - prompt(); - arguments->columnCount = MAX_NUM_COLUMNS; - } - - for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) { - arguments->dataType[col] = "INT"; - arguments->data_type[col] = TSDB_DATA_TYPE_INT; - } - for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) { - arguments->dataType[col] = NULL; - arguments->data_type[col] = TSDB_DATA_TYPE_NULL; - } - } else if ((0 == strncmp(argv[i], "-b", strlen("-b"))) - || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) { - arguments->demo_mode = false; - - char *dataType; - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "b"); - exit(EXIT_FAILURE); - } - dataType = argv[++i]; - } else if (0 == strncmp(argv[i], "--data-type=", strlen("--data-type="))) { - dataType = (char *)(argv[i] + strlen("--data-type=")); - } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) { - dataType = (char *)(argv[i] + strlen("-b")); - } else if (strlen("--data-type") == 
strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--data-type"); - exit(EXIT_FAILURE); - } - dataType = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - - if (strstr(dataType, ",") == NULL) { - // only one col - if (strcasecmp(dataType, "INT") - && strcasecmp(dataType, "FLOAT") - && strcasecmp(dataType, "TINYINT") - && strcasecmp(dataType, "BOOL") - && strcasecmp(dataType, "SMALLINT") - && strcasecmp(dataType, "BIGINT") - && strcasecmp(dataType, "DOUBLE") - && strcasecmp(dataType, "TIMESTAMP") - && !regexMatch(dataType, - "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED) - && strcasecmp(dataType, "UTINYINT") - && strcasecmp(dataType, "USMALLINT") - && strcasecmp(dataType, "UINT") - && strcasecmp(dataType, "UBIGINT")) { - printHelp(); - errorPrint("%s", "-b: Invalid data_type!\n"); - exit(EXIT_FAILURE); - } - arguments->dataType[0] = dataType; - if (0 == strcasecmp(dataType, "INT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_INT; - } else if (0 == strcasecmp(dataType, "TINYINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strcasecmp(dataType, "SMALLINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strcasecmp(dataType, "BIGINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strcasecmp(dataType, "FLOAT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strcasecmp(dataType, "DOUBLE")) { - arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE; - } else if (1 == regexMatch(dataType, - "^BINARY(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - arguments->data_type[0] = TSDB_DATA_TYPE_BINARY; - } else if (1 == regexMatch(dataType, - "^NCHAR(\\([1-9][0-9]*\\))?$", - REG_ICASE | REG_EXTENDED)) { - arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strcasecmp(dataType, "BOOL")) { - arguments->data_type[0] = TSDB_DATA_TYPE_BOOL; - } else if (0 == strcasecmp(dataType, "TIMESTAMP")) { - arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strcasecmp(dataType, "UTINYINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strcasecmp(dataType, "USMALLINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strcasecmp(dataType, "UINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_UINT; - } else if (0 == strcasecmp(dataType, "UBIGINT")) { - arguments->data_type[0] = TSDB_DATA_TYPE_UBIGINT; - } else { - arguments->data_type[0] = TSDB_DATA_TYPE_NULL; - } - arguments->dataType[1] = NULL; - arguments->data_type[1] = TSDB_DATA_TYPE_NULL; - } else { - // more than one col - int index = 0; - g_dupstr = strdup(dataType); - char *running = g_dupstr; - char *token = strsep(&running, ","); - while(token != NULL) { - if (strcasecmp(token, "INT") - && strcasecmp(token, "FLOAT") - && strcasecmp(token, "TINYINT") - && strcasecmp(token, "BOOL") - && strcasecmp(token, "SMALLINT") - && strcasecmp(token, "BIGINT") - && strcasecmp(token, "DOUBLE") - && strcasecmp(token, "TIMESTAMP") - && !regexMatch(token, "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))?$", REG_ICASE | REG_EXTENDED) - && strcasecmp(token, "UTINYINT") - && strcasecmp(token, "USMALLINT") - && strcasecmp(token, "UINT") - && strcasecmp(token, "UBIGINT")) { - printHelp(); - free(g_dupstr); - errorPrint("%s", "-b: Invalid data_type!\n"); - exit(EXIT_FAILURE); - } - - if (0 == strcasecmp(token, "INT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_INT; - } else if (0 == strcasecmp(token, "FLOAT")) { - 
arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strcasecmp(token, "SMALLINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strcasecmp(token, "BIGINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strcasecmp(token, "DOUBLE")) { - arguments->data_type[index] = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strcasecmp(token, "TINYINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT; - } else if (1 == regexMatch(token, "^BINARY(\\([1-9][0-9]*\\))?$", REG_ICASE | - REG_EXTENDED)) { - arguments->data_type[index] = TSDB_DATA_TYPE_BINARY; - } else if (1 == regexMatch(token, "^NCHAR(\\([1-9][0-9]*\\))?$", REG_ICASE | - REG_EXTENDED)) { - arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strcasecmp(token, "BOOL")) { - arguments->data_type[index] = TSDB_DATA_TYPE_BOOL; - } else if (0 == strcasecmp(token, "TIMESTAMP")) { - arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strcasecmp(token, "UTINYINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strcasecmp(token, "USMALLINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strcasecmp(token, "UINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_UINT; - } else if (0 == strcasecmp(token, "UBIGINT")) { - arguments->data_type[index] = TSDB_DATA_TYPE_UBIGINT; - } else { - arguments->data_type[index] = TSDB_DATA_TYPE_NULL; - } - arguments->dataType[index] = token; - index ++; - token = strsep(&running, ","); - if (index >= MAX_NUM_COLUMNS) break; - } - arguments->dataType[index] = NULL; - arguments->data_type[index] = TSDB_DATA_TYPE_NULL; - } - } else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) - || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "w"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "w"); - exit(EXIT_FAILURE); - } - arguments->binwidth = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) { - if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) { - arguments->binwidth = atoi((char *)(argv[i]+strlen("--binwidth="))); - } else { - errorPrintReqArg2(argv[0], "--binwidth"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) { - if (isStringNumber((char *)(argv[i] + strlen("-w")))) { - arguments->binwidth = atoi((char *)(argv[i]+strlen("-w"))); - } else { - errorPrintReqArg2(argv[0], "-w"); - exit(EXIT_FAILURE); - } - } else if (strlen("--binwidth") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--binwidth"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--binwidth"); - exit(EXIT_FAILURE); - } - arguments->binwidth = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) - || (0 == strncmp(argv[i], "--table-prefix", strlen("--table-prefix")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "m"); - exit(EXIT_FAILURE); - } - arguments->tb_prefix = argv[++i]; - } else if (0 == strncmp(argv[i], "--table-prefix=", strlen("--table-prefix="))) { - arguments->tb_prefix = (char *)(argv[i] + strlen("--table-prefix=")); - } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) { - arguments->tb_prefix = (char *)(argv[i] + strlen("-m")); - } 
else if (strlen("--table-prefix") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--table-prefix"); - exit(EXIT_FAILURE); - } - arguments->tb_prefix = argv[++i]; - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((strcmp(argv[i], "-N") == 0) - || (0 == strcmp(argv[i], "--normal-table"))) { - arguments->demo_mode = false; - arguments->use_metric = false; - } else if ((strcmp(argv[i], "-M") == 0) - || (0 == strcmp(argv[i], "--random"))) { - arguments->demo_mode = false; - } else if ((strcmp(argv[i], "-x") == 0) - || (0 == strcmp(argv[i], "--aggr-func"))) { - arguments->aggr_func = true; - } else if ((strcmp(argv[i], "-y") == 0) - || (0 == strcmp(argv[i], "--answer-yes"))) { - arguments->answer_yes = true; - } else if ((strcmp(argv[i], "-g") == 0) - || (0 == strcmp(argv[i], "--debug"))) { - arguments->debug_print = true; - } else if (strcmp(argv[i], "-gg") == 0) { - arguments->verbose_print = true; - } else if ((0 == strncmp(argv[i], "-R", strlen("-R"))) - || (0 == strncmp(argv[i], "--disorder-range", - strlen("--disorder-range")))) { - if (strlen("-R") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "R"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "R"); - exit(EXIT_FAILURE); - } - arguments->disorderRange = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--disorder-range=", - strlen("--disorder-range="))) { - if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) { - arguments->disorderRange = - atoi((char *)(argv[i]+strlen("--disorder-range="))); - } else { - errorPrintReqArg2(argv[0], "--disorder-range"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) { - if (isStringNumber((char *)(argv[i] + strlen("-R")))) { - arguments->disorderRange = - atoi((char *)(argv[i]+strlen("-R"))); - } else { - errorPrintReqArg2(argv[0], "-R"); - exit(EXIT_FAILURE); - } - - if (arguments->disorderRange < 0) { - errorPrint("Invalid disorder range %d, will be set to %d\n", - arguments->disorderRange, 1000); - arguments->disorderRange = 1000; - } - } else if (strlen("--disorder-range") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--disorder-range"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--disorder-range"); - exit(EXIT_FAILURE); - } - arguments->disorderRange = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) - || (0 == strncmp(argv[i], "--disorder", strlen("--disorder")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "O"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "O"); - exit(EXIT_FAILURE); - } - arguments->disorderRatio = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--disorder=", strlen("--disorder="))) { - if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) { - arguments->disorderRatio = atoi((char *)(argv[i]+strlen("--disorder="))); - } else { - errorPrintReqArg2(argv[0], "--disorder"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) { - if (isStringNumber((char *)(argv[i] + strlen("-O")))) { - arguments->disorderRatio = atoi((char *)(argv[i]+strlen("-O"))); - } else { - errorPrintReqArg2(argv[0], "-O"); - exit(EXIT_FAILURE); - } - } else if (strlen("--disorder") == strlen(argv[i])) { - if (argc == 
i+1) { - errorPrintReqArg3(argv[0], "--disorder"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--disorder"); - exit(EXIT_FAILURE); - } - arguments->disorderRatio = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - - if (arguments->disorderRatio > 50) { - errorPrint("Invalid disorder ratio %d, will be set to %d\n", - arguments->disorderRatio, 50); - arguments->disorderRatio = 50; - } - } else if ((0 == strncmp(argv[i], "-a", strlen("-a"))) - || (0 == strncmp(argv[i], "--replica", - strlen("--replica")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "a"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "a"); - exit(EXIT_FAILURE); - } - arguments->replica = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--replica=", - strlen("--replica="))) { - if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) { - arguments->replica = - atoi((char *)(argv[i]+strlen("--replica="))); - } else { - errorPrintReqArg2(argv[0], "--replica"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) { - if (isStringNumber((char *)(argv[i] + strlen("-a")))) { - arguments->replica = - atoi((char *)(argv[i]+strlen("-a"))); - } else { - errorPrintReqArg2(argv[0], "-a"); - exit(EXIT_FAILURE); - } - } else if (strlen("--replica") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--replica"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--replica"); - exit(EXIT_FAILURE); - } - arguments->replica = atoi(argv[++i]); - } else { - errorUnrecognized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } - - if (arguments->replica > 3 || arguments->replica < 1) { - errorPrint("Invalid replica value %d, will be set to %d\n", - arguments->replica, 1); - arguments->replica = 1; - } - } else if (strcmp(argv[i], "-D") == 0) { - arguments->method_of_delete = atoi(argv[++i]); - if (arguments->method_of_delete > 3) { - errorPrint("%s", "\n\t-D need a value (0~3) number following!\n"); - exit(EXIT_FAILURE); - } - } else if ((strcmp(argv[i], "--version") == 0) - || (strcmp(argv[i], "-V") == 0)) { - printVersion(); - exit(0); - } else if ((strcmp(argv[i], "--help") == 0) - || (strcmp(argv[i], "-?") == 0)) { - printHelp(); - exit(0); - } else if (strcmp(argv[i], "--usage") == 0) { - printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ - [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ - [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ - [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ - [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ - [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ - [--help] [--usage] [--version]\n"); - exit(0); - } else { - // to simulate argp_option output - if (strlen(argv[i]) > 2) { - if (0 == strncmp(argv[i], "--", 2)) { - fprintf(stderr, "%s: unrecognized options '%s'\n", argv[0], argv[i]); - } else if (0 == strncmp(argv[i], "-", 1)) { - char tmp[2] = {0}; - tstrncpy(tmp, argv[i]+1, 2); - fprintf(stderr, "%s: invalid options -- '%s'\n", argv[0], tmp); - } else { - fprintf(stderr, "%s: Too many arguments\n", argv[0]); - } - } else { - fprintf(stderr, "%s invalid options -- '%s'\n", argv[0], - (char *)((char *)argv[i])+1); - } - fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); 
- exit(EXIT_FAILURE); - } - } - - int columnCount; - for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) { - if (g_args.dataType[columnCount] == NULL) { - break; - } - } - - if (0 == columnCount) { - ERROR_EXIT("data type error!"); - } - g_args.columnCount = columnCount; - - g_args.lenOfOneRow = 20; // timestamp - for (int c = 0; c < g_args.columnCount; c++) { - switch(g_args.data_type[c]) { - case TSDB_DATA_TYPE_BINARY: - g_args.lenOfOneRow += g_args.binwidth + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - g_args.lenOfOneRow += g_args.binwidth + 3; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - g_args.lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - g_args.lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - g_args.lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - g_args.lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - g_args.lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - g_args.lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - g_args.lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - default: - errorPrint2("get error data type : %s\n", g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - if (((arguments->debug_print) && (NULL != arguments->metaFile)) - || arguments->verbose_print) { - printf("###################################################################\n"); - printf("# meta file: %s\n", arguments->metaFile); - printf("# Server IP: %s:%hu\n", - arguments->host == NULL ? "localhost" : arguments->host, - arguments->port ); - printf("# User: %s\n", arguments->user); - printf("# Password: %s\n", arguments->password); - printf("# Use metric: %s\n", - arguments->use_metric ? 
"true" : "false"); - if (*(arguments->dataType)) { - printf("# Specified data type: "); - for (int c = 0; c < MAX_NUM_COLUMNS; c++) - if (arguments->dataType[c]) - printf("%s,", arguments->dataType[c]); - else - break; - printf("\n"); - } - printf("# Insertion interval: %"PRIu64"\n", - arguments->insert_interval); - printf("# Number of records per req: %u\n", - arguments->reqPerReq); - printf("# Max SQL length: %"PRIu64"\n", - arguments->max_sql_len); - printf("# Length of Binary: %d\n", arguments->binwidth); - printf("# Number of Threads: %d\n", arguments->nthreads); - printf("# Number of Tables: %"PRId64"\n", - arguments->ntables); - printf("# Number of Data per Table: %"PRId64"\n", - arguments->insertRows); - printf("# Database name: %s\n", arguments->database); - printf("# Table prefix: %s\n", arguments->tb_prefix); - if (arguments->disorderRatio) { - printf("# Data order: %d\n", arguments->disorderRatio); - printf("# Data out of order rate: %d\n", arguments->disorderRange); - } - printf("# Delete method: %d\n", arguments->method_of_delete); - printf("# Answer yes when prompt: %d\n", arguments->answer_yes); - printf("# Print debug info: %d\n", arguments->debug_print); - printf("# Print verbose info: %d\n", arguments->verbose_print); - printf("###################################################################\n"); - - prompt(); - } -} - -static void tmfclose(FILE *fp) { - if (NULL != fp) { - fclose(fp); - } -} - -static void tmfree(void *buf) { - if (NULL != buf) { - free(buf); - buf = NULL; - } -} - -static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { - - verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - - if (code != 0) { - if (!quiet) { - errorPrint2("Failed to execute <%s>, reason: %s\n", - command, taos_errstr(res)); - } - taos_free_result(res); - //taos_close(taos); - return -1; - } - - if (INSERT_TYPE == type) { - int affectedRows = taos_affected_rows(res); - taos_free_result(res); - return affectedRows; - } - - taos_free_result(res); - return 0; -} - -static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) -{ - pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); - if (pThreadInfo->fp == NULL) { - errorPrint2( - "%s() LN%d, failed to open result file: %s, result will not save to file\n", - __func__, __LINE__, pThreadInfo->filePath); - return; - } - - fprintf(pThreadInfo->fp, "%s", resultBuf); - tmfclose(pThreadInfo->fp); - pThreadInfo->fp = NULL; -} - -static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { - TAOS_ROW row = NULL; - int num_rows = 0; - int num_fields = taos_field_count(res); - TAOS_FIELD *fields = taos_fetch_fields(res); - - char* databuf = (char*) calloc(1, 100*1024*1024); - if (databuf == NULL) { - errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", - __func__, __LINE__); - return ; - } - - int64_t totalLen = 0; - - // fetch the records row by row - while((row = taos_fetch_row(res))) { - if (totalLen >= (100*1024*1024 - HEAD_BUFF_LEN*2)) { - if (strlen(pThreadInfo->filePath) > 0) - appendResultBufToFile(databuf, pThreadInfo); - totalLen = 0; - memset(databuf, 0, 100*1024*1024); - } - num_rows++; - char temp[HEAD_BUFF_LEN] = {0}; - int len = taos_print_row(temp, row, fields, num_fields); - len += sprintf(temp + len, "\n"); - //printf("query result:%s\n", temp); - memcpy(databuf + totalLen, temp, len); - totalLen += len; - verbosePrint("%s() LN%d, totalLen: 
%"PRId64"\n", - __func__, __LINE__, totalLen); - } - - verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", - __func__, __LINE__, databuf, pThreadInfo->filePath); - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(databuf, pThreadInfo); - } - free(databuf); -} - -static void selectAndGetResult( - threadInfo *pThreadInfo, char *command) -{ - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - if (res == NULL || taos_errno(res) != 0) { - errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - return; - } - - fetchResult(res, pThreadInfo); - taos_free_result(res); - - } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - int retCode = postProceSql( - g_queryInfo.host, g_queryInfo.port, - command, - pThreadInfo); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); - } - - } else { - errorPrint2("%s() LN%d, unknown query mode: %s\n", - __func__, __LINE__, g_queryInfo.queryMode); - } -} - -static char *rand_bool_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbool_buff + ((cursor % g_args.prepared_rand) * BOOL_BUFF_LEN); -} - -static int32_t rand_bool() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % 2; -} - -static char *rand_tinyint_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randtinyint_buff + - ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); -} - -static int32_t rand_tinyint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % 128; -} - -static char *rand_utinyint_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randutinyint_buff + - ((cursor % g_args.prepared_rand) * TINYINT_BUFF_LEN); -} - -static int32_t rand_utinyint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand] % 255; -} - -static char *rand_smallint_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randsmallint_buff + - ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); -} - -static int32_t rand_smallint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand] % 32768; -} - -static char *rand_usmallint_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randusmallint_buff + - ((cursor % g_args.prepared_rand) * SMALLINT_BUFF_LEN); -} - -static int32_t rand_usmallint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand] % 65535; -} - -static char *rand_int_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -static int32_t rand_int() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randint[cursor % g_args.prepared_rand]; -} - -static char *rand_uint_str() 
-{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint_buff + ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -static int32_t rand_uint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randuint[cursor % g_args.prepared_rand]; -} - -static char *rand_bigint_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbigint_buff + - ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); -} - -static int64_t rand_bigint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randbigint[cursor % g_args.prepared_rand]; -} - -static char *rand_ubigint_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randubigint_buff + - ((cursor % g_args.prepared_rand) * BIGINT_BUFF_LEN); -} - -static int64_t rand_ubigint() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randubigint[cursor % g_args.prepared_rand]; -} - -static char *rand_float_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randfloat_buff + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - - -static float rand_float() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randfloat[cursor % g_args.prepared_rand]; -} - -static char *demo_current_float_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_current_buff + - ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -static float UNUSED_FUNC demo_current_float() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return (float)(9.8 + 0.04 * (g_randint[cursor % g_args.prepared_rand] % 10) - + g_randfloat[cursor % g_args.prepared_rand]/1000000000); -} - -static char *demo_voltage_int_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_voltage_buff + - ((cursor % g_args.prepared_rand) * INT_BUFF_LEN); -} - -static int32_t UNUSED_FUNC demo_voltage_int() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return 215 + g_randint[cursor % g_args.prepared_rand] % 10; -} - -static char *demo_phase_float_str() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_rand_phase_buff + ((cursor % g_args.prepared_rand) * FLOAT_BUFF_LEN); -} - -static float UNUSED_FUNC demo_phase_float() { - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return (float)((115 + g_randint[cursor % g_args.prepared_rand] % 10 - + g_randfloat[cursor % g_args.prepared_rand]/1000000000)/360); -} - -#if 0 -static const char charNum[] = "0123456789"; - -static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose -static void nonrand_string(char *str, int size) -{ - str[0] = 0; - if (size > 0) { - int n; - for (n = 0; n < size; n++) { - str[n] = charNum[n % 10]; - } - str[n] = 0; - } -} -#endif - -static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; - -static void rand_string(char *str, int size) { - str[0] = 0; - if (size > 0) { - //--size; - int n; - for (n = 0; n < size; n++) { - int key = abs(rand_tinyint()) % 
(int)(sizeof(charset) - 1); - str[n] = charset[key]; - } - str[n] = 0; - } -} - -static char *rand_double_str() -{ - static int cursor; - cursor++; - if (cursor > (g_args.prepared_rand - 1)) cursor = 0; - return g_randdouble_buff + (cursor * DOUBLE_BUFF_LEN); -} - -static double rand_double() -{ - static int cursor; - cursor++; - cursor = cursor % g_args.prepared_rand; - return g_randdouble[cursor]; -} - -static void init_rand_data() { - - g_randint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - assert(g_randint_buff); - g_rand_voltage_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - assert(g_rand_voltage_buff); - g_randbigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); - assert(g_randbigint_buff); - g_randsmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); - assert(g_randsmallint_buff); - g_randtinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); - assert(g_randtinyint_buff); - g_randbool_buff = calloc(1, BOOL_BUFF_LEN * g_args.prepared_rand); - assert(g_randbool_buff); - g_randfloat_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - assert(g_randfloat_buff); - g_rand_current_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - assert(g_rand_current_buff); - g_rand_phase_buff = calloc(1, FLOAT_BUFF_LEN * g_args.prepared_rand); - assert(g_rand_phase_buff); - g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * g_args.prepared_rand); - assert(g_randdouble_buff); - g_randuint_buff = calloc(1, INT_BUFF_LEN * g_args.prepared_rand); - assert(g_randuint_buff); - g_randutinyint_buff = calloc(1, TINYINT_BUFF_LEN * g_args.prepared_rand); - assert(g_randutinyint_buff); - g_randusmallint_buff = calloc(1, SMALLINT_BUFF_LEN * g_args.prepared_rand); - assert(g_randusmallint_buff); - g_randubigint_buff = calloc(1, BIGINT_BUFF_LEN * g_args.prepared_rand); - assert(g_randubigint_buff); - g_randint = calloc(1, sizeof(int32_t) * g_args.prepared_rand); - assert(g_randint); - g_randuint = calloc(1, sizeof(uint32_t) * g_args.prepared_rand); - assert(g_randuint); - g_randbigint = calloc(1, sizeof(int64_t) * g_args.prepared_rand); - assert(g_randbigint); - g_randubigint = calloc(1, sizeof(uint64_t) * g_args.prepared_rand); - assert(g_randubigint); - g_randfloat = calloc(1, sizeof(float) * g_args.prepared_rand); - assert(g_randfloat); - g_randdouble = calloc(1, sizeof(double) * g_args.prepared_rand); - assert(g_randdouble); - - for (int i = 0; i < g_args.prepared_rand; i++) { - g_randint[i] = (int)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); - g_randuint[i] = (int)(taosRandom()); - sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d", - g_randint[i]); - sprintf(g_rand_voltage_buff + i * INT_BUFF_LEN, "%d", - 215 + g_randint[i] % 10); - - sprintf(g_randbool_buff + i * BOOL_BUFF_LEN, "%s", - ((g_randint[i] % 2) & 1)?"true":"false"); - sprintf(g_randsmallint_buff + i * SMALLINT_BUFF_LEN, "%d", - g_randint[i] % 32768); - sprintf(g_randtinyint_buff + i * TINYINT_BUFF_LEN, "%d", - g_randint[i] % 128); - sprintf(g_randuint_buff + i * INT_BUFF_LEN, "%d", - g_randuint[i]); - sprintf(g_randusmallint_buff + i * SMALLINT_BUFF_LEN, "%d", - g_randuint[i] % 65535); - sprintf(g_randutinyint_buff + i * TINYINT_BUFF_LEN, "%d", - g_randuint[i] % 255); - - g_randbigint[i] = (int64_t)(taosRandom() % RAND_MAX - (RAND_MAX >> 1)); - g_randubigint[i] = (uint64_t)(taosRandom()); - sprintf(g_randbigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"", - g_randbigint[i]); - sprintf(g_randubigint_buff + i * BIGINT_BUFF_LEN, "%"PRId64"", - g_randubigint[i]); - - g_randfloat[i] = 
(float)(taosRandom() / 1000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1); - sprintf(g_randfloat_buff + i * FLOAT_BUFF_LEN, "%f", - g_randfloat[i]); - sprintf(g_rand_current_buff + i * FLOAT_BUFF_LEN, "%f", - (float)(9.8 + 0.04 * (g_randint[i] % 10) - + g_randfloat[i]/1000000000)); - sprintf(g_rand_phase_buff + i * FLOAT_BUFF_LEN, "%f", - (float)((115 + g_randint[i] % 10 - + g_randfloat[i]/1000000000)/360)); - - g_randdouble[i] = (double)(taosRandom() / 1000000.0) * (taosRandom() % 2 > 0.5 ? 1 : -1); - sprintf(g_randdouble_buff + i * DOUBLE_BUFF_LEN, "%f", - g_randdouble[i]); - } -} - -#define SHOW_PARSE_RESULT_START() \ - do { if (g_args.metaFile) \ - printf("\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \ - g_args.metaFile); } while(0) - -#define SHOW_PARSE_RESULT_END() \ - do { if (g_args.metaFile) \ - printf("\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \ - g_args.metaFile); } while(0) - -#define SHOW_PARSE_RESULT_START_TO_FILE(fp) \ - do { if (g_args.metaFile) \ - fprintf(fp, "\033[1m\033[40;32m================ %s parse result START ================\033[0m\n", \ - g_args.metaFile); } while(0) - -#define SHOW_PARSE_RESULT_END_TO_FILE(fp) \ - do { if (g_args.metaFile) \ - fprintf(fp, "\033[1m\033[40;32m================ %s parse result END================\033[0m\n", \ - g_args.metaFile); } while(0) - -static int printfInsertMeta() { - SHOW_PARSE_RESULT_START(); - - if (g_args.demo_mode) { - printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n"); - } else { - printf("\ntaosdemo is simulating random data as you request..\n\n"); - } - - if (g_args.iface != INTERFACE_BUT) { - // first time if no iface specified - printf("interface: \033[33m%s\033[0m\n", - (g_args.iface==TAOSC_IFACE)?"taosc": - (g_args.iface==REST_IFACE)?"rest":"stmt"); - } - - printf("host: \033[33m%s:%u\033[0m\n", - g_Dbs.host, g_Dbs.port); - printf("user: \033[33m%s\033[0m\n", g_Dbs.user); - printf("password: \033[33m%s\033[0m\n", g_Dbs.password); - printf("configDir: \033[33m%s\033[0m\n", configDir); - printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); - printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); - printf("thread num of create table: \033[33m%d\033[0m\n", - g_Dbs.threadCountForCreateTbl); - printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", - g_args.insert_interval); - printf("number of records per req: \033[33m%u\033[0m\n", - g_args.reqPerReq); - printf("max sql length: \033[33m%"PRIu64"\033[0m\n", - g_args.max_sql_len); - - printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - printf("database[\033[33m%d\033[0m]:\n", i); - printf(" database[%d] name: \033[33m%s\033[0m\n", - i, g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33m no\033[0m\n"); - } else { - printf(" drop: \033[33m yes\033[0m\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - printf(" blocks: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - printf(" cache: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - printf(" days: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - printf(" keep: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - printf(" replica: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - printf(" 
update: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - printf(" minRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - printf(" maxRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - printf(" walLevel: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - printf(" fsync: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - printf(" quorum: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) { - printf(" precision: \033[33m%s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - } else { - printf("\033[1m\033[40;31m precision error: %s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - return -1; - } - } - - - if (g_args.use_metric) { - printf(" super table count: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTblCount); - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); - - printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].stbName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); - } else if (AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); - } else { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", "no"); - } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); - } else { - printf(" childTblExists: \033[33m%s\033[0m\n", "error"); - } - - printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblCount); - printf(" childTblPrefix: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - printf(" dataSource: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].dataSource); - printf(" iface: \033[33m%s\033[0m\n", - (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": - (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); - if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblLimit); - } - if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { - printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblOffset); - } - printf(" insertRows: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].insertRows); - /* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); - }else { - printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); - } - */ - printf(" interlaceRows: \033[33m%u\033[0m\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - - printf(" disorderRange: \033[33m%d\033[0m\n", - 
g_Dbs.db[i].superTbls[j].disorderRange); - printf(" disorderRatio: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - printf(" startTimestamp: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - printf(" sampleFormat: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - printf(" sampleFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFile); - printf(" useSampleTs: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].useSampleTs ? "yes (warning: disorderRange/disorderRatio is disabled)" : "no"); - printf(" tagsFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", 6)) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", 5))) { - printf("column[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - printf("column[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - printf("\n"); - - printf(" tagCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - printf("tag[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - printf("\n"); - } - } else { - printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", - g_args.ntables); - printf(" insertRows: \033[33m%"PRId64"\033[0m\n", - g_args.insertRows); - } - printf("\n"); - } - - SHOW_PARSE_RESULT_END(); - - return 0; -} - -static void printfInsertMetaToFile(FILE* fp) { - - SHOW_PARSE_RESULT_START_TO_FILE(fp); - - fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); - fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "configDir: %s\n", configDir); - fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); - fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); - fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl); - fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq); - fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); - fprintf(fp, "database count: %d\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - fprintf(fp, "database[%d]:\n", i); - fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - fprintf(fp, " drop: no\n"); - }else { - fprintf(fp, " drop: yes\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - fprintf(fp, " 
cache: %d\n", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - fprintf(fp, " precision: %s\n", - g_Dbs.db[i].dbCfg.precision); - } else { - fprintf(fp, " precision error: %s\n", - g_Dbs.db[i].dbCfg.precision); - } - } - - fprintf(fp, " super table count: %"PRIu64"\n", - g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - fprintf(fp, " super table[%d]:\n", j); - - fprintf(fp, " stbName: %s\n", - g_Dbs.db[i].superTbls[j].stbName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "no"); - } else if (AUTO_CREATE_SUBTBL - == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "yes"); - } else { - fprintf(fp, " autoCreateTable: %s\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "no"); - } else if (TBL_ALREADY_EXISTS - == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "yes"); - } else { - fprintf(fp, " childTblExists: %s\n", "error"); - } - - fprintf(fp, " childTblCount: %"PRId64"\n", - g_Dbs.db[i].superTbls[j].childTblCount); - fprintf(fp, " childTblPrefix: %s\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - fprintf(fp, " dataSource: %s\n", - g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " iface: %s\n", - (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": - (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); - fprintf(fp, " insertRows: %"PRId64"\n", - g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %"PRIu64"\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - /* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - fprintf(fp, " multiThreadWriteOneTbl: no\n"); - }else { - fprintf(fp, " multiThreadWriteOneTbl: yes\n"); - } - */ - fprintf(fp, " interlaceRows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - fprintf(fp, " disorderRange: %d\n", - g_Dbs.db[i].superTbls[j].disorderRange); - fprintf(fp, " disorderRatio: %d\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %"PRIu64"\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - - fprintf(fp, " timeStampStep: 
%"PRId64"\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - fprintf(fp, " sampleFormat: %s\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", - g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", - g_Dbs.db[i].superTbls[j].tagsFile); - - fprintf(fp, " columnCount: %d\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "column[%d]:%s(%d) ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - fprintf(fp, "column[%d]:%s ", - k, g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - fprintf(fp, "\n"); - - fprintf(fp, " tagCount: %d\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "tag[%d]:%s(%d) ", - k, g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); - } - } - fprintf(fp, "\n"); - } - fprintf(fp, "\n"); - } - - SHOW_PARSE_RESULT_END_TO_FILE(fp); -} - -static void printfQueryMeta() { - - SHOW_PARSE_RESULT_START(); - - printf("host: \033[33m%s:%u\033[0m\n", - g_queryInfo.host, g_queryInfo.port); - printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); - printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); - - printf("\n"); - - if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { - printf("specified table query info: \n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.sqlCount); - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { - printf("specified tbl query times:\n"); - printf(" \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryTimes); - printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.concurrent); - printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync"); - printf("interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.specifiedQueryInfo.sql[i]); - } - printf("\n"); - } - - printf("super table query info:\n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.sqlCount); - - if 
(g_queryInfo.superQueryInfo.sqlCount > 0) { - printf("query interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.queryInterval); - printf("threadCnt: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%"PRId64"\033[0m\n", - g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.stbName); - printf("stb query times:\033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.queryTimes); - - printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync"); - printf("interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.superQueryInfo.sql[i]); - } - printf("\n"); - } - } - - SHOW_PARSE_RESULT_END(); -} - -static char* formatTimestamp(char* buf, int64_t val, int precision) { - time_t tt; - if (precision == TSDB_TIME_PRECISION_MICRO) { - tt = (time_t)(val / 1000000); - } if (precision == TSDB_TIME_PRECISION_NANO) { - tt = (time_t)(val / 1000000000); - } else { - tt = (time_t)(val / 1000); - } - - /* comment out as it make testcases like select_with_tags.sim fail. - but in windows, this may cause the call to localtime crash if tt < 0, - need to find a better solution. - if (tt < 0) { - tt = 0; - } - */ - -#ifdef WINDOWS - if (tt < 0) tt = 0; -#endif - - struct tm* ptm = localtime(&tt); - size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); - - if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); - } else if (precision == TSDB_TIME_PRECISION_NANO) { - sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); - } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); - } - - return buf; -} - -static void xDumpFieldToFile(FILE* fp, const char* val, - TAOS_FIELD* field, int32_t length, int precision) { - - if (val == NULL) { - fprintf(fp, "%s", TSDB_DATA_NULL_STR); - return; - } - - char buf[TSDB_MAX_BYTES_PER_ROW]; - switch (field->type) { - case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 
1 : 0)); - break; - - case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", *((int8_t *)val)); - break; - - case TSDB_DATA_TYPE_UTINYINT: - fprintf(fp, "%d", *((uint8_t *)val)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", *((int16_t *)val)); - break; - - case TSDB_DATA_TYPE_USMALLINT: - fprintf(fp, "%d", *((uint16_t *)val)); - break; - - case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int32_t *)val)); - break; - - case TSDB_DATA_TYPE_UINT: - fprintf(fp, "%d", *((uint32_t *)val)); - break; - - case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%"PRId64"", *((int64_t *)val)); - break; - - case TSDB_DATA_TYPE_UBIGINT: - fprintf(fp, "%"PRId64"", *((uint64_t *)val)); - break; - - case TSDB_DATA_TYPE_FLOAT: - fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(buf, val, length); - buf[length] = 0; - fprintf(fp, "\'%s\'", buf); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - formatTimestamp(buf, *(int64_t*)val, precision); - fprintf(fp, "'%s'", buf); - break; - - default: - break; - } -} - -static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { - TAOS_ROW row = taos_fetch_row(tres); - if (row == NULL) { - return 0; - } - - FILE* fp = fopen(fname, "at"); - if (fp == NULL) { - errorPrint2("%s() LN%d, failed to open file: %s\n", - __func__, __LINE__, fname); - return -1; - } - - int num_fields = taos_num_fields(tres); - TAOS_FIELD *fields = taos_fetch_fields(tres); - int precision = taos_result_precision(tres); - - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - fprintf(fp, ","); - } - fprintf(fp, "%s", fields[col].name); - } - fputc('\n', fp); - - int numOfRows = 0; - do { - int32_t* length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - if (i > 0) { - fputc(',', fp); - } - xDumpFieldToFile(fp, - (const char*)row[i], fields +i, length[i], precision); - } - fputc('\n', fp); - - numOfRows++; - row = taos_fetch_row(tres); - } while( row != NULL); - - fclose(fp); - - return numOfRows; -} - -static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; - - res = taos_query(taos, "show databases;"); - int32_t code = taos_errno(res); - - if (code != 0) { - errorPrint2("failed to run , reason: %s\n", - taos_errstr(res)); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - while((row = taos_fetch_row(res)) != NULL) { - // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { - continue; - } - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count); - return -1; - } - - tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - formatTimestamp(dbInfos[count]->create_time, - *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], - TSDB_TIME_PRECISION_MILLI); - dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - 
fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], - fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); - - count++; - if (count > MAX_DATABASE_COUNT) { - errorPrint("%s() LN%d, The database count overflow than %d\n", - __func__, __LINE__, MAX_DATABASE_COUNT); - break; - } - } - - return count; -} - -static void printfDbInfoForQueryToFile( - char* filename, SDbInfo* dbInfos, int index) { - - if (filename[0] == 0) - return; - - FILE *fp = fopen(filename, "at"); - if (fp == NULL) { - errorPrint( "failed to open file: %s\n", filename); - return; - } - - fprintf(fp, "================ database[%d] ================\n", index); - fprintf(fp, "name: %s\n", dbInfos->name); - fprintf(fp, "created_time: %s\n", dbInfos->create_time); - fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); - fprintf(fp, "replica: %d\n", dbInfos->replica); - fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); - fprintf(fp, "cache(MB): %d\n", dbInfos->cache); - fprintf(fp, "blocks: %d\n", dbInfos->blocks); - fprintf(fp, "minrows: %d\n", dbInfos->minrows); - fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); - fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); - fprintf(fp, "fsync: %d\n", dbInfos->fsync); - fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); - fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); - fprintf(fp, "\n"); - - fclose(fp); -} - -static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_FILE_NAME_LEN] = {0}; - char buffer[1024] = {0}; - TAOS_RES* res; - - time_t t; - struct tm* lt; - time(&t); - lt = localtime(&t); - snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", - lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, - lt->tm_sec); - - // show variables - res = taos_query(taos, "show variables;"); - //fetchResult(res, filename); - xDumpResultToFile(filename, res); - - // show dnodes - res = taos_query(taos, "show dnodes;"); - xDumpResultToFile(filename, res); - //fetchResult(res, filename); - - // show databases - res = taos_query(taos, "show databases;"); - SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); - return; - } - int dbCount = getDbFromServer(taos, dbInfos); - if (dbCount <= 0) { - free(dbInfos); - return; - } - - for (int i = 0; i < dbCount; i++) { - // printf database info 
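/*
 * Minimal standalone sketch -- not part of the removed file -- of the
 * query-and-dump pattern used by printfQuerySystemInfo()/xDumpResultToFile()
 * above: issue a statement with taos_query(), check taos_errno(), then walk
 * the result set with taos_fetch_row()/taos_print_row() and append each row
 * to a text file. Connection parameters and the output file name are
 * placeholders.
 */
#include <stdio.h>
#include <taos.h>

static int dump_query_to_file(TAOS *taos, const char *sql, const char *fname) {
    TAOS_RES *res = taos_query(taos, sql);
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query <%s> failed: %s\n", sql, taos_errstr(res));
        taos_free_result(res);
        return -1;
    }

    FILE *fp = fopen(fname, "at");
    if (fp == NULL) {
        fprintf(stderr, "failed to open %s\n", fname);
        taos_free_result(res);
        return -1;
    }

    int         num_fields = taos_field_count(res);
    TAOS_FIELD *fields     = taos_fetch_fields(res);
    TAOS_ROW    row;
    char        line[4096];            /* ample for one printed row in this sketch */

    while ((row = taos_fetch_row(res)) != NULL) {
        taos_print_row(line, row, fields, num_fields);
        fprintf(fp, "%s\n", line);
    }

    fclose(fp);
    taos_free_result(res);
    return 0;
}

int main(void) {
    /* Placeholder connection parameters -- adjust for a real deployment. */
    TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
    if (taos == NULL) {
        fprintf(stderr, "failed to connect to TDengine\n");
        return 1;
    }
    dump_query_to_file(taos, "show databases;", "querySystemInfo.txt");
    taos_close(taos);
    return 0;
}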
- printfDbInfoForQueryToFile(filename, dbInfos[i], i); - - // show db.vgroups - snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - - // show db.stables - snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name); - res = taos_query(taos, buffer); - xDumpResultToFile(filename, res); - free(dbInfos[i]); - } - - free(dbInfos); -} - -static int postProceSql(char *host, uint16_t port, - char* sqlstr, threadInfo *pThreadInfo) -{ - char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; - - char *url = "/rest/sql"; - - int bytes, sent, received, req_str_len, resp_len; - char *request_buf; - char response_buf[RESP_BUF_LEN]; - uint16_t rest_port = port + TSDB_PORT_HTTP; - - int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN; - - request_buf = malloc(req_buf_len); - if (NULL == request_buf) { - errorPrint("%s", "cannot allocate memory.\n"); - exit(EXIT_FAILURE); - } - - char userpass_buf[INPUT_BUF_LEN]; - int mod_table[] = {0, 2, 1}; - - static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', - 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', - 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', - 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', - 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', - 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', '0', '1', '2', '3', - '4', '5', '6', '7', '8', '9', '+', '/'}; - - if (g_args.test_mode == INSERT_TEST) { - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", - g_Dbs.user, g_Dbs.password); - } else { - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", - g_queryInfo.user, g_queryInfo.password); - } - - size_t userpass_buf_len = strlen(userpass_buf); - size_t encoded_len = 4 * ((userpass_buf_len +2) / 3); - - char base64_buf[INPUT_BUF_LEN]; - - memset(base64_buf, 0, INPUT_BUF_LEN); - - for (int n = 0, m = 0; n < userpass_buf_len;) { - uint32_t oct_a = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t oct_b = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t oct_c = n < userpass_buf_len ? 
- (unsigned char) userpass_buf[n++]:0; - uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; - - base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f]; - } - - for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) - base64_buf[encoded_len - 1 - l] = '='; - - debugPrint("%s() LN%d: auth string base64 encoded: %s\n", - __func__, __LINE__, base64_buf); - char *auth = base64_buf; - - int r = snprintf(request_buf, - req_buf_len, - req_fmt, url, host, rest_port, - auth, strlen(sqlstr), sqlstr); - if (r >= req_buf_len) { - free(request_buf); - ERROR_EXIT("too long request"); - } - verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); - - req_str_len = strlen(request_buf); - sent = 0; - do { -#ifdef WINDOWS - bytes = send(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent, 0); -#else - bytes = write(pThreadInfo->sockfd, request_buf + sent, req_str_len - sent); -#endif - if (bytes < 0) - ERROR_EXIT("writing message to socket"); - if (bytes == 0) - break; - sent+=bytes; - } while(sent < req_str_len); - - memset(response_buf, 0, RESP_BUF_LEN); - resp_len = sizeof(response_buf) - 1; - received = 0; - - char resEncodingChunk[] = "Encoding: chunked"; - char resHttp[] = "HTTP/1.1 "; - char resHttpOk[] = "HTTP/1.1 200 OK"; - - do { -#ifdef WINDOWS - bytes = recv(pThreadInfo->sockfd, response_buf + received, resp_len - received, 0); -#else - bytes = read(pThreadInfo->sockfd, response_buf + received, resp_len - received); -#endif - verbosePrint("%s() LN%d: bytes:%d\n", __func__, __LINE__, bytes); - if (bytes < 0) { - free(request_buf); - ERROR_EXIT("reading response from socket"); - } - if (bytes == 0) - break; - received += bytes; - - verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - - response_buf[RESP_BUF_LEN - 1] = '\0'; - if (strlen(response_buf)) { - if (((NULL == strstr(response_buf, resEncodingChunk)) - && (NULL != strstr(response_buf, resHttp))) - || ((NULL != strstr(response_buf, resHttpOk)) - && (NULL != strstr(response_buf, "\"status\":")))) { - debugPrint( - "%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n", - __func__, __LINE__, received, resp_len, response_buf); - break; - } - } - } while(received < resp_len); - - if (received == resp_len) { - free(request_buf); - ERROR_EXIT("storing complete response from socket"); - } - - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(response_buf, pThreadInfo); - } - - free(request_buf); - - response_buf[RESP_BUF_LEN - 1] = '\0'; - if (NULL == strstr(response_buf, resHttpOk)) { - errorPrint("%s() LN%d, Response:\n%s\n", - __func__, __LINE__, response_buf); - return -1; - } - return 0; -} - -static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - errorPrint2("%s() LN%d, calloc failed! size:%d\n", - __func__, __LINE__, TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); - - return dataBuf; -} - -static char *generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len) -{ - char* buf = (char*)calloc(len, 1); - if (NULL == buf) { - printf("calloc failed! 
size:%d\n", len); - return NULL; - } - - if (tableSeq % 2) { - tstrncpy(buf, "beijing", len); - } else { - tstrncpy(buf, "shanghai", len); - } - //rand_string(buf, stbInfo->tags[i].dataLen); - - return buf; -} - -static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); - for (int i = 0; i < stbInfo->tagCount; i++) { - if ((0 == strncasecmp(stbInfo->tags[i].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(stbInfo->tags[i].dataType, - "nchar", strlen("nchar")))) { - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - tmfree(dataBuf); - return NULL; - } - - int32_t tagBufLen = stbInfo->tags[i].dataLen + 1; - char *buf = generateBinaryNCharTagValues(tableSeq, tagBufLen); - if (NULL == buf) { - tmfree(dataBuf); - return NULL; - } - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "\'%s\',", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "int", strlen("int"))) { - if ((g_args.demo_mode) && (i == 0)) { - dataLen += snprintf(dataBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", (tableSeq % 10) + 1); - } else { - dataLen += snprintf(dataBuf + dataLen, - TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", tableSeq); - } - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "bigint", strlen("bigint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "float", strlen("float"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f,", rand_float()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "double", strlen("double"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f,", rand_double()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "smallint", strlen("smallint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "tinyint", strlen("tinyint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "bool", strlen("bool"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_bool()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "timestamp", strlen("timestamp"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", rand_ubigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "utinyint", strlen("utinyint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_utinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "usmallint", strlen("usmallint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_usmallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "uint", strlen("uint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d,", rand_uint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "ubigint", strlen("ubigint"))) { - dataLen += 
snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", rand_ubigint()); - } else { - errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType); - tmfree(dataBuf); - return NULL; - } - } - - dataLen -= 1; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); - return dataBuf; -} - -static int calcRowLen(SSuperTable* superTbls) { - int colIndex; - int lenOfOneRow = 0; - - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char* dataType = superTbls->columns[colIndex].dataType; - - switch(superTbls->columns[colIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - default: - errorPrint2("get error data type : %s\n", dataType); - exit(EXIT_FAILURE); - } - } - - superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp - - int tagIndex; - int lenOfTagOfOneRow = 0; - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char * dataType = superTbls->tags[tagIndex].dataType; - switch (superTbls->tags[tagIndex].data_type) - { - case TSDB_DATA_TYPE_BINARY: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - break; - case TSDB_DATA_TYPE_NCHAR: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + INT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_BOOL: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + BOOL_BUFF_LEN; - break; - case TSDB_DATA_TYPE_FLOAT: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; - break; - case TSDB_DATA_TYPE_DOUBLE: - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; - break; - default: - errorPrint2("get error tag type : %s\n", dataType); - exit(EXIT_FAILURE); - } - } - - superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - - return 0; -} - -static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, - char* dbName, char* stbName, char** childTblNameOfSuperTbl, - int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { - - char command[1024] = "\0"; - char limitBuf[100] = "\0"; - - TAOS_RES * res; - TAOS_ROW row = NULL; - - char* childTblName = *childTblNameOfSuperTbl; - - 
snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", - limit, offset); - - //get all child table name use cmd: select tbname from superTblName; - snprintf(command, 1024, "select tbname from %s.%s %s", dbName, stbName, limitBuf); - - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - taos_free_result(res); - taos_close(taos); - errorPrint2("%s() LN%d, failed to run command %s\n", - __func__, __LINE__, command); - exit(EXIT_FAILURE); - } - - int64_t childTblCount = (limit < 0)?10000:limit; - int64_t count = 0; - if (childTblName == NULL) { - childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); - if (NULL == childTblName) { - taos_free_result(res); - taos_close(taos); - errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(EXIT_FAILURE); - } - } - - char* pTblName = childTblName; - while((row = taos_fetch_row(res)) != NULL) { - int32_t* len = taos_fetch_lengths(res); - - if (0 == strlen((char *)row[0])) { - errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n", - __func__, __LINE__, count); - exit(EXIT_FAILURE); - } - - tstrncpy(pTblName, (char *)row[0], len[0]+1); - //printf("==== sub table name: %s\n", pTblName); - count++; - if (count >= childTblCount - 1) { - char *tmp = realloc(childTblName, - (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); - if (tmp != NULL) { - childTblName = tmp; - childTblCount = (int)(childTblCount*1.5); - memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, - (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); - } else { - // exit, if allocate more memory failed - tmfree(childTblName); - taos_free_result(res); - taos_close(taos); - errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, stbName); - exit(EXIT_FAILURE); - } - } - pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; - } - - *childTblCountOfSuperTbl = count; - *childTblNameOfSuperTbl = childTblName; - - taos_free_result(res); - return 0; -} - -static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, - char* stbName, char** childTblNameOfSuperTbl, - int64_t* childTblCountOfSuperTbl) { - - return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName, - childTblNameOfSuperTbl, childTblCountOfSuperTbl, - -1, 0); -} - -static int getSuperTableFromServer(TAOS * taos, char* dbName, - SSuperTable* superTbls) { - - char command[1024] = "\0"; - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; - - //get schema use cmd: describe superTblName; - snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printf("failed to run command %s\n", command); - taos_free_result(res); - return -1; - } - - int tagIndex = 0; - int columnIndex = 0; - TAOS_FIELD *fields = taos_fetch_fields(res); - while((row = taos_fetch_row(res)) != NULL) { - if (0 == count) { - count++; - continue; - } - - if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { - tstrncpy(superTbls->tags[tagIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT", strlen("INT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT", strlen("TINYINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == 
strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT", strlen("SMALLINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT", strlen("BIGINT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "FLOAT", strlen("FLOAT"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "DOUBLE", strlen("DOUBLE"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BINARY", strlen("BINARY"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "NCHAR", strlen("NCHAR"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BOOL", strlen("BOOL"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT UNSIGNED", strlen("TINYINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UTINYINT; - tstrncpy(superTbls->tags[tagIndex].dataType,"UTINYINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT UNSIGNED", strlen("SMALLINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_USMALLINT; - tstrncpy(superTbls->tags[tagIndex].dataType,"USMALLINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT UNSIGNED", strlen("INT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UINT; - tstrncpy(superTbls->tags[tagIndex].dataType,"UINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - }else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_UBIGINT; - tstrncpy(superTbls->tags[tagIndex].dataType,"UBIGINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else { - superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL; - } - superTbls->tags[tagIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->tags[tagIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(NOTE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); - if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) - { - tstrncpy(superTbls->tags[tagIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } - tagIndex++; - } else { - tstrncpy(superTbls->columns[columnIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - - - if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT", strlen("INT")) && - 
strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT", strlen("TINYINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT", strlen("SMALLINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT", strlen("BIGINT")) && - strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "FLOAT", strlen("FLOAT"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "DOUBLE", strlen("DOUBLE"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BINARY", strlen("BINARY"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "NCHAR", strlen("NCHAR"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BOOL", strlen("BOOL"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "TINYINT UNSIGNED", strlen("TINYINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UTINYINT; - tstrncpy(superTbls->columns[columnIndex].dataType,"UTINYINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "SMALLINT UNSIGNED", strlen("SMALLINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_USMALLINT; - tstrncpy(superTbls->columns[columnIndex].dataType,"USMALLINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "INT UNSIGNED", strlen("INT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UINT; - tstrncpy(superTbls->columns[columnIndex].dataType,"UINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else if (0 == strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - "BIGINT UNSIGNED", strlen("BIGINT UNSIGNED"))) { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_UBIGINT; - tstrncpy(superTbls->columns[columnIndex].dataType,"UBIGINT", - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } else { - superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL; - } - superTbls->columns[columnIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - 
tstrncpy(superTbls->columns[columnIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(NOTE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); - - if (strstr((char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], "UNSIGNED") == NULL) { - tstrncpy(superTbls->columns[columnIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(DATATYPE_BUFF_LEN, - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); - } - - columnIndex++; - } - count++; - } - - superTbls->columnCount = columnIndex; - superTbls->tagCount = tagIndex; - taos_free_result(res); - - calcRowLen(superTbls); - - /* - if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { - //get all child table name use cmd: select tbname from superTblName; - int childTblCount = 10000; - superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); - if (superTbls->childTblName == NULL) { - errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); - return -1; - } - getAllChildNameOfSuperTable(taos, dbName, - superTbls->stbName, - &superTbls->childTblName, - &superTbls->childTblCount); - } - */ - return 0; -} - -static int createSuperTable( - TAOS * taos, char* dbName, - SSuperTable* superTbl) { - - char *command = calloc(1, BUFFER_SIZE); - assert(command); - - char cols[COL_BUFFER_LEN] = "\0"; - int len = 0; - - int lenOfOneRow = 0; - - if (superTbl->columnCount == 0) { - errorPrint2("%s() LN%d, super table column count is %d\n", - __func__, __LINE__, superTbl->columnCount); - free(command); - return -1; - } - - for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - - switch(superTbl->columns[colIndex].data_type) { - case TSDB_DATA_TYPE_BINARY: - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ",C%d %s(%d)", colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_NCHAR: - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ",C%d %s(%d)", colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - break; - - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (colIndex == 1)) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", VOLTAGE INT"); - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); - } - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BIGINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT"); - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_SMALLINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT"); - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TINYINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_BOOL: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); - lenOfOneRow += BOOL_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (colIndex == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); - } else if (colIndex == 2) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); - } - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); - } - - lenOfOneRow += FLOAT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_DOUBLE: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, 
"DOUBLE"); - lenOfOneRow += DOUBLE_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TIMESTAMP"); - lenOfOneRow += TIMESTAMP_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UTINYINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TINYINT UNSIGNED"); - lenOfOneRow += TINYINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_USMALLINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT UNSIGNED"); - lenOfOneRow += SMALLINT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "INT UNSIGNED"); - lenOfOneRow += INT_BUFF_LEN; - break; - - case TSDB_DATA_TYPE_UBIGINT: - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT UNSIGNED"); - lenOfOneRow += BIGINT_BUFF_LEN; - break; - - default: - taos_close(taos); - free(command); - errorPrint2("%s() LN%d, config error data type : %s\n", - __func__, __LINE__, superTbl->columns[colIndex].dataType); - exit(EXIT_FAILURE); - } - } - - superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp - - // save for creating child table - superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); - if (NULL == superTbl->colsOfCreateChildTable) { - taos_close(taos); - free(command); - errorPrint2("%s() LN%d, Failed when calloc, size:%d", - __func__, __LINE__, len+1); - exit(EXIT_FAILURE); - } - - snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", - __func__, __LINE__, superTbl->colsOfCreateChildTable); - - if (superTbl->tagCount == 0) { - errorPrint2("%s() LN%d, super table tag count is %d\n", - __func__, __LINE__, superTbl->tagCount); - free(command); - return -1; - } - - char tags[TSDB_MAX_TAGS_LEN] = "\0"; - int tagIndex; - len = 0; - - int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); - for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { - char* dataType = superTbl->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - if ((g_args.demo_mode) && (tagIndex == 1)) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "location BINARY(%d),", - superTbl->tags[tagIndex].dataLen); - } else { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s(%d),", tagIndex, "BINARY", - superTbl->tags[tagIndex].dataLen); - } - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s(%d),", tagIndex, - "NCHAR", superTbl->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - if ((g_args.demo_mode) && (tagIndex == 0)) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "groupId INT, "); - } else { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "INT"); - } - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "BIGINT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "SMALLINT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, 
"TINYINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "TINYINT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "BOOL"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "FLOAT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "DOUBLE"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "UTINYINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "TINYINT UNSIGNED"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "USMALLINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "SMALLINT UNSIGNED"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "UINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "INT UNSIGNED"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + INT_BUFF_LEN; - } else if (strcasecmp(dataType, "UBIGINT") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "BIGINT UNSIGNED"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, - "T%d %s,", tagIndex, "TIMESTAMP"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + TIMESTAMP_BUFF_LEN; - } else { - taos_close(taos); - free(command); - errorPrint2("%s() LN%d, config error tag type : %s\n", - __func__, __LINE__, dataType); - exit(EXIT_FAILURE); - } - } - - len -= 1; - len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); - - superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; - - - snprintf(command, BUFFER_SIZE, - "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s", - dbName, superTbl->stbName, cols, tags); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint2("create supertable %s failed!\n\n", - superTbl->stbName); - free(command); - return -1; - } - - debugPrint("create supertable %s success!\n\n", superTbl->stbName); - free(command); - return 0; -} - -int createDatabasesAndStables(char *command) { - TAOS * taos = NULL; - int ret = 0; - taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); - if (taos == NULL) { - errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - return -1; - } - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].drop) { - sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - return -1; - } - - int dataLen = 0; - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s", - g_Dbs.db[i].dbName); - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " BLOCKS %d", - g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - dataLen += snprintf(command + 
dataLen, - BUFFER_SIZE - dataLen, " CACHE %d", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " DAYS %d", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " KEEP %d", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.quorum > 1) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " QUORUM %d", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " REPLICA %d", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " UPDATE %d", - g_Dbs.db[i].dbCfg.update); - } - //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { - // dataLen += snprintf(command + dataLen, - // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); - //} - if (g_Dbs.db[i].dbCfg.minRows > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " MINROWS %d", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " MAXROWS %d", - g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " COMP %d", - g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.cacheLast > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " CACHELAST %d", - g_Dbs.db[i].dbCfg.cacheLast); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " FSYNC %d", g_Dbs.db[i].dbCfg.fsync); - } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "ns", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", 2))) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); - } - - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - errorPrint("\ncreate database %s failed!\n\n", - g_Dbs.db[i].dbName); - return -1; - } - printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); - } - - debugPrint("%s() LN%d supertbl count:%"PRIu64"\n", - __func__, __LINE__, g_Dbs.db[i].superTblCount); - - int validStbCount = 0; - - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].stbName); - ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); - - if ((ret != 0) || (g_Dbs.db[i].drop)) { - ret = createSuperTable(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); - - if (0 != ret) { - errorPrint("create super table %"PRIu64" failed!\n\n", j); - continue; - } - } else { - ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); - if (0 != ret) { - errorPrint2("\nget super table %s.%s info failed!\n\n", - g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName); - continue; - } - } - validStbCount ++; - } - g_Dbs.db[i].superTblCount = validStbCount; - } - - taos_close(taos); - return 0; -} - -static void* createTable(void *sarg) -{ - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - 
setThreadName("createTable"); - - uint64_t lastPrintTime = taosGetTimestampMs(); - - int buff_len = BUFFER_SIZE; - - pThreadInfo->buffer = calloc(buff_len, 1); - if (pThreadInfo->buffer == NULL) { - errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(EXIT_FAILURE); - } - - int len = 0; - int batchNum = 0; - - verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->end_table_to); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(pThreadInfo->buffer, buff_len, - "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;", - pThreadInfo->db_name, - g_args.tb_prefix, i, - pThreadInfo->cols); - batchNum ++; - } else { - if (stbInfo == NULL) { - free(pThreadInfo->buffer); - errorPrint2("%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); - exit(EXIT_FAILURE); - } else { - if (0 == len) { - batchNum = 0; - memset(pThreadInfo->buffer, 0, buff_len); - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, "CREATE TABLE "); - } - - char* tagsValBuf = NULL; - if (0 == stbInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(stbInfo, i); - } else { - if (0 == stbInfo->tagSampleCount) { - free(pThreadInfo->buffer); - ERROR_EXIT("use sample file for tag, but has no content!\n"); - } - tagsValBuf = getTagValueFromTagSample( - stbInfo, - i % stbInfo->tagSampleCount); - } - - if (NULL == tagsValBuf) { - free(pThreadInfo->buffer); - ERROR_EXIT("use metric, but tag buffer is NULL\n"); - } - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, - "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, stbInfo->childTblPrefix, - i, pThreadInfo->db_name, - stbInfo->stbName, tagsValBuf); - free(tagsValBuf); - batchNum++; - if ((batchNum < stbInfo->batchCreateTableNum) - && ((buff_len - len) - >= (stbInfo->lenOfTagOfOneRow + 256))) { - continue; - } - } - } - - len = 0; - - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); - free(pThreadInfo->buffer); - return NULL; - } - pThreadInfo->tables_created += batchNum; - uint64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); - lastPrintTime = currentPrintTime; - } - } - - if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint2("queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); - } - pThreadInfo->tables_created += batchNum; - } - free(pThreadInfo->buffer); - return NULL; -} - -static int startMultiThreadCreateChildTable( - char* cols, int threads, uint64_t tableFrom, int64_t ntables, - char* db_name, SSuperTable* stbInfo) { - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - - if ((NULL == pids) || (NULL == infos)) { - ERROR_EXIT("createChildTable malloc failed\n"); - } - - if (threads < 1) { - threads = 1; - } - - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int64_t b = 0; - b = ntables % threads; - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->stbInfo = stbInfo; - verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); - pThreadInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - db_name, - g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n", - __func__, __LINE__, taos_errstr(NULL)); - free(pids); - free(infos); - return -1; - } - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->use_metric = true; - pThreadInfo->cols = cols; - pThreadInfo->minDelay = UINT64_MAX; - pThreadInfo->tables_created = 0; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - } - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - - g_actualChildTables += pThreadInfo->tables_created; - } - - free(pids); - free(infos); - - return 0; -} - -static void createChildTables() { - char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; - int len; - - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - // with super table - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL - == g_Dbs.db[i].superTbls[j].autoCreateTable) - || (TBL_ALREADY_EXISTS - == g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - uint64_t startFrom = 0; - - verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n", - __func__, __LINE__, g_totalChildTables, startFrom); - - startMultiThreadCreateChildTable( - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, - g_Dbs.threadCountForCreateTbl, - startFrom, - g_Dbs.db[i].superTbls[j].childTblCount, - g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); - } - } - } else { - // normal table - len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); - for (int j = 0; j < g_args.columnCount; j++) { - if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.dataType[j], - "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth); - } else { - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s", j, g_args.dataType[j]); - } - len = strlen(tblColsBuf); - } - - snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); - - verbosePrint("%s() LN%d: dbName: %s num of tb: 
%"PRId64" schema: %s\n", - __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf); - startMultiThreadCreateChildTable( - tblColsBuf, - g_Dbs.threadCountForCreateTbl, - 0, - g_args.ntables, - g_Dbs.db[i].dbName, - NULL); - } - } -} - -/* - Read 10000 lines at most. If more than 10000 lines, continue to read after using - */ -static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - - FILE *fp = fopen(stbInfo->tagsFile, "r"); - if (fp == NULL) { - printf("Failed to open tags file: %s, reason:%s\n", - stbInfo->tagsFile, strerror(errno)); - return -1; - } - - if (stbInfo->tagDataBuf) { - free(stbInfo->tagDataBuf); - stbInfo->tagDataBuf = NULL; - } - - int tagCount = 10000; - int count = 0; - char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); - if (tagDataBuf == NULL) { - printf("Failed to calloc, reason:%s\n", strerror(errno)); - fclose(fp); - return -1; - } - - while((readLen = tgetline(&line, &n, fp)) != -1) { - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); - count++; - - if (count >= tagCount - 1) { - char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow); - if (tmp != NULL) { - tagDataBuf = tmp; - tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow, - 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow)); - } else { - // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile); - tmfree(tagDataBuf); - free(line); - fclose(fp); - return -1; - } - } - } - - stbInfo->tagDataBuf = tagDataBuf; - stbInfo->tagSampleCount = count; - - free(line); - fclose(fp); - return 0; -} - -static void getAndSetRowsFromCsvFile(SSuperTable *stbInfo) { - FILE *fp = fopen(stbInfo->sampleFile, "r"); - int line_count = 0; - if (fp == NULL) { - errorPrint("Failed to open sample file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - exit(EXIT_FAILURE); - } - char *buf = calloc(1, stbInfo->maxSqlLen); - while (fgets(buf, stbInfo->maxSqlLen, fp)) { - line_count++; - } - fclose(fp); - tmfree(buf); - stbInfo->insertRows = line_count; -} - -/* - Read 10000 lines at most. 
If more than 10000 lines, continue to read after using - */ -static int generateSampleFromCsvForStb( - SSuperTable* stbInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - int getRows = 0; - - FILE* fp = fopen(stbInfo->sampleFile, "r"); - if (fp == NULL) { - errorPrint("Failed to open sample file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - return -1; - } - - assert(stbInfo->sampleDataBuf); - memset(stbInfo->sampleDataBuf, 0, - MAX_SAMPLES * stbInfo->lenOfOneRow); - while(1) { - readLen = tgetline(&line, &n, fp); - if (-1 == readLen) { - if(0 != fseek(fp, 0, SEEK_SET)) { - errorPrint("Failed to fseek file: %s, reason:%s\n", - stbInfo->sampleFile, strerror(errno)); - fclose(fp); - return -1; - } - continue; - } - - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } - - if (readLen == 0) { - continue; - } - - if (readLen > stbInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", - (int32_t)readLen, stbInfo->lenOfOneRow); - continue; - } - - memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, - line, readLen); - getRows++; - - if (getRows == MAX_SAMPLES) { - break; - } - } - - fclose(fp); - tmfree(line); - return 0; -} - -static bool getColumnAndTagTypeFromInsertJsonFile( - cJSON* stbInfo, SSuperTable* superTbls) { - bool ret = false; - - // columns - cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); - if (columns && columns->type != cJSON_Array) { - errorPrint("%s", "failed to read json, columns not found\n"); - goto PARSE_OVER; - } else if (NULL == columns) { - superTbls->columnCount = 0; - superTbls->tagCount = 0; - return true; - } - - int columnSize = cJSON_GetArraySize(columns); - if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) { - errorPrint("failed to read json, column size overflow, max column size is %d\n", - TSDB_MAX_COLUMNS); - goto PARSE_OVER; - } - - int count = 1; - int index = 0; - StrColumn columnCase; - - //superTbls->columnCount = columnSize; - for (int k = 0; k < columnSize; ++k) { - cJSON* column = cJSON_GetArrayItem(columns, k); - if (column == NULL) continue; - - count = 1; - cJSON* countObj = cJSON_GetObjectItem(column, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String - || dataType->valuestring == NULL) { - errorPrint("%s", "failed to read json, column type not found\n"); - goto PARSE_OVER; - } - //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN); - tstrncpy(columnCase.dataType, dataType->valuestring, - min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); - - cJSON* dataLen = cJSON_GetObjectItem(column, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to read json, column len not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - columnCase.dataLen = SMALL_BUFF_LEN; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->columns[index].dataType, - columnCase.dataType, - min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); - - 
superTbls->columns[index].dataLen = columnCase.dataLen; - index++; - } - } - - if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { - errorPrint("failed to read json, column size overflow, allowed max column size is %d\n", - MAX_NUM_COLUMNS); - goto PARSE_OVER; - } - - superTbls->columnCount = index; - - for (int c = 0; c < superTbls->columnCount; c++) { - if (0 == strncasecmp(superTbls->columns[c].dataType, - "INT", strlen("INT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "TINYINT", strlen("TINYINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "SMALLINT", strlen("SMALLINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "BIGINT", strlen("BIGINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "FLOAT", strlen("FLOAT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "DOUBLE", strlen("DOUBLE"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "BINARY", strlen("BINARY"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "NCHAR", strlen("NCHAR"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "BOOL", strlen("BOOL"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "UTINYINT", strlen("UTINYINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "USMALLINT", strlen("USMALLINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "UINT", strlen("UINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UINT; - } else if (0 == strncasecmp(superTbls->columns[c].dataType, - "UBIGINT", strlen("UBIGINT"))) { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_UBIGINT; - } else { - superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL; - } - } - - count = 1; - index = 0; - // tags - cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); - if (!tags || tags->type != cJSON_Array) { - errorPrint("%s", "failed to read json, tags not found\n"); - goto PARSE_OVER; - } - - int tagSize = cJSON_GetArraySize(tags); - if (tagSize > TSDB_MAX_TAGS) { - errorPrint("failed to read json, tags size overflow, max tag size is %d\n", - TSDB_MAX_TAGS); - goto PARSE_OVER; - } - - //superTbls->tagCount = tagSize; - for (int k = 0; k < tagSize; ++k) { - cJSON* tag = cJSON_GetArrayItem(tags, k); - if (tag == NULL) continue; - - count = 1; - cJSON* countObj = cJSON_GetObjectItem(tag, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON 
*dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String - || dataType->valuestring == NULL) { - errorPrint("%s", "failed to read json, tag type not found\n"); - goto PARSE_OVER; - } - tstrncpy(columnCase.dataType, dataType->valuestring, - min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); - - cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - errorPrint("%s", "failed to read json, column len not found\n"); - goto PARSE_OVER; - } else { - columnCase.dataLen = 0; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); - superTbls->tags[index].dataLen = columnCase.dataLen; - index++; - } - } - - if (index > TSDB_MAX_TAGS) { - errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n", - TSDB_MAX_TAGS); - goto PARSE_OVER; - } - - superTbls->tagCount = index; - - for (int t = 0; t < superTbls->tagCount; t++) { - if (0 == strncasecmp(superTbls->tags[t].dataType, - "INT", strlen("INT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "TINYINT", strlen("TINYINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "SMALLINT", strlen("SMALLINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "BIGINT", strlen("BIGINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "FLOAT", strlen("FLOAT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "DOUBLE", strlen("DOUBLE"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "BINARY", strlen("BINARY"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "NCHAR", strlen("NCHAR"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "BOOL", strlen("BOOL"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "UTINYINT", strlen("UTINYINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UTINYINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "USMALLINT", strlen("USMALLINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_USMALLINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "UINT", strlen("UINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UINT; - } else if (0 == strncasecmp(superTbls->tags[t].dataType, - "UBIGINT", strlen("UBIGINT"))) { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_UBIGINT; - } else { - superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL; - } - } - - if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { - errorPrint("columns + tags is more than allowed max columns count: %d\n", - TSDB_MAX_COLUMNS); - goto PARSE_OVER; - } - ret = true; - -PARSE_OVER: - return ret; -} 
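/*
 * Editorial aside, not part of the patch: the removed parser above maps type
 * strings such as "INT" or "UBIGINT" onto TSDB_DATA_TYPE_* values with one
 * long if/else-if chain for columns and another for tags.  The sketch below
 * shows the same mapping done once through a lookup table; the helper name
 * taosdemoTypeFromString() is illustrative only, and it assumes the
 * TSDB_DATA_TYPE_* constants already referenced in the code above.  Unlike
 * the prefix matches in the original chain, it requires an exact,
 * case-insensitive name.
 */
#include <strings.h>   /* strcasecmp */

typedef struct {
    const char *name;
    int         type;   /* a TSDB_DATA_TYPE_* value */
} TypeNameMap;

static int taosdemoTypeFromString(const char *s) {
    static const TypeNameMap map[] = {
        {"INT",      TSDB_DATA_TYPE_INT},      {"UINT",      TSDB_DATA_TYPE_UINT},
        {"TINYINT",  TSDB_DATA_TYPE_TINYINT},  {"UTINYINT",  TSDB_DATA_TYPE_UTINYINT},
        {"SMALLINT", TSDB_DATA_TYPE_SMALLINT}, {"USMALLINT", TSDB_DATA_TYPE_USMALLINT},
        {"BIGINT",   TSDB_DATA_TYPE_BIGINT},   {"UBIGINT",   TSDB_DATA_TYPE_UBIGINT},
        {"FLOAT",    TSDB_DATA_TYPE_FLOAT},    {"DOUBLE",    TSDB_DATA_TYPE_DOUBLE},
        {"BINARY",   TSDB_DATA_TYPE_BINARY},   {"NCHAR",     TSDB_DATA_TYPE_NCHAR},
        {"BOOL",     TSDB_DATA_TYPE_BOOL},     {"TIMESTAMP", TSDB_DATA_TYPE_TIMESTAMP},
    };
    int n = (int)(sizeof(map) / sizeof(map[0]));
    for (int i = 0; i < n; i++) {
        if (0 == strcasecmp(s, map[i].name)) {
            return map[i].type;
        }
    }
    /* Unknown names fall back to the NULL type, mirroring the original else branch. */
    return TSDB_DATA_TYPE_NULL;
}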
- -static bool getMetaFromInsertJsonFile(cJSON* root) { - bool ret = false; - - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - errorPrint("%s", "failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_Dbs.port = port->valueint; - } else if (!port) { - g_Dbs.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE); - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); - } else if (!password) { - tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN); - } - - cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); - if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { - tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); - } else if (!resultfile) { - tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); - } - - cJSON* threads = cJSON_GetObjectItem(root, "thread_count"); - if (threads && threads->type == cJSON_Number) { - g_Dbs.threadCount = threads->valueint; - } else if (!threads) { - g_Dbs.threadCount = 1; - } else { - errorPrint("%s", "failed to read json, threads not found\n"); - goto PARSE_OVER; - } - - cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); - if (threads2 && threads2->type == cJSON_Number) { - g_Dbs.threadCountForCreateTbl = threads2->valueint; - } else if (!threads2) { - g_Dbs.threadCountForCreateTbl = 1; - } else { - errorPrint("%s", "failed to read json, threads2 not found\n"); - goto PARSE_OVER; - } - - cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); - if (gInsertInterval && gInsertInterval->type == cJSON_Number) { - if (gInsertInterval->valueint <0) { - errorPrint("%s", "failed to read json, insert interval input mistake\n"); - goto PARSE_OVER; - } - g_args.insert_interval = gInsertInterval->valueint; - } else if (!gInsertInterval) { - g_args.insert_interval = 0; - } else { - errorPrint("%s", "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); - if (interlaceRows && interlaceRows->type == cJSON_Number) { - if (interlaceRows->valueint < 0) { - errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); - goto PARSE_OVER; - - } - g_args.interlaceRows = interlaceRows->valueint; - } else if (!interlaceRows) { - g_args.interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req - } else { - errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); - goto PARSE_OVER; - } - - cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - if (maxSqlLen->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.max_sql_len = maxSqlLen->valueint; - } else if (!maxSqlLen) { - g_args.max_sql_len = (1024*1024); - } else { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); - if (numRecPerReq && numRecPerReq->type == cJSON_Number) { - if (numRecPerReq->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { - printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n", - numRecPerReq->valueint, MAX_RECORDS_PER_REQ); - printf(" number of records per request value will be set to %d\n\n", - MAX_RECORDS_PER_REQ); - prompt(); - numRecPerReq->valueint = MAX_RECORDS_PER_REQ; - } - g_args.reqPerReq = numRecPerReq->valueint; - } else if (!numRecPerReq) { - g_args.reqPerReq = MAX_RECORDS_PER_REQ; - } else { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* prepareRand = cJSON_GetObjectItem(root, "prepared_rand"); - if (prepareRand && prepareRand->type == cJSON_Number) { - if (prepareRand->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, prepared_rand input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.prepared_rand = prepareRand->valueint; - } else if (!prepareRand) { - g_args.prepared_rand = 10000; - } else { - errorPrint("%s() LN%d, failed to read json, prepared_rand not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt - && answerPrompt->type == cJSON_String - && answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = true; // default is no, mean answer_yes. 
- } else { - errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n"); - goto PARSE_OVER; - } - - // rows per table need be less than insert batch - if (g_args.interlaceRows > g_args.reqPerReq) { - printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlaceRows, g_args.reqPerReq); - printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", - g_args.reqPerReq); - prompt(); - g_args.interlaceRows = g_args.reqPerReq; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (!dbs || dbs->type != cJSON_Array) { - errorPrint("%s", "failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - int dbSize = cJSON_GetArraySize(dbs); - if (dbSize > MAX_DB_COUNT) { - errorPrint( - "failed to read json, databases size overflow, max database is %d\n", - MAX_DB_COUNT); - goto PARSE_OVER; - } - g_Dbs.db = calloc(1, sizeof(SDataBase)*dbSize); - assert(g_Dbs.db); - g_Dbs.dbCount = dbSize; - for (int i = 0; i < dbSize; ++i) { - cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); - if (dbinfos == NULL) continue; - - // dbinfo - cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); - if (!dbinfo || dbinfo->type != cJSON_Object) { - errorPrint("%s", "failed to read json, dbinfo not found\n"); - goto PARSE_OVER; - } - - cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); - if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { - errorPrint("%s", "failed to read json, db name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); - - cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); - if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { - if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { - g_Dbs.db[i].drop = true; - } else { - g_Dbs.db[i].drop = false; - } - } else if (!drop) { - g_Dbs.db[i].drop = g_args.drop_database; - } else { - errorPrint("%s", "failed to read json, drop input mistake\n"); - goto PARSE_OVER; - } - - cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); - if (precision && precision->type == cJSON_String - && precision->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - SMALL_BUFF_LEN); - } else if (!precision) { - memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); - } else { - errorPrint("%s", "failed to read json, precision not found\n"); - goto PARSE_OVER; - } - - cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); - if (update && update->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.update = update->valueint; - } else if (!update) { - g_Dbs.db[i].dbCfg.update = -1; - } else { - errorPrint("%s", "failed to read json, update not found\n"); - goto PARSE_OVER; - } - - cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); - if (replica && replica->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.replica = replica->valueint; - } else if (!replica) { - g_Dbs.db[i].dbCfg.replica = -1; - } else { - errorPrint("%s", "failed to read json, replica not found\n"); - goto PARSE_OVER; - } - - cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); - if (keep && keep->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.keep = keep->valueint; - } else if (!keep) { - g_Dbs.db[i].dbCfg.keep = -1; - } else { - errorPrint("%s", "failed to read json, keep not found\n"); - goto PARSE_OVER; - } - - cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); - if (days && days->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.days = days->valueint; - } else if (!days) { - 
g_Dbs.db[i].dbCfg.days = -1; - } else { - errorPrint("%s", "failed to read json, days not found\n"); - goto PARSE_OVER; - } - - cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); - if (cache && cache->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cache = cache->valueint; - } else if (!cache) { - g_Dbs.db[i].dbCfg.cache = -1; - } else { - errorPrint("%s", "failed to read json, cache not found\n"); - goto PARSE_OVER; - } - - cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); - if (blocks && blocks->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.blocks = blocks->valueint; - } else if (!blocks) { - g_Dbs.db[i].dbCfg.blocks = -1; - } else { - errorPrint("%s", "failed to read json, block not found\n"); - goto PARSE_OVER; - } - - //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); - //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; - //} else if (!maxtablesPerVnode) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; - //} else { - // printf("failed to read json, maxtablesPerVnode not found"); - // goto PARSE_OVER; - //} - - cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows"); - if (minRows && minRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.minRows = minRows->valueint; - } else if (!minRows) { - g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default - } else { - errorPrint("%s", "failed to read json, minRows not found\n"); - goto PARSE_OVER; - } - - cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); - if (maxRows && maxRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; - } else if (!maxRows) { - g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default - } else { - errorPrint("%s", "failed to read json, maxRows not found\n"); - goto PARSE_OVER; - } - - cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); - if (comp && comp->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.comp = comp->valueint; - } else if (!comp) { - g_Dbs.db[i].dbCfg.comp = -1; - } else { - errorPrint("%s", "failed to read json, comp not found\n"); - goto PARSE_OVER; - } - - cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel"); - if (walLevel && walLevel->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint; - } else if (!walLevel) { - g_Dbs.db[i].dbCfg.walLevel = -1; - } else { - errorPrint("%s", "failed to read json, walLevel not found\n"); - goto PARSE_OVER; - } - - cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast"); - if (cacheLast && cacheLast->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cacheLast = cacheLast->valueint; - } else if (!cacheLast) { - g_Dbs.db[i].dbCfg.cacheLast = -1; - } else { - errorPrint("%s", "failed to read json, cacheLast not found\n"); - goto PARSE_OVER; - } - - cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum"); - if (quorum && quorum->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.quorum = quorum->valueint; - } else if (!quorum) { - g_Dbs.db[i].dbCfg.quorum = 1; - } else { - printf("failed to read json, quorum input mistake"); - goto PARSE_OVER; - } - - cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync"); - if (fsync && fsync->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.fsync = fsync->valueint; - } else if (!fsync) { - g_Dbs.db[i].dbCfg.fsync = -1; - } else { - errorPrint("%s", "failed to read json, fsync input mistake\n"); - goto PARSE_OVER; - } - - // super_tables - cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); - if (!stables || stables->type != cJSON_Array) { - errorPrint("%s", "failed to read json, 
super_tables not found\n"); - goto PARSE_OVER; - } - - int stbSize = cJSON_GetArraySize(stables); - if (stbSize > MAX_SUPER_TABLE_COUNT) { - errorPrint( - "failed to read json, supertable size overflow, max supertable is %d\n", - MAX_SUPER_TABLE_COUNT); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls = calloc(1, stbSize * sizeof(SSuperTable)); - assert(g_Dbs.db[i].superTbls); - g_Dbs.db[i].superTblCount = stbSize; - for (int j = 0; j < stbSize; ++j) { - cJSON* stbInfo = cJSON_GetArrayItem(stables, j); - if (stbInfo == NULL) continue; - - // dbinfo - cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); - if (!stbName || stbName->type != cJSON_String - || stbName->valuestring == NULL) { - errorPrint("%s", "failed to read json, stb name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring, - TSDB_TABLE_NAME_LEN); - - cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); - if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { - errorPrint("%s", "failed to read json, childtable_prefix not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, - TBNAME_PREFIX_LEN); - - cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); - if (autoCreateTbl - && autoCreateTbl->type == cJSON_String - && autoCreateTbl->valuestring != NULL) { - if ((0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) - && (TBL_ALREADY_EXISTS != g_Dbs.db[i].superTbls[j].childTblExists)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; - } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } - } else if (!autoCreateTbl) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - errorPrint("%s", "failed to read json, auto_create_table not found\n"); - goto PARSE_OVER; - } - - cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); - if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; - } else if (!batchCreateTbl) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = 10; - } else { - errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n"); - goto PARSE_OVER; - } - - cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no - if (childTblExists - && childTblExists->type == cJSON_String - && childTblExists->valuestring != NULL) { - if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) - && (g_Dbs.db[i].drop == false)) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; - } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) - || (g_Dbs.db[i].drop == true))) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } - } else if (!childTblExists) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - errorPrint("%s", - "failed to read json, child_table_exists not found\n"); - goto PARSE_OVER; - } - - if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } - - cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); - if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint("%s", - "failed to read 
json, childtable_count input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - - cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); - if (dataSource && dataSource->type == cJSON_String - && dataSource->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, - min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1)); - } else if (!dataSource) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", - min(SMALL_BUFF_LEN, strlen("rand") + 1)); - } else { - errorPrint("%s", "failed to read json, data_source not found\n"); - goto PARSE_OVER; - } - - cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt - if (stbIface && stbIface->type == cJSON_String - && stbIface->valuestring != NULL) { - if (0 == strcasecmp(stbIface->valuestring, "taosc")) { - g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { - g_Dbs.db[i].superTbls[j].iface= REST_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { - g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; - } else { - errorPrint("failed to read json, insert_mode %s not recognized\n", - stbIface->valuestring); - goto PARSE_OVER; - } - } else if (!stbIface) { - g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; - } else { - errorPrint("%s", "failed to read json, insert_mode not found\n"); - goto PARSE_OVER; - } - - cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if ((childTbl_limit) && (g_Dbs.db[i].drop != true) - && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { - if (childTbl_limit->type != cJSON_Number) { - errorPrint("%s", "failed to read json, childtable_limit\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. 
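Nearly every scalar option in the removed parser above follows the same three-way cJSON pattern: accept the value when the key is present with the expected type, fall back to a default when the key is absent, and abort the parse on a type mismatch. A minimal sketch of that pattern as a standalone helper; the helper name and the `ok` flag are illustrative, not part of taosdemo:

```c
#include <stdbool.h>
#include <stdint.h>
#include "cJSON.h"

/* Illustrative helper only: mirrors the repeated pattern above.
 *  - key absent              -> return defaultVal
 *  - key present, a number   -> return its value
 *  - key present, wrong type -> flag the error via *ok                    */
static int64_t jsonIntOrDefault(cJSON *obj, const char *key,
                                int64_t defaultVal, bool *ok) {
    cJSON *item = cJSON_GetObjectItem(obj, key);
    if (!item) {
        return defaultVal;
    }
    if (item->type != cJSON_Number) {
        *ok = false;
        return defaultVal;
    }
    return item->valueint;
}
```

With a helper of this shape, each of the days/cache/blocks-style blocks above would reduce to a single call plus a `goto PARSE_OVER` when the flag is cleared.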
- } - - cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if ((childTbl_offset) && (g_Dbs.db[i].drop != true) - && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { - if ((childTbl_offset->type != cJSON_Number) - || (0 > childTbl_offset->valueint)) { - errorPrint("%s", "failed to read json, childtable_offset\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblOffset = 0; - } - - cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); - if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, TSDB_DB_NAME_LEN); - } else if (!ts) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - "now", TSDB_DB_NAME_LEN); - } else { - errorPrint("%s", "failed to read json, start_timestamp not found\n"); - goto PARSE_OVER; - } - - cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); - if (timestampStep && timestampStep->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; - } else if (!timestampStep) { - g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; - } else { - errorPrint("%s", "failed to read json, timestamp_step not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); - if (sampleFormat && sampleFormat->type - == cJSON_String && sampleFormat->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, - min(SMALL_BUFF_LEN, - strlen(sampleFormat->valuestring) + 1)); - } else if (!sampleFormat) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", - SMALL_BUFF_LEN); - } else { - errorPrint("%s", "failed to read json, sample_format not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String - && sampleFile->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, - sampleFile->valuestring, - min(MAX_FILE_NAME_LEN, - strlen(sampleFile->valuestring) + 1)); - } else if (!sampleFile) { - memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, - MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", "failed to read json, sample_file not found\n"); - goto PARSE_OVER; - } - - cJSON *useSampleTs = cJSON_GetObjectItem(stbInfo, "use_sample_ts"); - if (useSampleTs && useSampleTs->type == cJSON_String - && useSampleTs->valuestring != NULL) { - if (0 == strncasecmp(useSampleTs->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].useSampleTs = true; - } else if (0 == strncasecmp(useSampleTs->valuestring, "no", 2)){ - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } else { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } - } else if (!useSampleTs) { - g_Dbs.db[i].superTbls[j].useSampleTs = false; - } else { - errorPrint("%s", "failed to read json, use_sample_ts not found\n"); - goto PARSE_OVER; - } - - cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); - if ((tagsFile && tagsFile->type == cJSON_String) - && (tagsFile->valuestring != NULL)) { - tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, - tagsFile->valuestring, MAX_FILE_NAME_LEN); - if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - g_Dbs.db[i].superTbls[j].tagSource = 1; - } - } else if (!tagsFile) { - memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); - g_Dbs.db[i].superTbls[j].tagSource = 0; - } 
else { - errorPrint("%s", "failed to read json, tags_file not found\n"); - goto PARSE_OVER; - } - - cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) { - int32_t len = stbMaxSqlLen->valueint; - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < 5) { - len = 5; - } - g_Dbs.db[i].superTbls[j].maxSqlLen = len; - } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; - } else { - errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n"); - goto PARSE_OVER; - } - /* - cJSON *multiThreadWriteOneTbl = - cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes - if (multiThreadWriteOneTbl - && multiThreadWriteOneTbl->type == cJSON_String - && multiThreadWriteOneTbl->valuestring != NULL) { - if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; - } else { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } - } else if (!multiThreadWriteOneTbl) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } else { - errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n"); - goto PARSE_OVER; - } - */ - cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); - if (insertRows && insertRows->type == cJSON_Number) { - if (insertRows->valueint < 0) { - errorPrint("%s", "failed to read json, insert_rows input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; - } else if (!insertRows) { - g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; - } else { - errorPrint("%s", "failed to read json, insert_rows input mistake\n"); - goto PARSE_OVER; - } - - cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); - if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { - if (stbInterlaceRows->valueint < 0) { - errorPrint("%s", "failed to read json, interlace rows input mistake\n"); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint; - - if (g_Dbs.db[i].superTbls[j].interlaceRows > g_Dbs.db[i].superTbls[j].insertRows) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %u > insert_rows %"PRId64"\n\n", - i, j, g_Dbs.db[i].superTbls[j].interlaceRows, - g_Dbs.db[i].superTbls[j].insertRows); - printf(" interlace rows value will be set to insert_rows %"PRId64"\n\n", - g_Dbs.db[i].superTbls[j].insertRows); - prompt(); - g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows; - } - } else if (!stbInterlaceRows) { - g_Dbs.db[i].superTbls[j].interlaceRows = g_args.interlaceRows; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req - } else { - errorPrint( - "%s", "failed to read json, interlace rows input mistake\n"); - goto PARSE_OVER; - } - - cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); - if (disorderRatio && disorderRatio->type == cJSON_Number) { - if (disorderRatio->valueint > 50) - disorderRatio->valueint = 50; - - if (disorderRatio->valueint < 0) - disorderRatio->valueint = 0; - - g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; - } else if (!disorderRatio) { - g_Dbs.db[i].superTbls[j].disorderRatio = 0; - } else { - errorPrint("%s", "failed to read json, disorderRatio not found\n"); - goto PARSE_OVER; - } - - cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); - if (disorderRange && disorderRange->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint; - } else if (!disorderRange) { - g_Dbs.db[i].superTbls[j].disorderRange = 1000; - } else { - errorPrint("%s", "failed to read json, disorderRange not found\n"); - goto PARSE_OVER; - } - - cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval"); - if (insertInterval && insertInterval->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; - if (insertInterval->valueint < 0) { - errorPrint("%s", "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n", - __func__, __LINE__, g_args.insert_interval); - g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; - } else { - errorPrint("%s", "failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - int retVal = getColumnAndTagTypeFromInsertJsonFile( - stbInfo, &g_Dbs.db[i].superTbls[j]); - if (false == retVal) { - goto PARSE_OVER; - } - } - } - - ret = true; - -PARSE_OVER: - return ret; -} - -static bool getMetaFromQueryJsonFile(cJSON* root) { - bool ret = false; - - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - errorPrint("%s", "failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_queryInfo.port = port->valueint; - } else if (!port) { - g_queryInfo.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN); - } else if (!password) { - tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);; - } - - cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == 
cJSON_String - && answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = false; - } else { - errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n"); - goto PARSE_OVER; - } - - cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); - if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint <= 0) { - errorPrint("%s()", "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - g_args.query_times = gQueryTimes->valueint; - } else if (!gQueryTimes) { - g_args.query_times = 1; - } else { - errorPrint("%s", "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); - } else if (!dbs) { - errorPrint("%s", "failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); - if (queryMode - && queryMode->type == cJSON_String - && queryMode->valuestring != NULL) { - tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, - min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1)); - } else if (!queryMode) { - tstrncpy(g_queryInfo.queryMode, "taosc", - min(SMALL_BUFF_LEN, strlen("taosc") + 1)); - } else { - errorPrint("%s", "failed to read json, query_mode not found\n"); - goto PARSE_OVER; - } - - // specified_table_query - cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!specifiedQuery) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedQuery->type != cJSON_Object) { - errorPrint("%s", "failed to read json, super_table_query not found\n"); - goto PARSE_OVER; - } else { - cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (queryInterval && queryInterval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; - } else if (!queryInterval) { - g_queryInfo.specifiedQueryInfo.queryInterval = 0; - } - - cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, - "query_times"); - if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint <= 0) { - errorPrint( - "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - specifiedQueryTimes->valueint); - goto PARSE_OVER; - - } - g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; - } else if (!specifiedQueryTimes) { - g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - if (concurrent->valueint <= 0) { - errorPrint( - "query sqlCount %d or concurrent %d is not correct.\n", - g_queryInfo.specifiedQueryInfo.sqlCount, - g_queryInfo.specifiedQueryInfo.concurrent); - goto PARSE_OVER; - } - g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; - } else if (!concurrent) { - 
g_queryInfo.specifiedQueryInfo.concurrent = 1; - } - - cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String - && specifiedAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s", "failed to read json, async mode input error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); - if (interval && interval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; - } else if (!interval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; - } - - cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); - if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = false; - } else { - errorPrint("%s", "failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - - cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); - if (keepProgress - && keepProgress->type == cJSON_String - && keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } else { - errorPrint("%s", "failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); - if (!specifiedSqls) { - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(specifiedSqls); - if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent - > MAX_QUERY_SQL_COUNT) { - errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", - superSqlSize, - g_queryInfo.specifiedQueryInfo.concurrent, - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - errorPrint("%s", "failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, BUFFER_SIZE); - - // default value is -1, which mean infinite loop - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - cJSON* endAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); - if (endAfterConsume - && 
endAfterConsume->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] - = endAfterConsume->valueint; - } - if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - cJSON* resubAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); - if ((resubAfterConsume) - && (resubAfterConsume->type == cJSON_Number) - && (resubAfterConsume->valueint >= 0)) { - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] - = resubAfterConsume->valueint; - } - - if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if ((NULL != result) && (result->type == cJSON_String) - && (result->valuestring != NULL)) { - tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.specifiedQueryInfo.result[j], - 0, MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", - "failed to read json, super query result file not found\n"); - goto PARSE_OVER; - } - } - } - } - - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.threadCnt = 1; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { - errorPrint("%s", "failed to read json, sub_table_query not found\n"); - ret = true; - goto PARSE_OVER; - } else { - cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; - } else if (!subrate) { - g_queryInfo.superQueryInfo.queryInterval = 0; - } - - cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); - if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint <= 0) { - errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - superQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; - } else if (!superQueryTimes) { - g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint("%s", "failed to read json, query_times input mistake\n"); - goto PARSE_OVER; - } - - cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - if (threads->valueint <= 0) { - errorPrint("%s", "failed to read json, threads input mistake\n"); - goto PARSE_OVER; - - } - g_queryInfo.superQueryInfo.threadCnt = threads->valueint; - } else if (!threads) { - g_queryInfo.superQueryInfo.threadCnt = 1; - } - - //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); - //if (subTblCnt && subTblCnt->type == cJSON_Number) { - // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.superQueryInfo.childTblCount = 0; - //} - - cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String - && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring, - TSDB_TABLE_NAME_LEN); - } else { - errorPrint("%s", "failed to read json, super table name input error\n"); - goto PARSE_OVER; - } - - cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); - if (superAsyncMode && 
superAsyncMode->type == cJSON_String - && superAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s", "failed to read json, async mode input error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); - if (superInterval && superInterval->type == cJSON_Number) { - if (superInterval->valueint < 0) { - errorPrint("%s", "failed to read json, interval input mistake\n"); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; - } else if (!superInterval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.superQueryInfo.subscribeInterval = 10000; - } - - cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String - && subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = false; - } else { - errorPrint("%s", "failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } - - cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); - if (superkeepProgress && - superkeepProgress->type == cJSON_String - && superkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } else { - errorPrint("%s", - "failed to read json, subscribe super table keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; - cJSON* superEndAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superEndAfterConsume - && superEndAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - superEndAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.endAfterConsume < -1) - g_queryInfo.superQueryInfo.endAfterConsume = -1; - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); - if ((superResubAfterConsume) - && (superResubAfterConsume->type == cJSON_Number) - && (superResubAfterConsume->valueint >= 0)) { - g_queryInfo.superQueryInfo.resubAfterConsume = - superResubAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - - // supert table sqls - cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { - errorPrint("%s", "failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - 
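The endAfterConsume and resubAfterConsume handling above, like its per-SQL counterpart in the specified-query section, normalizes anything below -1 back to -1, the sentinel the subscription code treats as "consume forever" and "never resubscribe". A hedged sketch of that normalization, with a hypothetical helper name:

```c
#include <stdint.h>
#include "cJSON.h"

/* Illustrative only: absent keys, non-numeric values, and anything below -1
 * all collapse to the -1 sentinel used by the subscription logic above.    */
static int64_t consumeLimitFromJson(cJSON *obj, const char *key) {
    cJSON *item = cJSON_GetObjectItem(obj, key);
    int64_t v = (item && item->type == cJSON_Number) ? item->valueint : -1;
    return (v < -1) ? -1 : v;
}
```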
errorPrint("failed to read json, query sql size overflow, max is %d\n", - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String - || sqlStr->valuestring == NULL) { - errorPrint("%s", "failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - BUFFER_SIZE); - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String - && result->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); - } else { - errorPrint("%s", "failed to read json, sub query result file not found\n"); - goto PARSE_OVER; - } - } - } - } - - ret = true; - -PARSE_OVER: - return ret; -} - -static bool getInfoFromJsonFile(char* file) { - debugPrint("%s %d %s\n", __func__, __LINE__, file); - - FILE *fp = fopen(file, "r"); - if (!fp) { - errorPrint("failed to read %s, reason:%s\n", file, strerror(errno)); - return false; - } - - bool ret = false; - int maxLen = 6400000; - char *content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - errorPrint("failed to read %s, content is null", file); - return false; - } - - content[len] = 0; - cJSON* root = cJSON_Parse(content); - if (root == NULL) { - errorPrint("failed to cjson parse %s, invalid json format\n", file); - goto PARSE_OVER; - } - - cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); - if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_args.test_mode = INSERT_TEST; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_args.test_mode = QUERY_TEST; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_args.test_mode = SUBSCRIBE_TEST; - } else { - errorPrint("%s", "failed to read json, filetype not support\n"); - goto PARSE_OVER; - } - } else if (!filetype) { - g_args.test_mode = INSERT_TEST; - } else { - errorPrint("%s", "failed to read json, filetype not found\n"); - goto PARSE_OVER; - } - - if (INSERT_TEST == g_args.test_mode) { - memset(&g_Dbs, 0, sizeof(SDbs)); - g_Dbs.use_metric = g_args.use_metric; - ret = getMetaFromInsertJsonFile(root); - } else if ((QUERY_TEST == g_args.test_mode) - || (SUBSCRIBE_TEST == g_args.test_mode)) { - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - ret = getMetaFromQueryJsonFile(root); - } else { - errorPrint("%s", - "input json file type error! 
please input correct file type: insert or query or subscribe\n"); - goto PARSE_OVER; - } - -PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; -} - -static int prepareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { - return -1; - } - } - } - } - - return 0; -} - -static void postFreeResource() { - tmfclose(g_fpOfInsertResult); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - -#if STMT_BIND_PARAM_BATCH == 1 - for (int c = 0; - c < g_Dbs.db[i].superTbls[j].columnCount; c ++) { - - if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) { - - tmfree((char *)((uintptr_t)*(uintptr_t*)( - g_Dbs.db[i].superTbls[j].sampleBindBatchArray - + sizeof(char*) * c))); - } - } - tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray); -#endif - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - tmfree(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } - } - tmfree(g_Dbs.db[i].superTbls); - } - tmfree(g_Dbs.db); - tmfree(g_randbool_buff); - tmfree(g_randint_buff); - tmfree(g_rand_voltage_buff); - tmfree(g_randbigint_buff); - tmfree(g_randsmallint_buff); - tmfree(g_randtinyint_buff); - tmfree(g_randfloat_buff); - tmfree(g_rand_current_buff); - tmfree(g_rand_phase_buff); - tmfree(g_randdouble_buff); - tmfree(g_randuint_buff); - tmfree(g_randutinyint_buff); - tmfree(g_randusmallint_buff); - tmfree(g_randubigint_buff); - tmfree(g_randint); - tmfree(g_randuint); - tmfree(g_randbigint); - tmfree(g_randubigint); - tmfree(g_randfloat); - tmfree(g_randdouble); - - tmfree(g_sampleDataBuf); - -#if STMT_BIND_PARAM_BATCH == 1 - for (int l = 0; - l < g_args.columnCount; l ++) { - if (g_sampleBindBatchArray) { - tmfree((char *)((uintptr_t)*(uintptr_t*)( - g_sampleBindBatchArray - + sizeof(char*) * l))); - } - } - tmfree(g_sampleBindBatchArray); - -#endif -} - -static int getRowDataFromSample( - char* dataBuf, int64_t maxLen, int64_t timestamp, - SSuperTable* stbInfo, int64_t* sampleUsePos) -{ - if ((*sampleUsePos) == MAX_SAMPLES) { - *sampleUsePos = 0; - } - - int dataLen = 0; - if(stbInfo->useSampleTs) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "(%s", - stbInfo->sampleDataBuf - + stbInfo->lenOfOneRow * (*sampleUsePos)); - } else { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "(%" PRId64 ", ", timestamp); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "%s", - stbInfo->sampleDataBuf - + stbInfo->lenOfOneRow * (*sampleUsePos)); - } - - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - - (*sampleUsePos)++; - - return dataLen; -} - -static int64_t generateStbRowData( - SSuperTable* stbInfo, - char* recBuf, - int64_t remainderBufLen, - int64_t timestamp) -{ - int64_t dataLen = 0; - char *pstr = recBuf; - int64_t maxLen = MAX_DATA_SIZE; - int tmpLen; - - dataLen += snprintf(pstr + 
dataLen, maxLen - dataLen, - "(%" PRId64 "", timestamp); - - for (int i = 0; i < stbInfo->columnCount; i++) { - tstrncpy(pstr + dataLen, ",", 2); - dataLen += 1; - - if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY) - || (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) { - if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - - if ((stbInfo->columns[i].dataLen + 1) > - /* need count 3 extra chars \', \', and , */ - (remainderBufLen - dataLen - 3)) { - return 0; - } - char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); - if (NULL == buf) { - errorPrint2("calloc failed! size:%d\n", stbInfo->columns[i].dataLen); - return -1; - } - rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf); - tmfree(buf); - - } else { - char *tmp = NULL; - switch(stbInfo->columns[i].data_type) { - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (i == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UINT: - tmp = rand_uint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_BIGINT: - tmp = rand_bigint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UBIGINT: - tmp = rand_ubigint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (i == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, FLOAT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmp = rand_double_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, DOUBLE_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - tmp = rand_smallint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_USMALLINT: - tmp = rand_usmallint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_TINYINT: - tmp = rand_tinyint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_UTINYINT: - tmp = rand_utinyint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_BOOL: - tmp = rand_bool_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BOOL_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmp = rand_bigint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN)); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint2("Not support data type: %s\n", - stbInfo->columns[i].dataType); - exit(EXIT_FAILURE); - } - if (tmp) { - dataLen += tmpLen; - } - } - - if (dataLen > (remainderBufLen - (128))) - return 0; - } - - dataLen += snprintf(pstr + dataLen, 2, ")"); - - verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen); - verbosePrint("%s() LN%d, 
recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return strlen(recBuf); -} - -static int64_t generateData(char *recBuf, char *data_type, - int64_t timestamp, int lenOfBinary) { - memset(recBuf, 0, MAX_DATA_SIZE); - char *pstr = recBuf; - pstr += sprintf(pstr, "(%"PRId64"", timestamp); - - int columnCount = g_args.columnCount; - - bool b; - char *s; - for (int i = 0; i < columnCount; i++) { - switch (data_type[i]) { - case TSDB_DATA_TYPE_TINYINT: - pstr += sprintf(pstr, ",%d", rand_tinyint() ); - break; - - case TSDB_DATA_TYPE_SMALLINT: - pstr += sprintf(pstr, ",%d", rand_smallint()); - break; - - case TSDB_DATA_TYPE_INT: - pstr += sprintf(pstr, ",%d", rand_int()); - break; - - case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); - break; - - case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, ",%10.4f", rand_float()); - break; - - case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, ",%20.8f", rand_double()); - break; - - case TSDB_DATA_TYPE_BOOL: - b = rand_bool() & 1; - pstr += sprintf(pstr, ",%s", b ? "true" : "false"); - break; - - case TSDB_DATA_TYPE_BINARY: - s = malloc(lenOfBinary + 1); - if (s == NULL) { - errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", - __func__, __LINE__, lenOfBinary + 1); - exit(EXIT_FAILURE); - } - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - break; - - case TSDB_DATA_TYPE_NCHAR: - s = malloc(lenOfBinary + 1); - if (s == NULL) { - errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", - __func__, __LINE__, lenOfBinary + 1); - exit(EXIT_FAILURE); - } - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - break; - - case TSDB_DATA_TYPE_UTINYINT: - pstr += sprintf(pstr, ",%d", rand_utinyint() ); - break; - - case TSDB_DATA_TYPE_USMALLINT: - pstr += sprintf(pstr, ",%d", rand_usmallint()); - break; - - case TSDB_DATA_TYPE_UINT: - pstr += sprintf(pstr, ",%d", rand_uint()); - break; - - case TSDB_DATA_TYPE_UBIGINT: - pstr += sprintf(pstr, ",%"PRId64"", rand_ubigint()); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint2("%s() LN%d, Unknown data type %d\n", - __func__, __LINE__, - data_type[i]); - exit(EXIT_FAILURE); - } - - if (strlen(recBuf) > MAX_DATA_SIZE) { - ERROR_EXIT("column length too long, abort"); - } - } - - pstr += sprintf(pstr, ")"); - - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return (int32_t)strlen(recBuf); -} - -static int generateSampleFromRand( - char *sampleDataBuf, - uint64_t lenOfOneRow, - int columnCount, - StrColumn *columns - ) -{ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - - char *buff = malloc(lenOfOneRow); - if (NULL == buff) { - errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n", - __func__, __LINE__, lenOfOneRow); - exit(EXIT_FAILURE); - } - - for (int i=0; i < MAX_SAMPLES; i++) { - uint64_t pos = 0; - memset(buff, 0, lenOfOneRow); - - for (int c = 0; c < columnCount; c++) { - char *tmp = NULL; - - uint32_t dataLen; - char data_type = (columns)?(columns[c].data_type):g_args.data_type[c]; - - switch(data_type) { - case TSDB_DATA_TYPE_BINARY: - dataLen = (columns)?columns[c].dataLen:g_args.binwidth; - rand_string(data, dataLen); - pos += sprintf(buff + pos, "%s,", data); - break; - - case TSDB_DATA_TYPE_NCHAR: - dataLen = (columns)?columns[c].dataLen:g_args.binwidth; - rand_string(data, dataLen - 1); - pos += sprintf(buff + pos, "%s,", 
data); - break; - - case TSDB_DATA_TYPE_INT: - if ((g_args.demo_mode) && (c == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - break; - - case TSDB_DATA_TYPE_UINT: - pos += sprintf(buff + pos, "%s,", rand_uint_str()); - break; - - case TSDB_DATA_TYPE_BIGINT: - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - break; - - case TSDB_DATA_TYPE_UBIGINT: - pos += sprintf(buff + pos, "%s,", rand_ubigint_str()); - break; - - case TSDB_DATA_TYPE_FLOAT: - if (g_args.demo_mode) { - if (c == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - break; - - case TSDB_DATA_TYPE_DOUBLE: - pos += sprintf(buff + pos, "%s,", rand_double_str()); - break; - - case TSDB_DATA_TYPE_SMALLINT: - pos += sprintf(buff + pos, "%s,", rand_smallint_str()); - break; - - case TSDB_DATA_TYPE_USMALLINT: - pos += sprintf(buff + pos, "%s,", rand_usmallint_str()); - break; - - case TSDB_DATA_TYPE_TINYINT: - pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); - break; - - case TSDB_DATA_TYPE_UTINYINT: - pos += sprintf(buff + pos, "%s,", rand_utinyint_str()); - break; - - case TSDB_DATA_TYPE_BOOL: - pos += sprintf(buff + pos, "%s,", rand_bool_str()); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint2("%s() LN%d, Unknown data type %s\n", - __func__, __LINE__, - (columns)?(columns[c].dataType):g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - *(buff + pos - 1) = 0; - memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos); - } - - free(buff); - return 0; -} - -static int generateSampleFromRandForNtb() -{ - return generateSampleFromRand( - g_sampleDataBuf, - g_args.lenOfOneRow, - g_args.columnCount, - NULL); -} - -static int generateSampleFromRandForStb(SSuperTable *stbInfo) -{ - return generateSampleFromRand( - stbInfo->sampleDataBuf, - stbInfo->lenOfOneRow, - stbInfo->columnCount, - stbInfo->columns); -} - -static int prepareSampleForNtb() { - g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1); - if (NULL == g_sampleDataBuf) { - errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, - g_args.lenOfOneRow * MAX_SAMPLES, - strerror(errno)); - return -1; - } - - return generateSampleFromRandForNtb(); -} - -static int prepareSampleForStb(SSuperTable *stbInfo) { - - stbInfo->sampleDataBuf = calloc( - stbInfo->lenOfOneRow * MAX_SAMPLES, 1); - if (NULL == stbInfo->sampleDataBuf) { - errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, - stbInfo->lenOfOneRow * MAX_SAMPLES, - strerror(errno)); - return -1; - } - - int ret; - if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) { - if(stbInfo->useSampleTs) { - getAndSetRowsFromCsvFile(stbInfo); - } - ret = generateSampleFromCsvForStb(stbInfo); - } else { - ret = generateSampleFromRandForStb(stbInfo); - } - - if (0 != ret) { - errorPrint2("%s() LN%d, read sample from csv file failed.\n", - __func__, __LINE__); - tmfree(stbInfo->sampleDataBuf); - stbInfo->sampleDataBuf = NULL; - return -1; - } - - return 0; -} - -static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) -{ - int32_t affectedRows; - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - uint16_t iface; - if (stbInfo) - iface = stbInfo->iface; - else { - if (g_args.iface == INTERFACE_BUT) - iface = 
TAOSC_IFACE; - else - iface = g_args.iface; - } - - debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, - __func__, __LINE__, - (iface==TAOSC_IFACE)? - "taosc":(iface==REST_IFACE)?"rest":"stmt"); - - switch(iface) { - case TAOSC_IFACE: - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - - affectedRows = queryDbExec( - pThreadInfo->taos, - pThreadInfo->buffer, INSERT_TYPE, false); - break; - - case REST_IFACE: - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - - if (0 != postProceSql(g_Dbs.host, g_Dbs.port, - pThreadInfo->buffer, pThreadInfo)) { - affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", - pThreadInfo->threadID); - } else { - affectedRows = k; - } - break; - - case STMT_IFACE: - debugPrint("%s() LN%d, stmt=%p", - __func__, __LINE__, pThreadInfo->stmt); - if (0 != taos_stmt_execute(pThreadInfo->stmt)) { - errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); - - fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n"); - exit(EXIT_FAILURE); - } - affectedRows = k; - break; - - default: - errorPrint2("%s() LN%d: unknown insert mode: %d\n", - __func__, __LINE__, stbInfo->iface); - affectedRows = 0; - } - - return affectedRows; -} - -static void getTableName(char *pTblName, - threadInfo* pThreadInfo, uint64_t tableSeq) -{ - SSuperTable* stbInfo = pThreadInfo->stbInfo; - if (stbInfo) { - if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { - if (stbInfo->childTblLimit > 0) { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - stbInfo->childTblName + - (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); - } else { - verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, tableSeq); - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); - } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, - "%s%"PRIu64"", stbInfo->childTblPrefix, tableSeq); - } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", g_args.tb_prefix, tableSeq); - } -} - -static int32_t generateDataTailWithoutStb( - uint32_t batch, char* buffer, - int64_t remainderBufLen, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, - /* int64_t *pSamplePos, */int64_t *dataLen) { - - uint64_t len = 0; - char *pstr = buffer; - - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); - - int32_t k = 0; - for (k = 0; k < batch;) { - char *data = pstr; - memset(data, 0, MAX_DATA_SIZE); - - int64_t retLen = 0; - - char *data_type = g_args.data_type; - int lenOfBinary = g_args.binwidth; - - if (g_args.disorderRatio) { - retLen = generateData(data, data_type, - startTime + getTSRandTail( - g_args.timestamp_step, k, - g_args.disorderRatio, - g_args.disorderRange), - lenOfBinary); - } else { - retLen = generateData(data, data_type, - startTime + g_args.timestamp_step * k, - lenOfBinary); - } - - if (len > remainderBufLen) - break; - - pstr += retLen; - k++; - len += retLen; - remainderBufLen -= retLen; - - verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); - - recordFrom ++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; -} - -static int64_t getTSRandTail(int64_t timeStampStep, 
int32_t seq, - int disorderRatio, int disorderRange) -{ - int64_t randTail = timeStampStep * seq; - if (disorderRatio > 0) { - int rand_num = taosRandom() % 100; - if(rand_num < disorderRatio) { - randTail = (randTail + - (taosRandom() % disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %"PRId64"\n", randTail); - } - } - - return randTail; -} - -static int32_t generateStbDataTail( - SSuperTable* stbInfo, - uint32_t batch, char* buffer, - int64_t remainderBufLen, int64_t insertRows, - uint64_t recordFrom, int64_t startTime, - int64_t *pSamplePos, int64_t *dataLen) { - uint64_t len = 0; - - char *pstr = buffer; - - bool tsRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - tsRand = true; - } else { - tsRand = false; - } - verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n", - __func__, __LINE__, batch, remainderBufLen); - - int32_t k; - for (k = 0; k < batch;) { - char *data = pstr; - - int64_t lenOfRow = 0; - - if (tsRand) { - if (stbInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData(stbInfo, data, - remainderBufLen, - startTime + getTSRandTail( - stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange) - ); - } else { - lenOfRow = generateStbRowData(stbInfo, data, - remainderBufLen, - startTime + stbInfo->timeStampStep * k - ); - } - } else { - lenOfRow = getRowDataFromSample( - data, - (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, - startTime + stbInfo->timeStampStep * k, - stbInfo, - pSamplePos); - } - - if (lenOfRow == 0) { - data[0] = '\0'; - break; - } - if ((lenOfRow + 1) > remainderBufLen) { - break; - } - - pstr += lenOfRow; - k++; - len += lenOfRow; - remainderBufLen -= lenOfRow; - - verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); - - recordFrom ++; - - if (recordFrom >= insertRows) { - break; - } - } - - *dataLen = len; - return k; -} - - -static int generateSQLHeadWithoutStb(char *tableName, - char *dbName, - char *buffer, int remainderBufLen) -{ - int len; - - char headBuf[HEAD_BUFF_LEN]; - - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); - - if (len > remainderBufLen) - return -1; - - tstrncpy(buffer, headBuf, len + 1); - - return len; -} - -static int generateStbSQLHead( - SSuperTable* stbInfo, - char *tableName, int64_t tableSeq, - char *dbName, - char *buffer, int remainderBufLen) -{ - int len; - - char headBuf[HEAD_BUFF_LEN]; - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == stbInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); - } else { - tagsValBuf = getTagValueFromTagSample( - stbInfo, - tableSeq % stbInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s using %s.%s TAGS%s values", - dbName, - tableName, - dbName, - stbInfo->stbName, - tagsValBuf); - tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); - } else { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); - } - - if (len > remainderBufLen) - return -1; - - tstrncpy(buffer, headBuf, len + 1); - - return len; -} - -static int32_t generateStbInterlaceData( - threadInfo *pThreadInfo, - char *tableName, uint32_t batchPerTbl, - uint64_t i, - uint32_t 
batchPerTblTimes, - uint64_t tableSeq, - char *buffer, - int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen) -{ - assert(buffer); - char *pstr = buffer; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int headLen = generateStbSQLHead( - stbInfo, - tableName, tableSeq, pThreadInfo->db_name, - pstr, *pRemainderBufLen); - - if (headLen <= 0) { - return 0; - } - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, i, buffer); - - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen = 0; - - verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", - pThreadInfo->threadID, __func__, __LINE__, - i, batchPerTblTimes, batchPerTbl); - - if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } - - int32_t k = generateStbDataTail( - stbInfo, - batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, - startTime, - &(pThreadInfo->samplePos), &dataLen); - - if (k == batchPerTbl) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", - __func__, __LINE__, k, batchPerTbl); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; -} - -static int64_t generateInterlaceDataWithoutStb( - char *tableName, uint32_t batch, - uint64_t tableSeq, - char *dbName, char *buffer, - int64_t insertRows, - int64_t startTime, - uint64_t *pRemainderBufLen) -{ - assert(buffer); - char *pstr = buffer; - - int headLen = generateSQLHeadWithoutStb( - tableName, dbName, - pstr, *pRemainderBufLen); - - if (headLen <= 0) { - return 0; - } - - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen = 0; - - int32_t k = generateDataTailWithoutStb( - batch, pstr, *pRemainderBufLen, insertRows, 0, - startTime, - &dataLen); - - if (k == batch) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n", - __func__, __LINE__, k, batch); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; -} - -static int32_t prepareStmtBindArrayByType( - TAOS_BIND *bind, - char data_type, int32_t dataLen, - int32_t timePrec, - char *value) -{ - int32_t *bind_int; - uint32_t *bind_uint; - int64_t *bind_bigint; - uint64_t *bind_ubigint; - float *bind_float; - double *bind_double; - int8_t *bind_bool; - int64_t *bind_ts2; - int16_t *bind_smallint; - uint16_t *bind_usmallint; - int8_t *bind_tinyint; - uint8_t *bind_utinyint; - - switch(data_type) { - case TSDB_DATA_TYPE_BINARY: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary; - - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - bind_binary = calloc(1, strlen(value) + 1); - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - bind_binary = calloc(1, dataLen + 1); - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } - - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_NCHAR: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar; - - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - bind_nchar = calloc(1, strlen(value) + 
1); - strncpy(bind_nchar, value, strlen(value)); - } else { - bind_nchar = calloc(1, dataLen + 1); - rand_string(bind_nchar, dataLen); - } - - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_INT: - bind_int = malloc(sizeof(int32_t)); - assert(bind_int); - - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UINT: - bind_uint = malloc(sizeof(uint32_t)); - assert(bind_uint); - - if (value) { - *bind_uint = atoi(value); - } else { - *bind_uint = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_UINT; - bind->buffer_length = sizeof(uint32_t); - bind->buffer = bind_uint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_BIGINT: - bind_bigint = malloc(sizeof(int64_t)); - assert(bind_bigint); - - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UBIGINT: - bind_ubigint = malloc(sizeof(uint64_t)); - assert(bind_ubigint); - - if (value) { - *bind_ubigint = atoll(value); - } else { - *bind_ubigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UBIGINT; - bind->buffer_length = sizeof(uint64_t); - bind->buffer = bind_ubigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_FLOAT: - bind_float = malloc(sizeof(float)); - assert(bind_float); - - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_DOUBLE: - bind_double = malloc(sizeof(double)); - assert(bind_double); - - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_SMALLINT: - bind_smallint = malloc(sizeof(int16_t)); - assert(bind_smallint); - - if (value) { - *bind_smallint = (int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_USMALLINT: - bind_usmallint = malloc(sizeof(uint16_t)); - assert(bind_usmallint); - - if (value) { - *bind_usmallint = (uint16_t)atoi(value); - } else { - *bind_usmallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(uint16_t); - bind->buffer = bind_usmallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_TINYINT: - bind_tinyint = malloc(sizeof(int8_t)); - assert(bind_tinyint); - - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - 
bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_UTINYINT: - bind_utinyint = malloc(sizeof(uint8_t)); - assert(bind_utinyint); - - if (value) { - *bind_utinyint = (int8_t)atoi(value); - } else { - *bind_utinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UTINYINT; - bind->buffer_length = sizeof(uint8_t); - bind->buffer = bind_utinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_BOOL: - bind_bool = malloc(sizeof(int8_t)); - assert(bind_bool); - - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; - } else { - *bind_bool = false; - } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - bind_ts2 = malloc(sizeof(int64_t)); - assert(bind_ts2); - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while(value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; - } - i++; - } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != taosParseTime( - value, &tmpEpoch, strlen(value), - timePrec, 0)) { - free(bind_ts2); - errorPrint2("Input %s, time format error!\n", value); - return -1; - } - *bind_ts2 = tmpEpoch; - } else { - *bind_ts2 = atoll(value); - } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - break; - - case TSDB_DATA_TYPE_NULL: - break; - - default: - errorPrint2("Not support data type: %d\n", data_type); - exit(EXIT_FAILURE); - } - - return 0; -} - -static int32_t prepareStmtBindArrayByTypeForRand( - TAOS_BIND *bind, - char data_type, int32_t dataLen, - int32_t timePrec, - char **ptr, - char *value) -{ - int32_t *bind_int; - uint32_t *bind_uint; - int64_t *bind_bigint; - uint64_t *bind_ubigint; - float *bind_float; - double *bind_double; - int16_t *bind_smallint; - uint16_t *bind_usmallint; - int8_t *bind_tinyint; - uint8_t *bind_utinyint; - int8_t *bind_bool; - int64_t *bind_ts2; - - switch(data_type) { - case TSDB_DATA_TYPE_BINARY: - - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary = (char *)*ptr; - - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } - - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_NCHAR: - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("nchar length overflow, max size: %u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar = (char *)*ptr; - - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - strncpy(bind_nchar, value, strlen(value)); - } else { - rand_string(bind_nchar, dataLen); - } - - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - 
break; - - case TSDB_DATA_TYPE_INT: - bind_int = (int32_t *)*ptr; - - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_UINT: - bind_uint = (uint32_t *)*ptr; - - if (value) { - *bind_uint = atoi(value); - } else { - *bind_uint = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_UINT; - bind->buffer_length = sizeof(uint32_t); - bind->buffer = bind_uint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_BIGINT: - bind_bigint = (int64_t *)*ptr; - - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_UBIGINT: - bind_ubigint = (uint64_t *)*ptr; - - if (value) { - *bind_ubigint = atoll(value); - } else { - *bind_ubigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UBIGINT; - bind->buffer_length = sizeof(uint64_t); - bind->buffer = bind_ubigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_FLOAT: - bind_float = (float *)*ptr; - - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_DOUBLE: - bind_double = (double *)*ptr; - - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_SMALLINT: - bind_smallint = (int16_t *)*ptr; - - if (value) { - *bind_smallint = (int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_USMALLINT: - bind_usmallint = (uint16_t *)*ptr; - - if (value) { - *bind_usmallint = (uint16_t)atoi(value); - } else { - *bind_usmallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_USMALLINT; - bind->buffer_length = sizeof(uint16_t); - bind->buffer = bind_usmallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_TINYINT: - bind_tinyint = (int8_t *)*ptr; - - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_UTINYINT: - bind_utinyint = (uint8_t *)*ptr; - - if (value) { - 
*bind_utinyint = (uint8_t)atoi(value); - } else { - *bind_utinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_UTINYINT; - bind->buffer_length = sizeof(uint8_t); - bind->buffer = bind_utinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_BOOL: - bind_bool = (int8_t *)*ptr; - - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; - } else { - *bind_bool = false; - } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - bind_ts2 = (int64_t *)*ptr; - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while(value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; - } - i++; - } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != taosParseTime( - value, &tmpEpoch, strlen(value), - timePrec, 0)) { - errorPrint2("Input %s, time format error!\n", value); - return -1; - } - *bind_ts2 = tmpEpoch; - } else { - *bind_ts2 = atoll(value); - } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - *ptr += bind->buffer_length; - break; - - default: - errorPrint2("No support data type: %d\n", data_type); - return -1; - } - - return 0; -} - -static int32_t prepareStmtWithoutStb( - threadInfo *pThreadInfo, - char *tableName, - uint32_t batch, - int64_t insertRows, - int64_t recordFrom, - int64_t startTime) -{ - TAOS_STMT *stmt = pThreadInfo->stmt; - int ret = taos_stmt_set_tbname(stmt, tableName); - if (ret != 0) { - errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", - tableName, ret, taos_stmt_errstr(stmt)); - return ret; - } - - char *data_type = g_args.data_type; - - char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1)); - if (bindArray == NULL) { - errorPrint2("Failed to allocate %d bind params\n", - (g_args.columnCount + 1)); - return -1; - } - - int32_t k = 0; - for (k = 0; k < batch;) { - /* columnCount + 1 (ts) */ - - TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - - int64_t *bind_ts = pThreadInfo->bind_ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - - if (g_args.disorderRatio) { - *bind_ts = startTime + getTSRandTail( - g_args.timestamp_step, k, - g_args.disorderRatio, - g_args.disorderRange); - } else { - *bind_ts = startTime + g_args.timestamp_step * k; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - for (int i = 0; i < g_args.columnCount; i ++) { - bind = (TAOS_BIND *)((char *)bindArray - + (sizeof(TAOS_BIND) * (i + 1))); - if ( -1 == prepareStmtBindArrayByType( - bind, - data_type[i], - g_args.binwidth, - pThreadInfo->time_precision, - NULL)) { - free(bindArray); - return -1; - } - } - if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { - errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - break; - } - // if msg > 3MB, break - if (0 != taos_stmt_add_batch(stmt)) { - errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - break; - } - - k++; - recordFrom ++; - if (recordFrom >= insertRows) { - break; - } - } - - free(bindArray); - return k; -} - -static int32_t prepareStbStmtBindTag( - char *bindArray, SSuperTable *stbInfo, - char *tagsVal, - int32_t timePrec) -{ - TAOS_BIND *tag; - - for (int t = 0; t < stbInfo->tagCount; t ++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if ( -1 == prepareStmtBindArrayByType( - tag, - stbInfo->tags[t].data_type, - stbInfo->tags[t].dataLen, - timePrec, - NULL)) { - return -1; - } - } - - return 0; -} - -static int32_t prepareStbStmtBindRand( - int64_t *ts, - char *bindArray, SSuperTable *stbInfo, - int64_t startTime, int32_t recSeq, - int32_t timePrec) -{ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - char *ptr = data; - - TAOS_BIND *bind; - - for (int i = 0; i < stbInfo->columnCount + 1; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); - - if (i == 0) { - int64_t *bind_ts = ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - ptr += bind->buffer_length; - } else if ( -1 == prepareStmtBindArrayByTypeForRand( - bind, - stbInfo->columns[i-1].data_type, - stbInfo->columns[i-1].dataLen, - timePrec, - &ptr, - NULL)) { - return -1; - } - } - - return 0; -} - -UNUSED_FUNC static int32_t prepareStbStmtRand( - threadInfo *pThreadInfo, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime) -{ - int ret; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_STMT *stmt = pThreadInfo->stmt; - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char* tagsValBuf = NULL; - - if (0 == stbInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); - } else { - tagsValBuf = getTagValueFromTagSample( - stbInfo, - tableSeq % stbInfo->tagSampleCount); - } - - if (NULL == tagsValBuf) { - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); - if (NULL == tagsArray) { - tmfree(tagsValBuf); - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - if (-1 == prepareStbStmtBindTag( - tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision - /* is tag */)) { - tmfree(tagsValBuf); - tmfree(tagsArray); - return -1; - } - - ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); - - tmfree(tagsValBuf); - tmfree(tagsArray); - - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - } else { - ret = taos_stmt_set_tbname(stmt, tableName); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - } - - char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); - if (bindArray == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", - __func__, __LINE__, (stbInfo->columnCount + 1)); - return -1; - } - - uint32_t k; - for (k = 0; k < batch;) { - /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBindRand( - pThreadInfo->bind_ts, - bindArray, stbInfo, - startTime, k, - pThreadInfo->time_precision - /* is column */)) { - free(bindArray); - return -1; - } - ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - free(bindArray); - return -1; - } - // if msg > 3MB, break - ret = taos_stmt_add_batch(stmt); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - free(bindArray); - return -1; - } - - k++; - recordFrom ++; - - if (recordFrom >= insertRows) { - break; - } - } - - free(bindArray); - return k; -} - -#if STMT_BIND_PARAM_BATCH == 1 -static int execStbBindParamBatch( - threadInfo *pThreadInfo, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - int ret; - TAOS_STMT *stmt = pThreadInfo->stmt; - - SSuperTable *stbInfo = pThreadInfo->stbInfo; - assert(stbInfo); - - uint32_t columnCount = pThreadInfo->stbInfo->columnCount; - - uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos); - - if (thisBatch > batch) { - thisBatch = batch; - } - verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n", - __func__, __LINE__, batch, *pSamplePos, thisBatch); - - memset(pThreadInfo->bindParams, 0, - (sizeof(TAOS_MULTI_BIND) * (columnCount + 1))); - memset(pThreadInfo->is_null, 0, thisBatch); - - for (int c = 0; c < columnCount + 1; c ++) { - TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); - - char data_type; - - if (c == 0) { - data_type = TSDB_DATA_TYPE_TIMESTAMP; - param->buffer_length = sizeof(int64_t); - param->buffer = pThreadInfo->bind_ts_array; - - } else { - data_type = stbInfo->columns[c-1].data_type; - - char *tmpP; - - switch(data_type) { - case TSDB_DATA_TYPE_BINARY: - param->buffer_length = - stbInfo->columns[c-1].dataLen; - - tmpP = - (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray - +sizeof(char*)*(c-1))); - - verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%"PRIxPTR" position=%"PRId64"\n", - __func__, __LINE__, tmpP, *pSamplePos, param->buffer_length, - (*pSamplePos) * param->buffer_length); - - param->buffer = (void *)(tmpP + *pSamplePos * param->buffer_length); - break; - - case TSDB_DATA_TYPE_NCHAR: - param->buffer_length = - stbInfo->columns[c-1].dataLen; - - tmpP = - (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray - +sizeof(char*)*(c-1))); - - verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%"PRIxPTR" position=%"PRId64"\n", - __func__, __LINE__, tmpP, *pSamplePos, param->buffer_length, - (*pSamplePos) * param->buffer_length); - - param->buffer = (void *)(tmpP + *pSamplePos * param->buffer_length); - break; - - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - param->buffer_length = sizeof(int32_t); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - 
break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - param->buffer_length = sizeof(int8_t); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)( - stbInfo->sampleBindBatchArray - +sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen*(*pSamplePos)); - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - param->buffer_length = sizeof(int16_t); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - param->buffer_length = sizeof(int64_t); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_BOOL: - param->buffer_length = sizeof(int8_t); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_FLOAT: - param->buffer_length = sizeof(float); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_DOUBLE: - param->buffer_length = sizeof(double); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - param->buffer_length = sizeof(int64_t); - param->buffer = - (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) - + stbInfo->columns[c-1].dataLen * (*pSamplePos)); - break; - - default: - errorPrint("%s() LN%d, wrong data type: %d\n", - __func__, - __LINE__, - data_type); - exit(EXIT_FAILURE); - - } - } - - param->buffer_type = data_type; - param->length = malloc(sizeof(int32_t) * thisBatch); - assert(param->length); - - for (int b = 0; b < thisBatch; b++) { - if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) { - param->length[b] = strlen( - (char *)param->buffer + b * - stbInfo->columns[c].dataLen - ); - } else { - param->length[b] = param->buffer_length; - } - } - param->is_null = pThreadInfo->is_null; - param->num = thisBatch; - } - - uint32_t k; - for (k = 0; k < thisBatch;) { - /* columnCount + 1 (ts) */ - if (stbInfo->disorderRatio) { - *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail( - stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k; - } - - debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n", - __func__, __LINE__, - k, *(pThreadInfo->bind_ts_array +k)); - k++; - recordFrom ++; - - (*pSamplePos) ++; - if ((*pSamplePos) == MAX_SAMPLES) { - *pSamplePos = 0; - } - - if (recordFrom >= insertRows) { - break; - } - } - - ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - - for (int c = 0; c < stbInfo->columnCount + 1; c ++) { - TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); - free(param->length); - } - - // if msg > 3MB, break - ret = taos_stmt_add_batch(stmt); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - return k; -} - -static int parseSamplefileToStmtBatch( - SSuperTable* stbInfo) -{ - // char *sampleDataBuf = (stbInfo)? - // stbInfo->sampleDataBuf:g_sampleDataBuf; - int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; - char *sampleBindBatchArray = NULL; - - if (stbInfo) { - stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); - sampleBindBatchArray = stbInfo->sampleBindBatchArray; - } else { - g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); - sampleBindBatchArray = g_sampleBindBatchArray; - } - assert(sampleBindBatchArray); - - for (int c = 0; c < columnCount; c++) { - char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c]; - - char *tmpP = NULL; - - switch(data_type) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - tmpP = calloc(1, sizeof(int) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BOOL: - tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_FLOAT: - tmpP = calloc(1, sizeof(float) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmpP = calloc(1, sizeof(double) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = calloc(1, MAX_SAMPLES * - (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); - assert(tmpP); - *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; - break; - - default: - errorPrint("Unknown data type: %s\n", - (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]); - exit(EXIT_FAILURE); - } - } - - char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; - int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; - - for (int i=0; i < MAX_SAMPLES; i++) { - int cursor = 0; - - for (int c = 0; c < columnCount; c++) { - char data_type = (stbInfo)? 
- stbInfo->columns[c].data_type: - g_args.data_type[c]; - char *restStr = sampleDataBuf - + lenOfOneRow * i + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - } - } - - char *tmpStr = calloc(1, index + 1); - if (NULL == tmpStr) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, index + 1); - return -1; - } - - strncpy(tmpStr, restStr, index); - cursor += index + 1; // skip ',' too - char *tmpP; - - switch(data_type) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(int32_t)*i)) = - atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_FLOAT: - *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(float)*i)) = - (float)atof(tmpStr); - break; - - case TSDB_DATA_TYPE_DOUBLE: - *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(double)*i)) = - atof(tmpStr); - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(int8_t)*i)) = - (int8_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(int16_t)*i)) = - (int16_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(int64_t)*i)) = - (int64_t)atol(tmpStr); - break; - - case TSDB_DATA_TYPE_BOOL: - *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(int8_t)*i)) = - (int8_t)atoi(tmpStr); - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)+sizeof(int64_t)*i)) = - (int64_t)atol(tmpStr); - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray - +sizeof(char*)*c)); - strcpy(tmpP + i* - (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)) - , tmpStr); - break; - - default: - break; - } - - free(tmpStr); - } - } - - return 0; -} - -static int parseSampleToStmtBatchForThread( - threadInfo *pThreadInfo, SSuperTable *stbInfo, - uint32_t timePrec, - uint32_t batch) -{ - uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; - - pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch); - assert(pThreadInfo->bind_ts_array); - - pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1)); - assert(pThreadInfo->bindParams); - - pThreadInfo->is_null = malloc(batch); - assert(pThreadInfo->is_null); - - return 0; -} - -static int parseStbSampleToStmtBatchForThread( - threadInfo *pThreadInfo, - SSuperTable *stbInfo, - uint32_t timePrec, - uint32_t batch) -{ - return parseSampleToStmtBatchForThread( - pThreadInfo, stbInfo, timePrec, batch); -} - -static int parseNtbSampleToStmtBatchForThread( - threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch) -{ - return parseSampleToStmtBatchForThread( - pThreadInfo, NULL, timePrec, batch); -} - -#else -static int parseSampleToStmt( - threadInfo *pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec) -{ - pThreadInfo->sampleBindArray = - (char *)calloc(1, sizeof(char *) * MAX_SAMPLES); - if (pThreadInfo->sampleBindArray == NULL) { - errorPrint2("%s() LN%d, Failed to 
allocate %"PRIu64" bind array buffer\n", - __func__, __LINE__, - (uint64_t)sizeof(char *) * MAX_SAMPLES); - return -1; - } - - int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; - char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; - int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; - - for (int i=0; i < MAX_SAMPLES; i++) { - char *bindArray = - calloc(1, sizeof(TAOS_BIND) * (columnCount + 1)); - if (bindArray == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", - __func__, __LINE__, (columnCount + 1)); - return -1; - } - - TAOS_BIND *bind; - int cursor = 0; - - for (int c = 0; c < columnCount + 1; c++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); - - if (c == 0) { - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = NULL; //bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else { - char data_type = (stbInfo)? - stbInfo->columns[c-1].data_type: - g_args.data_type[c-1]; - int32_t dataLen = (stbInfo)? - stbInfo->columns[c-1].dataLen: - g_args.binwidth; - char *restStr = sampleDataBuf - + lenOfOneRow * i + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - } - } - - char *bindBuffer = calloc(1, index + 1); - if (bindBuffer == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, index + 1); - return -1; - } - - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if (-1 == prepareStmtBindArrayByType( - bind, - data_type, - dataLen, - timePrec, - bindBuffer)) { - free(bindBuffer); - free(bindArray); - return -1; - } - free(bindBuffer); - } - } - *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) = - (uintptr_t)bindArray; - } - - return 0; -} - -static int parseStbSampleToStmt( - threadInfo *pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec) -{ - return parseSampleToStmt( - pThreadInfo, - stbInfo, timePrec); -} - -static int parseNtbSampleToStmt( - threadInfo *pThreadInfo, - uint32_t timePrec) -{ - return parseSampleToStmt( - pThreadInfo, - NULL, - timePrec); -} - -static int32_t prepareStbStmtBindStartTime( - char *tableName, - int64_t *ts, - char *bindArray, SSuperTable *stbInfo, - int64_t startTime, int32_t recSeq) -{ - TAOS_BIND *bind; - - bind = (TAOS_BIND *)bindArray; - - int64_t *bind_ts = ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - - verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", - __func__, __LINE__, tableName, *bind_ts); - - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - return 0; -} - -static uint32_t execBindParam( - threadInfo *pThreadInfo, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - int ret; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_STMT *stmt = pThreadInfo->stmt; - - uint32_t k; - for (k = 0; k < batch;) { - char *bindArray = (char *)(*((uintptr_t *) - (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); - /* columnCount + 1 
(ts) */ - if (-1 == prepareStbStmtBindStartTime( - tableName, - pThreadInfo->bind_ts, - bindArray, stbInfo, - startTime, k - /* is column */)) { - return -1; - } - ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - // if msg > 3MB, break - ret = taos_stmt_add_batch(stmt); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - - k++; - recordFrom ++; - - (*pSamplePos) ++; - if ((*pSamplePos) == MAX_SAMPLES) { - *pSamplePos = 0; - } - - if (recordFrom >= insertRows) { - break; - } - } - - return k; -} -#endif - -static int32_t prepareStbStmt( - threadInfo *pThreadInfo, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - int ret; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_STMT *stmt = pThreadInfo->stmt; - - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char* tagsValBuf = NULL; - - if (0 == stbInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); - } else { - tagsValBuf = getTagValueFromTagSample( - stbInfo, - tableSeq % stbInfo->tagSampleCount); - } - - if (NULL == tagsValBuf) { - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); - if (NULL == tagsArray) { - tmfree(tagsValBuf); - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - if (-1 == prepareStbStmtBindTag( - tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision - /* is tag */)) { - tmfree(tagsValBuf); - tmfree(tagsArray); - return -1; - } - - ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); - - tmfree(tagsValBuf); - tmfree(tagsArray); - - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - } else { - ret = taos_stmt_set_tbname(stmt, tableName); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - } - -#if STMT_BIND_PARAM_BATCH == 1 - return execStbBindParamBatch( - pThreadInfo, - tableName, - tableSeq, - batch, - insertRows, - recordFrom, - startTime, - pSamplePos); -#else - return execBindParam( - pThreadInfo, - tableName, - tableSeq, - batch, - insertRows, - recordFrom, - startTime, - pSamplePos); -#endif -} - -static int32_t generateStbProgressiveData( - SSuperTable *stbInfo, - char *tableName, - int64_t tableSeq, - char *dbName, char *buffer, - int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen) -{ - assert(buffer != NULL); - char *pstr = buffer; - - memset(pstr, 0, *pRemainderBufLen); - - int64_t headLen = generateStbSQLHead( - stbInfo, - tableName, tableSeq, dbName, - buffer, *pRemainderBufLen); - - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen; - - return generateStbDataTail(stbInfo, - g_args.reqPerReq, pstr, *pRemainderBufLen, - insertRows, recordFrom, - startTime, - pSamplePos, &dataLen); -} - -static int32_t generateProgressiveDataWithoutStb( - char *tableName, - /* int64_t tableSeq, */ - threadInfo *pThreadInfo, char *buffer, - int64_t insertRows, - uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ - int64_t *pRemainderBufLen) -{ - assert(buffer != NULL); - char *pstr = buffer; - - memset(buffer, 0, *pRemainderBufLen); - - int64_t headLen = generateSQLHeadWithoutStb( - tableName, pThreadInfo->db_name, - buffer, *pRemainderBufLen); - - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; - - int64_t dataLen; - - return generateDataTailWithoutStb( - g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom, - startTime, - /*pSamplePos, */&dataLen); -} - -static void printStatPerThread(threadInfo *pThreadInfo) -{ - if (0 == pThreadInfo->totalDelay) - pThreadInfo->totalDelay = 1; - - fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". 
%.2f records/second====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows, - (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)) - ); -} - -#if STMT_BIND_PARAM_BATCH == 1 -// stmt sync write interlace data -static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); - - int64_t insertRows; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); - - uint64_t timesInterlace = (insertRows / interlaceRows) + 1; - uint32_t precalcBatch = interlaceRows; - - if (precalcBatch > g_args.reqPerReq) - precalcBatch = g_args.reqPerReq; - - if (precalcBatch > MAX_SAMPLES) - precalcBatch = MAX_SAMPLES; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime; - - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - pThreadInfo->samplePos = 0; - - for (int64_t interlace = 0; - interlace < timesInterlace; interlace ++) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - int64_t generated = 0; - int64_t samplePos; - - for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; tableSeq ++) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint2("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - return NULL; - } - - samplePos = pThreadInfo->samplePos; - startTime = pThreadInfo->start_time - + interlace * interlaceRows * timeStampStep; - uint64_t remainRecPerTbl = - insertRows - interlaceRows * interlace; - uint64_t recPerTbl = 0; - - uint64_t remainPerInterlace; - if (remainRecPerTbl > interlaceRows) { - remainPerInterlace = interlaceRows; - } else { - remainPerInterlace = remainRecPerTbl; - } - - while(remainPerInterlace > 0) { - - uint32_t batch; - if (remainPerInterlace > precalcBatch) { - batch = precalcBatch; - } else { - batch = remainPerInterlace; - } - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batch, startTime); - - if (stbInfo) { - generated = prepareStbStmt( - pThreadInfo, - tableName, - tableSeq, - batch, - insertRows, 0, - startTime, - &samplePos); - } else { - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - batch, - insertRows, - interlaceRows * interlace + recPerTbl, - startTime); - } - - debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { 
- errorPrint2("[%d] %s() LN%d, generated records is %"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace_stmt; - } else if (generated == 0) { - break; - } - - recPerTbl += generated; - remainPerInterlace -= generated; - pThreadInfo->totalInsertRows += generated; - - verbosePrint("[%d] %s() LN%d totalInsertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->totalInsertRows); - - startTs = taosGetTimestampUs(); - - int64_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (generated != affectedRows) { - errorPrint2("[%d] %s() LN%d execInsert() insert %"PRId64", affected rows: %"PRId64"\n\n", - pThreadInfo->threadID, __func__, __LINE__, - generated, affectedRows); - goto free_of_interlace_stmt; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; - if (currentPercent > percentComplete ) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - startTime += (generated * timeStampStep); - } - } - pThreadInfo->samplePos = samplePos; - - if (tableSeq == pThreadInfo->start_table_from - + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - - flagSleep = true; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st) ) { - uint64_t sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep(sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - -free_of_interlace_stmt: - printStatPerThread(pThreadInfo); - return NULL; -} -#else -// stmt sync write interlace data -static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); - - int64_t insertRows; - uint64_t maxSqlLen; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - maxSqlLen = stbInfo->maxSqlLen; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - maxSqlLen = g_args.max_sql_len; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - 
pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if (interlaceRows > g_args.reqPerReq) - interlaceRows = g_args.reqPerReq; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = - g_args.reqPerReq / interlaceRows; - } else { - batchPerTblTimes = 1; - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - uint32_t recOfBatch = 0; - - int32_t generated; - for (uint64_t i = 0; i < batchPerTblTimes; i ++) { - char tableName[TSDB_TABLE_NAME_LEN]; - - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint2("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - return NULL; - } - - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batchPerTbl, startTime); - if (stbInfo) { - generated = prepareStbStmt( - pThreadInfo, - tableName, - tableSeq, - batchPerTbl, - insertRows, 0, - startTime, - &(pThreadInfo->samplePos)); - } else { - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - batchPerTbl, - insertRows, i, - startTime); - } - - debugPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { - errorPrint2("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace_stmt; - } else if (generated == 0) { - break; - } - - tableSeq ++; - recOfBatch += batchPerTbl; - - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl, recOfBatch); - - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time - + generatedRecPerTbl * timeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) - break; - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = remainRows; - - if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) - break; - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) - break; - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - - startTs = taosGetTimestampUs(); - - if (recOfBatch == 0) { - errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", - pThreadInfo->threadID, 
__func__, __LINE__, - batchPerTbl); - if (batchPerTbl > 0) { - errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", - batchPerTbl, maxSqlLen / batchPerTbl); - } - goto free_of_interlace_stmt; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows); - goto free_of_interlace_stmt; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; - if (currentPercent > percentComplete ) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st) ) { - uint64_t sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep(sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - -free_of_interlace_stmt: - printStatPerThread(pThreadInfo); - return NULL; -} - -#endif - -// sync write interlace data -static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { - debugPrint("[%d] %s() LN%d: ### interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); - - int64_t insertRows; - uint64_t maxSqlLen; - int64_t timeStampStep; - uint64_t insert_interval; - - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - if (stbInfo) { - insertRows = stbInfo->insertRows; - maxSqlLen = stbInfo->maxSqlLen; - timeStampStep = stbInfo->timeStampStep; - insert_interval = stbInfo->insertInterval; - } else { - insertRows = g_args.insertRows; - maxSqlLen = g_args.max_sql_len; - timeStampStep = g_args.timestamp_step; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); -#if 1 - if (interlaceRows > g_args.reqPerReq) - interlaceRows = g_args.reqPerReq; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = - g_args.reqPerReq / interlaceRows; - } else { - batchPerTblTimes = 1; - } -#else - uint32_t batchPerTbl; - if (interlaceRows > g_args.reqPerReq) - batchPerTbl = g_args.reqPerReq; - else - batchPerTbl = interlaceRows; - 
- uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = - interlaceRows / batchPerTbl; - } else { - batchPerTblTimes = 1; - } -#endif - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, maxSqlLen, strerror(errno)); - return NULL; - } - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; - - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; - } - - // generate data - memset(pThreadInfo->buffer, 0, maxSqlLen); - uint64_t remainderBufLen = maxSqlLen; - - char *pstr = pThreadInfo->buffer; - - int len = snprintf(pstr, - strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - pstr += len; - remainderBufLen -= len; - - uint32_t recOfBatch = 0; - - int32_t generated; - for (uint64_t i = 0; i < batchPerTblTimes; i ++) { - char tableName[TSDB_TABLE_NAME_LEN]; - - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint2("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); - return NULL; - } - - uint64_t oldRemainderLen = remainderBufLen; - - if (stbInfo) { - generated = generateStbInterlaceData( - pThreadInfo, - tableName, batchPerTbl, i, - batchPerTblTimes, - tableSeq, - pstr, - insertRows, - startTime, - &remainderBufLen); - } else { - generated = generateInterlaceDataWithoutStb( - tableName, batchPerTbl, - tableSeq, - pThreadInfo->db_name, pstr, - insertRows, - startTime, - &remainderBufLen); - } - - debugPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { - errorPrint2("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace; - } else if (generated == 0) { - break; - } - - tableSeq ++; - recOfBatch += batchPerTbl; - - pstr += (oldRemainderLen - remainderBufLen); - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl, recOfBatch); - - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time - + generatedRecPerTbl * timeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) - break; - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = remainRows; - - if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) - break; - } - - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); - - if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) 
- break; - } - - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", - pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); - - startTs = taosGetTimestampUs(); - - if (recOfBatch == 0) { - errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl); - if (batchPerTbl > 0) { - errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", - batchPerTbl, maxSqlLen / batchPerTbl); - } - errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); - goto free_of_interlace; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (recOfBatch != affectedRows) { - errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows, pThreadInfo->buffer); - goto free_of_interlace; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; - if (currentPercent > percentComplete ) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); - - if (insert_interval > (et - st) ) { - uint64_t sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep(sleepTime); // ms - sleepTimeTotal += insert_interval; - } - } - } - if (percentComplete < 100) - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - -free_of_interlace: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; -} - -static void* syncWriteProgressiveStmt(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__); - - SSuperTable* stbInfo = pThreadInfo->stbInfo; - int64_t timeStampStep = - stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; - int64_t insertRows = - (stbInfo)?stbInfo->insertRows:g_args.insertRows; - verbosePrint("%s() LN%d insertRows=%"PRId64"\n", - __func__, __LINE__, insertRows); - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - for (uint64_t tableSeq 
= pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; - tableSeq ++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", - __func__, __LINE__, - pThreadInfo->threadID, tableSeq, tableName); - if (0 == strlen(tableName)) { - errorPrint2("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - return NULL; - } - - // measure prepare + insert - startTs = taosGetTimestampUs(); - - int32_t generated; - if (stbInfo) { - generated = prepareStbStmt( - pThreadInfo, - tableName, - tableSeq, - (g_args.reqPerReq>stbInfo->insertRows)? - stbInfo->insertRows: - g_args.reqPerReq, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); - } else { - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - g_args.reqPerReq, - insertRows, i, - start_time); - } - - verbosePrint("[%d] %s() LN%d generated=%d\n", - pThreadInfo->threadID, - __func__, __LINE__, generated); - - if (generated > 0) - i += generated; - else - goto free_of_stmt_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - // only measure insert - // startTs = taosGetTimestampUs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay/1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint2("%s() LN%d, affected rows: %d\n", - __func__, __LINE__, affectedRows); - goto free_of_stmt_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; - if (currentPercent > percentComplete ) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) - break; - } // insertRows - - if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) - && (0 == strncasecmp( - stbInfo->dataSource, - "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%"PRId64"\n", - __func__, __LINE__, pThreadInfo->samplePos); - } - } // tableSeq - - if (percentComplete < 100) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - } - -free_of_stmt_progressive: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; -} -// sync insertion progressive data -static void* syncWriteProgressive(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - - SSuperTable* stbInfo = pThreadInfo->stbInfo; - uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len; - int64_t timeStampStep = - 
stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; - int64_t insertRows = - (stbInfo)?stbInfo->insertRows:g_args.insertRows; - verbosePrint("%s() LN%d insertRows=%"PRId64"\n", - __func__, __LINE__, insertRows); - - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n", - maxSqlLen, - strerror(errno)); - return NULL; - } - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - pThreadInfo->samplePos = 0; - - int percentComplete = 0; - int64_t totalRows = insertRows * pThreadInfo->ntables; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; - tableSeq ++) { - int64_t start_time = pThreadInfo->start_time; - - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", - __func__, __LINE__, - pThreadInfo->threadID, tableSeq, tableName); - if (0 == strlen(tableName)) { - errorPrint2("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); - return NULL; - } - - int64_t remainderBufLen = maxSqlLen - 2000; - char *pstr = pThreadInfo->buffer; - - int len = snprintf(pstr, - strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - - pstr += len; - remainderBufLen -= len; - - // measure prepare + insert - startTs = taosGetTimestampUs(); - - int32_t generated; - if (stbInfo) { - if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmt( - pThreadInfo, - tableName, - tableSeq, - (g_args.reqPerReq>stbInfo->insertRows)? 
- stbInfo->insertRows: - g_args.reqPerReq, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); - } else { - generated = generateStbProgressiveData( - stbInfo, - tableName, tableSeq, - pThreadInfo->db_name, pstr, - insertRows, i, start_time, - &(pThreadInfo->samplePos), - &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - g_args.reqPerReq, - insertRows, i, - start_time); - } else { - generated = generateProgressiveDataWithoutStb( - tableName, - /* tableSeq, */ - pThreadInfo, pstr, insertRows, - i, start_time, - /* &(pThreadInfo->samplePos), */ - &remainderBufLen); - } - } - - verbosePrint("[%d] %s() LN%d generated=%d\n", - pThreadInfo->threadID, - __func__, __LINE__, generated); - - if (generated > 0) - i += generated; - else - goto free_of_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - // only measure insert - // startTs = taosGetTimestampUs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.f ms\n", - __func__, __LINE__, delay/1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); - - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; - - if (affectedRows < 0) { - errorPrint2("%s() LN%d, affected rows: %d\n", - __func__, __LINE__, affectedRows); - goto free_of_progressive; - } - - pThreadInfo->totalAffectedRows += affectedRows; - - int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; - if (currentPercent > percentComplete ) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } - - if (i >= insertRows) - break; - } // insertRows - - if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) - && (0 == strncasecmp( - stbInfo->dataSource, - "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%"PRId64"\n", - __func__, __LINE__, pThreadInfo->samplePos); - } - } // tableSeq - - if (percentComplete < 100) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); - } - -free_of_progressive: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; -} - -static void* syncWrite(void *sarg) { - - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - setThreadName("syncWrite"); - - uint32_t interlaceRows = 0; - - if (stbInfo) { - if (stbInfo->interlaceRows < stbInfo->insertRows) - interlaceRows = stbInfo->interlaceRows; - } else { - if (g_args.interlaceRows < g_args.insertRows) - interlaceRows = g_args.interlaceRows; - } - - if (interlaceRows > 0) { - // interlace mode - if (stbInfo) { - if (STMT_IFACE == stbInfo->iface) { -#if STMT_BIND_PARAM_BATCH == 1 - return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); -#else - return syncWriteInterlaceStmt(pThreadInfo, interlaceRows); -#endif - 
} else { - return syncWriteInterlace(pThreadInfo, interlaceRows); - } - } - } else { - // progressive mode - if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) - || (STMT_IFACE == g_args.iface)) { - return syncWriteProgressiveStmt(pThreadInfo); - } else { - return syncWriteProgressive(pThreadInfo); - } - } - - return NULL; -} - -static void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* pThreadInfo = (threadInfo*)param; - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - int insert_interval = - stbInfo?stbInfo->insertInterval:g_args.insert_interval; - if (insert_interval) { - pThreadInfo->et = taosGetTimestampMs(); - if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { - taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms - } - } - - char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); - char data[MAX_DATA_SIZE]; - char *pstr = buffer; - pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES", - pThreadInfo->db_name, pThreadInfo->tb_prefix, - pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - if (pThreadInfo->counter >= g_args.reqPerReq) { - pThreadInfo->start_table_from++; - pThreadInfo->counter = 0; - } - if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { - tsem_post(&pThreadInfo->lock_sem); - free(buffer); - taos_free_result(res); - return; - } - - for (int i = 0; i < g_args.reqPerReq; i++) { - int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->stbInfo->disorderRatio - && rand_num < pThreadInfo->stbInfo->disorderRatio) { - int64_t d = pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->stbInfo, data, - MAX_DATA_SIZE, - d); - } else { - generateStbRowData(pThreadInfo->stbInfo, - data, - MAX_DATA_SIZE, - pThreadInfo->lastTs += 1000); - } - pstr += sprintf(pstr, "%s", data); - pThreadInfo->counter++; - - if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - break; - } - } - - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); - free(buffer); - - taos_free_result(res); -} - -static void *asyncWrite(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* stbInfo = pThreadInfo->stbInfo; - - setThreadName("asyncWrite"); - - pThreadInfo->st = 0; - pThreadInfo->et = 0; - pThreadInfo->lastTs = pThreadInfo->start_time; - - int insert_interval = - stbInfo?stbInfo->insertInterval:g_args.insert_interval; - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); - - tsem_wait(&(pThreadInfo->lock_sem)); - - return NULL; -} - -static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr) -{ - uint16_t rest_port = port + TSDB_PORT_HTTP; - struct hostent *server = gethostbyname(host); - if ((server == NULL) || (server->h_addr == NULL)) { - errorPrint2("%s", "no such host"); - return -1; - } - - debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", - server->h_name, - server->h_addr, - (server->h_addrtype == AF_INET)?"ipv4":"ipv6", - server->h_length); - - memset(serv_addr, 0, sizeof(struct sockaddr_in)); - serv_addr->sin_family = AF_INET; - serv_addr->sin_port = htons(rest_port); -#ifdef WINDOWS - serv_addr->sin_addr.s_addr = inet_addr(host); -#else - memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); -#endif - return 0; -} - -static void 
startMultiThreadInsertData(int threads, char* db_name, - char* precision, SSuperTable* stbInfo) { - - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; - if (0 != precision[0]) { - if (0 == strncasecmp(precision, "ms", 2)) { - timePrec = TSDB_TIME_PRECISION_MILLI; - } else if (0 == strncasecmp(precision, "us", 2)) { - timePrec = TSDB_TIME_PRECISION_MICRO; - } else if (0 == strncasecmp(precision, "ns", 2)) { - timePrec = TSDB_TIME_PRECISION_NANO; - } else { - errorPrint2("Not support precision: %s\n", precision); - exit(EXIT_FAILURE); - } - } - - int64_t startTime; - if (stbInfo) { - if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(timePrec); - } else { - if (TSDB_CODE_SUCCESS != taosParseTime( - stbInfo->startTimestamp, - &startTime, - strlen(stbInfo->startTimestamp), - timePrec, 0)) { - ERROR_EXIT("failed to parse time!\n"); - } - } - } else { - startTime = DEFAULT_START_TIME; - } - debugPrint("%s() LN%d, startTime= %"PRId64"\n", - __func__, __LINE__, startTime); - - // read sample data from file first - int ret; - if (stbInfo) { - ret = prepareSampleForStb(stbInfo); - } else { - ret = prepareSampleForNtb(); - } - - if (0 != ret) { - errorPrint2("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); - exit(EXIT_FAILURE); - } - - TAOS* taos0 = taos_connect( - g_Dbs.host, g_Dbs.user, - g_Dbs.password, db_name, g_Dbs.port); - if (NULL == taos0) { - errorPrint2("%s() LN%d, connect to server fail , reason: %s\n", - __func__, __LINE__, taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - int64_t ntables = 0; - uint64_t tableFrom = 0; - - if (stbInfo) { - int64_t limit; - uint64_t offset; - - if ((NULL != g_args.sqlFile) - && (stbInfo->childTblExists == TBL_NO_EXISTS) - && ((stbInfo->childTblOffset != 0) - || (stbInfo->childTblLimit >= 0))) { - printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); - } - - if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((stbInfo->childTblLimit < 0) - || ((stbInfo->childTblOffset - + stbInfo->childTblLimit) - > (stbInfo->childTblCount))) { - - if (stbInfo->childTblCount < stbInfo->childTblOffset) { - printf("WARNING: offset will not be used since the child tables count is less then offset!\n"); - - stbInfo->childTblOffset = 0; - } - stbInfo->childTblLimit = - stbInfo->childTblCount - stbInfo->childTblOffset; - } - - offset = stbInfo->childTblOffset; - limit = stbInfo->childTblLimit; - } else { - limit = stbInfo->childTblCount; - offset = 0; - } - - ntables = limit; - tableFrom = offset; - - if ((stbInfo->childTblExists != TBL_NO_EXISTS) - && ((stbInfo->childTblOffset + stbInfo->childTblLimit) - > stbInfo->childTblCount)) { - printf("WARNING: specified offset + limit > child table count!\n"); - prompt(); - } - - if ((stbInfo->childTblExists != TBL_NO_EXISTS) - && (0 == stbInfo->childTblLimit)) { - printf("WARNING: specified limit = 0, which cannot find table name to insert or query! 
\n"); - prompt(); - } - - stbInfo->childTblName = (char*)calloc(1, - limit * TSDB_TABLE_NAME_LEN); - if (stbInfo->childTblName == NULL) { - taos_close(taos0); - errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); - exit(EXIT_FAILURE); - } - - int64_t childTblCount; - getChildNameOfSuperTableWithLimitAndOffset( - taos0, - db_name, stbInfo->stbName, - &stbInfo->childTblName, &childTblCount, - limit, - offset); - ntables = childTblCount; - } else { - ntables = g_args.ntables; - tableFrom = 0; - } - - taos_close(taos0); - - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } - - if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) { - if (convertHostToServAddr( - g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { - ERROR_EXIT("convert host to server address"); - } - } - - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - assert(pids != NULL); - assert(infos != NULL); - - char *stmtBuffer = calloc(1, BUFFER_SIZE); - assert(stmtBuffer); - -#if STMT_BIND_PARAM_BATCH == 1 - uint32_t interlaceRows = 0; - uint32_t batch; - - if (stbInfo) { - if (stbInfo->interlaceRows < stbInfo->insertRows) - interlaceRows = stbInfo->interlaceRows; - } else { - if (g_args.interlaceRows < g_args.insertRows) - interlaceRows = g_args.interlaceRows; - } - - if (interlaceRows > 0) { - batch = interlaceRows; - } else { - batch = (g_args.reqPerReq>g_args.insertRows)? - g_args.insertRows:g_args.reqPerReq; - } - -#endif - - if ((g_args.iface == STMT_IFACE) - || ((stbInfo) - && (stbInfo->iface == STMT_IFACE))) { - char *pstr = stmtBuffer; - - if ((stbInfo) - && (AUTO_CREATE_SUBTBL - == stbInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - stbInfo->stbName); - for (int tag = 0; tag < (stbInfo->tagCount - 1); - tag ++ ) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - int columnCount = (stbInfo)? 
- stbInfo->columnCount:
- g_args.columnCount;
-
- for (int col = 0; col < columnCount; col ++) {
- pstr += sprintf(pstr, ",?");
- }
- pstr += sprintf(pstr, ")");
-
- debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer);
-#if STMT_BIND_PARAM_BATCH == 1
- parseSamplefileToStmtBatch(stbInfo);
-#endif
- }
-
- for (int i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->threadID = i;
-
- tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
- pThreadInfo->time_precision = timePrec;
- pThreadInfo->stbInfo = stbInfo;
-
- pThreadInfo->start_time = startTime;
- pThreadInfo->minDelay = UINT64_MAX;
-
- if ((NULL == stbInfo) ||
- (stbInfo->iface != REST_IFACE)) {
- //t_info->taos = taos;
- pThreadInfo->taos = taos_connect(
- g_Dbs.host, g_Dbs.user,
- g_Dbs.password, db_name, g_Dbs.port);
- if (NULL == pThreadInfo->taos) {
- free(infos);
- errorPrint2(
- "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
- __func__, __LINE__,
- taos_errstr(NULL));
- exit(EXIT_FAILURE);
- }
-
- if ((g_args.iface == STMT_IFACE)
- || ((stbInfo)
- && (stbInfo->iface == STMT_IFACE))) {
-
- pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
- if (NULL == pThreadInfo->stmt) {
- free(pids);
- free(infos);
- errorPrint2(
- "%s() LN%d, failed init stmt, reason: %s\n",
- __func__, __LINE__,
- taos_errstr(NULL));
- exit(EXIT_FAILURE);
- }
-
- if (0 != taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0)) {
- free(pids);
- free(infos);
- free(stmtBuffer);
- errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
- ret, taos_stmt_errstr(pThreadInfo->stmt));
- exit(EXIT_FAILURE);
- }
- pThreadInfo->bind_ts = malloc(sizeof(int64_t));
-
- if (stbInfo) {
-#if STMT_BIND_PARAM_BATCH == 1
- parseStbSampleToStmtBatchForThread(
- pThreadInfo, stbInfo, timePrec, batch);
-#else
- parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec);
-#endif
- } else {
-#if STMT_BIND_PARAM_BATCH == 1
- parseNtbSampleToStmtBatchForThread(
- pThreadInfo, timePrec, batch);
-#else
- parseNtbSampleToStmt(pThreadInfo, timePrec);
-#endif
- }
- }
- } else {
- pThreadInfo->taos = NULL;
- }
-
- /* if ((NULL == stbInfo)
- || (0 == stbInfo->multiThreadWriteOneTbl)) {
- */
- pThreadInfo->start_table_from = tableFrom;
- pThreadInfo->ntables = i<b?a+1:a;
- pThreadInfo->end_table_to = i < b ?
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - /* } else { - pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = stbInfo->childTblCount; - pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); - } - */ - - if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint( "Could not create socket : %d" , WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); - ERROR_EXIT("opening socket"); - } - - int retConn = connect(sockfd, (struct sockaddr *)&(g_Dbs.serv_addr), sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); - if (retConn < 0) { - ERROR_EXIT("connecting"); - } - pThreadInfo->sockfd = sockfd; - } - - - tsem_init(&(pThreadInfo->lock_sem), 0, 0); - if (ASYNC_MODE == g_Dbs.asyncMode) { - pthread_create(pids + i, NULL, asyncWrite, pThreadInfo); - } else { - pthread_create(pids + i, NULL, syncWrite, pThreadInfo); - } - } - - free(stmtBuffer); - - int64_t start = taosGetTimestampUs(); - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - uint64_t totalDelay = 0; - uint64_t maxDelay = 0; - uint64_t minDelay = UINT64_MAX; - uint64_t cntDelay = 1; - double avgDelay = 0; - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - - tsem_destroy(&(pThreadInfo->lock_sem)); - taos_close(pThreadInfo->taos); - - if (pThreadInfo->stmt) { - taos_stmt_close(pThreadInfo->stmt); - } - - tmfree((char *)pThreadInfo->bind_ts); -#if STMT_BIND_PARAM_BATCH == 1 - tmfree((char *)pThreadInfo->bind_ts_array); - tmfree(pThreadInfo->bindParams); - tmfree(pThreadInfo->is_null); - if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) { -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } -#else - if (pThreadInfo->sampleBindArray) { - for (int k = 0; k < MAX_SAMPLES; k++) { - uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( - pThreadInfo->sampleBindArray - + sizeof(uintptr_t *) * k)); - int columnCount = (pThreadInfo->stbInfo)? 
- pThreadInfo->stbInfo->columnCount: - g_args.columnCount; - for (int c = 1; c < columnCount + 1; c++) { - TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); - if (bind) - tmfree(bind->buffer); - } - tmfree((char *)tmp); - } - tmfree(pThreadInfo->sampleBindArray); - } -#endif - - debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - if (stbInfo) { - stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; - } else { - g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; - g_args.totalInsertRows += pThreadInfo->totalInsertRows; - } - - totalDelay += pThreadInfo->totalDelay; - cntDelay += pThreadInfo->cntDelay; - if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; - if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; - } - - if (cntDelay == 0) cntDelay = 1; - avgDelay = (double)totalDelay / cntDelay; - - int64_t end = taosGetTimestampUs(); - int64_t t = end - start; - if (0 == t) t = 1; - - double tInMs = (double) t / 1000000.0; - - if (stbInfo) { - fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, stbInfo->totalInsertRows, - stbInfo->totalAffectedRows, - threads, db_name, stbInfo->stbName, - (double)(stbInfo->totalInsertRows/tInMs)); - - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, stbInfo->totalInsertRows, - stbInfo->totalAffectedRows, - threads, db_name, stbInfo->stbName, - (double)(stbInfo->totalInsertRows/tInMs)); - } - } else { - fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", - tInMs, g_args.totalInsertRows, - g_args.totalAffectedRows, - threads, db_name, - (double)(g_args.totalInsertRows/tInMs)); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", - tInMs, g_args.totalInsertRows, - g_args.totalAffectedRows, - threads, db_name, - (double)(g_args.totalInsertRows/tInMs)); - } - } - - if (minDelay != UINT64_MAX) { - fprintf(stderr, "insert delay, avg: %10.2fms, max: %10.2fms, min: %10.2fms\n\n", - (double)avgDelay/1000.0, - (double)maxDelay/1000.0, - (double)minDelay/1000.0); - - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %10.2fms, min: %10.2fms\n\n", - (double)avgDelay/1000.0, - (double)maxDelay/1000.0, - (double)minDelay/1000.0); - } - } - - //taos_close(taos); - - free(pids); - free(infos); -} - -static void *queryNtableAggrFunc(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS *taos = pThreadInfo->taos; - setThreadName("queryNtableAggrFunc"); - char *command = calloc(1, BUFFER_SIZE); - assert(command); - - uint64_t startTime = pThreadInfo->start_time; - char *tb_prefix = pThreadInfo->tb_prefix; - FILE *fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); - free(command); - return NULL; - } - - int64_t insertRows; - /* if (pThreadInfo->stbInfo) { - insertRows = pThreadInfo->stbInfo->insertRows; 
// nrecords_per_table; - } else { - */ - insertRows = g_args.insertRows; - // } - - int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; - int64_t totalData = insertRows * ntables; - bool aggr_func = g_Dbs.aggr_func; - - char **aggreFunc; - int n; - - if (g_args.demo_mode) { - aggreFunc = g_aggreFuncDemo; - n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; - } else { - aggreFunc = g_aggreFunc; - n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - } - - if (!aggr_func) { - printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); - } - printf("%"PRId64" records:\n", totalData); - fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - - for (int j = 0; j < n; j++) { - double totalT = 0; - uint64_t count = 0; - for (int64_t i = 0; i < ntables; i++) { - sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64, - aggreFunc[j], tb_prefix, i, startTime); - - double t = taosGetTimestampUs(); - debugPrint("%s() LN%d, sql command: %s\n", - __func__, __LINE__, command); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - fclose(fp); - free(command); - return NULL; - } - - while(taos_fetch_row(pSql) != NULL) { - count++; - } - - t = taosGetTimestampUs() - t; - totalT += t; - - taos_free_result(pSql); - } - - fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", - aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, - (double)(ntables * insertRows) / totalT, totalT / 1000000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000); - } - fprintf(fp, "\n"); - fclose(fp); - free(command); - return NULL; -} - -static void *queryStableAggrFunc(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS *taos = pThreadInfo->taos; - setThreadName("queryStableAggrFunc"); - char *command = calloc(1, BUFFER_SIZE); - assert(command); - - FILE *fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); - free(command); - return NULL; - } - - int64_t insertRows = pThreadInfo->stbInfo->insertRows; - int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; - int64_t totalData = insertRows * ntables; - bool aggr_func = g_Dbs.aggr_func; - - char **aggreFunc; - int n; - - if (g_args.demo_mode) { - aggreFunc = g_aggreFuncDemo; - n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; - } else { - aggreFunc = g_aggreFunc; - n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - } - - if (!aggr_func) { - printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); - } - - printf("%"PRId64" records:\n", totalData); - fprintf(fp, "Querying On %"PRId64" records:\n", totalData); - - for (int j = 0; j < n; j++) { - char condition[COND_BUF_LEN] = "\0"; - char tempS[64] = "\0"; - - int64_t m = 10 < ntables ? 
10 : ntables; - - for (int64_t i = 1; i <= m; i++) { - if (i == 1) { - if (g_args.demo_mode) { - sprintf(tempS, "groupid = %"PRId64"", i); - } else { - sprintf(tempS, "t0 = %"PRId64"", i); - } - } else { - if (g_args.demo_mode) { - sprintf(tempS, " or groupid = %"PRId64" ", i); - } else { - sprintf(tempS, " or t0 = %"PRId64" ", i); - } - } - strncat(condition, tempS, COND_BUF_LEN - 1); - - sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition); - - printf("Where condition: %s\n", condition); - - debugPrint("%s() LN%d, sql command: %s\n", - __func__, __LINE__, command); - fprintf(fp, "%s\n", command); - - double t = taosGetTimestampUs(); - - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - fclose(fp); - free(command); - return NULL; - } - int count = 0; - while(taos_fetch_row(pSql) != NULL) { - count++; - } - t = taosGetTimestampUs() - t; - - fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - ntables * insertRows / (t / 1000), t); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000); - - taos_free_result(pSql); - } - fprintf(fp, "\n"); - } - fclose(fp); - free(command); - - return NULL; -} - -static void prompt() -{ - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } -} - -static int insertTestProcess() { - - setupForAnsiEscape(); - int ret = printfInsertMeta(); - resetAfterAnsiEscape(); - - if (ret == -1) - exit(EXIT_FAILURE); - - debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); - g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); - if (NULL == g_fpOfInsertResult) { - errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile); - return -1; - } - - if (g_fpOfInsertResult) - printfInsertMetaToFile(g_fpOfInsertResult); - - prompt(); - - init_rand_data(); - - // create database and super tables - char *cmdBuffer = calloc(1, BUFFER_SIZE); - assert(cmdBuffer); - - if(createDatabasesAndStables(cmdBuffer) != 0) { - if (g_fpOfInsertResult) - fclose(g_fpOfInsertResult); - free(cmdBuffer); - return -1; - } - free(cmdBuffer); - - // pretreatment - if (prepareSampleData() != 0) { - if (g_fpOfInsertResult) - fclose(g_fpOfInsertResult); - return -1; - } - - double start; - double end; - - if (g_totalChildTables > 0) { - fprintf(stderr, - "creating %"PRId64" table(s) with %d thread(s)\n\n", - g_totalChildTables, g_Dbs.threadCountForCreateTbl); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "creating %"PRId64" table(s) with %d thread(s)\n\n", - g_totalChildTables, g_Dbs.threadCountForCreateTbl); - } - - // create child tables - start = taosGetTimestampMs(); - createChildTables(); - end = taosGetTimestampMs(); - - fprintf(stderr, - "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", - (end - start)/1000.0, g_totalChildTables, - g_Dbs.threadCountForCreateTbl, g_actualChildTables); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", - (end - start)/1000.0, g_totalChildTables, - g_Dbs.threadCountForCreateTbl, g_actualChildTables); - } - } - - // create sub threads for inserting data - //start = taosGetTimestampMs(); - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - for 
(uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - - SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j]; - - if (stbInfo && (stbInfo->insertRows > 0)) { - startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, - stbInfo); - } - } - } - } else { - startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, - NULL); - } - } - //end = taosGetTimestampMs(); - - //int64_t totalInsertRows = 0; - //int64_t totalAffectedRows = 0; - //for (int i = 0; i < g_Dbs.dbCount; i++) { - // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows; - // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; - //} - //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalInsertRows, totalAffectedRows, g_Dbs.threadCount); - - return 0; -} - -static void *specifiedTableQuery(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - - setThreadName("specTableQuery"); - - if (pThreadInfo->taos == NULL) { - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); - if (taos == NULL) { - errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; - } else { - pThreadInfo->taos = taos; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint("use database %s failed!\n\n", - g_queryInfo.dbName); - return NULL; - } - - uint64_t st = 0; - uint64_t et = 0; - - uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; - - uint64_t totalQueried = 0; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - while(queryTimes --) { - if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < - (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { - taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms - } - - st = taosGetTimestampMs(); - - selectAndGetResult(pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); - - et = taosGetTimestampMs(); - printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0); - - totalQueried ++; - g_queryInfo.specifiedQueryInfo.totalQueried ++; - - uint64_t currentPrintTime = taosGetTimestampMs(); - uint64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n", - __func__, __LINE__, endTs, startTs); - printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", - pThreadInfo->threadID, - totalQueried, - (double)(totalQueried/((endTs-startTs)/1000.0))); - lastPrintTime = currentPrintTime; - } - } - return NULL; -} - -static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { - char sourceString[32] = "xxxx"; - char subTblName[TSDB_TABLE_NAME_LEN]; - sprintf(subTblName, "%s.%s", - g_queryInfo.dbName, - g_queryInfo.superQueryInfo.childTblName + 
tblIndex*TSDB_TABLE_NAME_LEN); - - //printf("inSql: %s\n", inSql); - - char* pos = strstr(inSql, sourceString); - if (0 == pos) { - return; - } - - tstrncpy(outSql, inSql, pos - inSql + 1); - //printf("1: %s\n", outSql); - strncat(outSql, subTblName, BUFFER_SIZE - 1); - //printf("2: %s\n", outSql); - strncat(outSql, pos+strlen(sourceString), BUFFER_SIZE - 1); - //printf("3: %s\n", outSql); -} - -static void *superTableQuery(void *sarg) { - char *sqlstr = calloc(1, BUFFER_SIZE); - assert(sqlstr); - - threadInfo *pThreadInfo = (threadInfo *)sarg; - - setThreadName("superTableQuery"); - - if (pThreadInfo->taos == NULL) { - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - free(sqlstr); - return NULL; - } else { - pThreadInfo->taos = taos; - } - } - - uint64_t st = 0; - uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; - - uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes; - uint64_t totalQueried = 0; - uint64_t startTs = taosGetTimestampMs(); - - uint64_t lastPrintTime = taosGetTimestampMs(); - while(queryTimes --) { - if (g_queryInfo.superQueryInfo.queryInterval - && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { - taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - } - - st = taosGetTimestampMs(); - for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr, 0, BUFFER_SIZE); - replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); - if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[j], - pThreadInfo->threadID); - } - selectAndGetResult(pThreadInfo, sqlstr); - - totalQueried++; - g_queryInfo.superQueryInfo.totalQueried ++; - - int64_t currentPrintTime = taosGetTimestampMs(); - int64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n", - pThreadInfo->threadID, - totalQueried, - (double)(totalQueried/((endTs-startTs)/1000.0))); - lastPrintTime = currentPrintTime; - } - } - } - et = taosGetTimestampMs(); - printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n", - taosGetSelfPthreadId(), - pThreadInfo->start_table_from, - pThreadInfo->end_table_to, - (double)(et - st)/1000.0); - } - - free(sqlstr); - return NULL; -} - -static int queryTestProcess() { - - setupForAnsiEscape(); - printfQueryMeta(); - resetAfterAnsiEscape(); - - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, - g_queryInfo.dbName, - g_queryInfo.superQueryInfo.stbName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - prompt(); - - if (g_args.debug_print || 
g_args.verbose_print) {
- printfQuerySystemInfo(taos);
- }
-
- if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
- if (convertHostToServAddr(
- g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
- ERROR_EXIT("convert host to server address");
- }
-
- pthread_t *pids = NULL;
- threadInfo *infos = NULL;
- //==== create sub threads for query from specify table
- int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
- uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
-
- uint64_t startTs = taosGetTimestampMs();
-
- if ((nSqlCount > 0) && (nConcurrent > 0)) {
-
- pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
- infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
-
- if ((NULL == pids) || (NULL == infos)) {
- taos_close(taos);
- ERROR_EXIT("memory allocation failed for create threads\n");
- }
-
- for (uint64_t i = 0; i < nSqlCount; i++) {
- for (int j = 0; j < nConcurrent; j++) {
- uint64_t seq = i * nConcurrent + j;
- threadInfo *pThreadInfo = infos + seq;
- pThreadInfo->threadID = seq;
- pThreadInfo->querySeq = i;
-
- if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
-
- char sqlStr[TSDB_DB_NAME_LEN + 5];
- sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
- if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
- taos_close(taos);
- free(infos);
- free(pids);
- errorPrint2("use database %s failed!\n\n",
- g_queryInfo.dbName);
- return -1;
- }
- }
-
- if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) {
-#ifdef WINDOWS
- WSADATA wsaData;
- WSAStartup(MAKEWORD(2, 2), &wsaData);
- SOCKET sockfd;
-#else
- int sockfd;
-#endif
- sockfd = socket(AF_INET, SOCK_STREAM, 0);
- if (sockfd < 0) {
-#ifdef WINDOWS
- errorPrint( "Could not create socket : %d" , WSAGetLastError());
-#endif
- debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
- ERROR_EXIT("opening socket");
- }
-
- int retConn = connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr),
- sizeof(struct sockaddr));
- debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
- if (retConn < 0) {
- ERROR_EXIT("connecting");
- }
- pThreadInfo->sockfd = sockfd;
- }
- pThreadInfo->taos = NULL;// workaround to use separate taos connection;
-
- pthread_create(pids + seq, NULL, specifiedTableQuery,
- pThreadInfo);
- }
- }
- } else {
- g_queryInfo.specifiedQueryInfo.concurrent = 0;
- }
-
- taos_close(taos);
-
- pthread_t *pidsOfSub = NULL;
- threadInfo *infosOfSub = NULL;
- //==== create sub threads for query from all sub table of the super table
- if ((g_queryInfo.superQueryInfo.sqlCount > 0)
- && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
- infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
-
- if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
- free(infos);
- free(pids);
-
- ERROR_EXIT("memory allocation failed for create threads\n");
- }
-
- int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
- int threads = g_queryInfo.superQueryInfo.threadCnt;
-
- int64_t a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int64_t b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
-
- uint64_t tableFrom = 0;
- for (int i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infosOfSub + i;
- pThreadInfo->threadID = i;
-
- pThreadInfo->start_table_from = tableFrom;
- pThreadInfo->ntables = i<b?a+1:a;
- pThreadInfo->end_table_to = i < b ?
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = NULL; // workaround to use separate taos connection; - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { -#ifdef WINDOWS - WSADATA wsaData; - WSAStartup(MAKEWORD(2, 2), &wsaData); - SOCKET sockfd; -#else - int sockfd; -#endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { -#ifdef WINDOWS - errorPrint( "Could not create socket : %d" , WSAGetLastError()); -#endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); - ERROR_EXIT("opening socket"); - } - - int retConn = connect(sockfd, (struct sockaddr *)&(g_queryInfo.serv_addr), - sizeof(struct sockaddr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); - if (retConn < 0) { - ERROR_EXIT("connecting"); - } - pThreadInfo->sockfd = sockfd; - } - pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); - } - - g_queryInfo.superQueryInfo.threadCnt = threads; - } else { - g_queryInfo.superQueryInfo.threadCnt = 0; - } - - if ((nSqlCount > 0) && (nConcurrent > 0)) { - for (int i = 0; i < nConcurrent; i++) { - for (int j = 0; j < nSqlCount; j++) { - pthread_join(pids[i * nSqlCount + j], NULL); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { - threadInfo *pThreadInfo = infos + i * nSqlCount + j; -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - } - } - } - - tmfree((char*)pids); - tmfree((char*)infos); - - for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", 4)) { - threadInfo *pThreadInfo = infosOfSub + i; -#ifdef WINDOWS - closesocket(pThreadInfo->sockfd); - WSACleanup(); -#else - close(pThreadInfo->sockfd); -#endif - } - } - - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); - - // taos_close(taos);// workaround to use separate taos connection; - uint64_t endTs = taosGetTimestampMs(); - - uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + - g_queryInfo.superQueryInfo.totalQueried; - - fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n", - totalQueried, - (double)(totalQueried/((endTs-startTs)/1000.0))); - return 0; -} - -static void stable_sub_callback( - TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } - - if (param) - fetchResult(res, (threadInfo *)param); - // tao_unsubscribe() will free result. -} - -static void specified_sub_callback( - TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } - - if (param) - fetchResult(res, (threadInfo *)param); - // tao_unsubscribe() will free result. 
-} - -static TAOS_SUB* subscribeImpl( - QUERY_CLASS class, - threadInfo *pThreadInfo, - char *sql, char* topic, bool restart, uint64_t interval) -{ - TAOS_SUB* tsub = NULL; - - if ((SPECIFIED_CLASS == class) - && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { - tsub = taos_subscribe( - pThreadInfo->taos, - restart, - topic, sql, specified_sub_callback, (void*)pThreadInfo, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - } else if ((STABLE_CLASS == class) - && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { - tsub = taos_subscribe( - pThreadInfo->taos, - restart, - topic, sql, stable_sub_callback, (void*)pThreadInfo, - g_queryInfo.superQueryInfo.subscribeInterval); - } else { - tsub = taos_subscribe( - pThreadInfo->taos, - restart, - topic, sql, NULL, NULL, interval); - } - - if (tsub == NULL) { - errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql); - return NULL; - } - - return tsub; -} - -static void *superSubscribe(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - char *subSqlStr = calloc(1, BUFFER_SIZE); - assert(subSqlStr); - - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - uint64_t tsubSeq; - - setThreadName("superSub"); - - if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { - free(subSqlStr); - errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", - pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - exit(EXIT_FAILURE); - } - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, - g_queryInfo.port); - if (pThreadInfo->taos == NULL) { - errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - free(subSqlStr); - return NULL; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint2("use database %s failed!\n\n", - g_queryInfo.dbName); - free(subSqlStr); - return NULL; - } - - char topic[32] = {0}; - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->threadID, - pThreadInfo->start_table_from, - pThreadInfo->end_table_to, i); - sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", - i, pThreadInfo->querySeq); - memset(subSqlStr, 0, BUFFER_SIZE); - replaceChildTblName( - g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], - subSqlStr, i); - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", - __func__, __LINE__, pThreadInfo->threadID, subSqlStr); - tsub[tsubSeq] = subscribeImpl( - STABLE_CLASS, - pThreadInfo, subSqlStr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval); - if (NULL == tsub[tsubSeq]) { - taos_close(pThreadInfo->taos); - free(subSqlStr); - return NULL; - } - } - - // start loop to consume result - int consumed[MAX_QUERY_SQL_COUNT]; - for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { - consumed[i] = 0; - } - TAOS_RES* res = NULL; - - uint64_t st = 0, et = 0; - - while ((g_queryInfo.superQueryInfo.endAfterConsume == 
-1) - || (g_queryInfo.superQueryInfo.endAfterConsume > - consumed[pThreadInfo->end_table_to - - pThreadInfo->start_table_from])) { - - verbosePrint("super endAfterConsume: %d, consumed: %d\n", - g_queryInfo.superQueryInfo.endAfterConsume, - consumed[pThreadInfo->end_table_to - - pThreadInfo->start_table_from]); - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { - continue; - } - - st = taosGetTimestampMs(); - performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et)); - res = taos_consume(tsub[tsubSeq]); - et = taosGetTimestampMs(); - performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st)); - - if (res) { - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } - consumed[tsubSeq] ++; - - if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) - && (consumed[tsubSeq] >= - g_queryInfo.superQueryInfo.resubAfterConsume)) { - verbosePrint("%s() LN%d, keepProgress:%d, resub super table query: %"PRIu64"\n", - __func__, __LINE__, - g_queryInfo.superQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - taos_unsubscribe(tsub[tsubSeq], - g_queryInfo.superQueryInfo.subscribeKeepProgress); - consumed[tsubSeq]= 0; - tsub[tsubSeq] = subscribeImpl( - STABLE_CLASS, - pThreadInfo, subSqlStr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval - ); - if (NULL == tsub[tsubSeq]) { - taos_close(pThreadInfo->taos); - free(subSqlStr); - return NULL; - } - } - } - } - } - verbosePrint("%s() LN%d, super endAfterConsume: %d, consumed: %d\n", - __func__, __LINE__, - g_queryInfo.superQueryInfo.endAfterConsume, - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]); - taos_free_result(res); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - taos_unsubscribe(tsub[tsubSeq], 0); - } - - taos_close(pThreadInfo->taos); - free(subSqlStr); - return NULL; -} - -static void *specifiedSubscribe(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - // TAOS_SUB* tsub = NULL; - - setThreadName("specSub"); - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, - g_queryInfo.port); - if (pThreadInfo->taos == NULL) { - errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; - } - } - - char sqlStr[TSDB_DB_NAME_LEN + 5]; - sprintf(sqlStr, "USE %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - return NULL; - } - - sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - "taosdemo-subscribe-%"PRIu64"-%d", - pThreadInfo->querySeq, - pThreadInfo->threadID); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, pThreadInfo, - 
g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; - } - - // start loop to consume result - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) - || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < - g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { - - printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n", - pThreadInfo->threadID, - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID], - pThreadInfo->querySeq, - g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]); - if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { - continue; - } - - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume( - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); - if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] - != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - fetchResult( - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], - pThreadInfo); - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; - if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) - && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= - g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { - printf("keepProgress:%d, resub specified query: %"PRIu64"\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = - subscribeImpl( - SPECIFIED_CLASS, - pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; - } - } - } - } - taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]); - taos_close(pThreadInfo->taos); - - return NULL; -} - -static int subscribeTestProcess() { - setupForAnsiEscape(); - printfQueryMeta(); - resetAfterAnsiEscape(); - - prompt(); - - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, - g_queryInfo.port); - if (taos == NULL) { - errorPrint2("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, - g_queryInfo.dbName, - g_queryInfo.superQueryInfo.stbName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - taos_close(taos); // workaround to use separate taos connection; - - pthread_t *pids 
= NULL;
- threadInfo *infos = NULL;
-
- pthread_t *pidsOfStable = NULL;
- threadInfo *infosOfStable = NULL;
-
- //==== create threads for query for specified table
- if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
- debugPrint("%s() LN%d, specified query sqlCount %d.\n",
- __func__, __LINE__,
- g_queryInfo.specifiedQueryInfo.sqlCount);
- } else {
- if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint2("%s() LN%d, specified query sqlCount %d.\n",
- __func__, __LINE__,
- g_queryInfo.specifiedQueryInfo.sqlCount);
- exit(EXIT_FAILURE);
- }
-
- pids = calloc(
- 1,
- g_queryInfo.specifiedQueryInfo.sqlCount *
- g_queryInfo.specifiedQueryInfo.concurrent *
- sizeof(pthread_t));
- infos = calloc(
- 1,
- g_queryInfo.specifiedQueryInfo.sqlCount *
- g_queryInfo.specifiedQueryInfo.concurrent *
- sizeof(threadInfo));
- if ((NULL == pids) || (NULL == infos)) {
- errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
- exit(EXIT_FAILURE);
- }
-
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
- uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
- threadInfo *pThreadInfo = infos + seq;
- pThreadInfo->threadID = seq;
- pThreadInfo->querySeq = i;
- pThreadInfo->taos = NULL; // workaround to use separate taos connection;
- pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
- }
- }
- }
-
- //==== create threads for super table query
- if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
- debugPrint("%s() LN%d, super table query sqlCount %d.\n",
- __func__, __LINE__,
- g_queryInfo.superQueryInfo.sqlCount);
- } else {
- if ((g_queryInfo.superQueryInfo.sqlCount > 0)
- && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
- pidsOfStable = calloc(
- 1,
- g_queryInfo.superQueryInfo.sqlCount *
- g_queryInfo.superQueryInfo.threadCnt *
- sizeof(pthread_t));
- infosOfStable = calloc(
- 1,
- g_queryInfo.superQueryInfo.sqlCount *
- g_queryInfo.superQueryInfo.threadCnt *
- sizeof(threadInfo));
- if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
- errorPrint2("%s() LN%d, malloc failed for create threads\n",
- __func__, __LINE__);
- // taos_close(taos);
- exit(EXIT_FAILURE);
- }
-
- int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
- int threads = g_queryInfo.superQueryInfo.threadCnt;
-
- int64_t a = ntables / threads;
- if (a < 1) {
- threads = ntables;
- a = 1;
- }
-
- int64_t b = 0;
- if (threads != 0) {
- b = ntables % threads;
- }
-
- for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- uint64_t tableFrom = 0;
- for (int j = 0; j < threads; j++) {
- uint64_t seq = i * threads + j;
- threadInfo *pThreadInfo = infosOfStable + seq;
- pThreadInfo->threadID = seq;
- pThreadInfo->querySeq = i;
-
- pThreadInfo->start_table_from = tableFrom;
- pThreadInfo->ntables = j<b?a+1:a;
- pThreadInfo->end_table_to = j<b?tableFrom+a:tableFrom+a-1;
- tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // workaround to use separate taos connection;
- pthread_create(pidsOfStable + seq,
- NULL, superSubscribe, pThreadInfo);
- }
- }
-
- g_queryInfo.superQueryInfo.threadCnt = threads;
-
- for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
- for (int j = 0; j < threads; j++) {
- uint64_t seq = i * threads + j;
- pthread_join(pidsOfStable[seq], NULL);
- }
- }
- }
- }
-
- for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
- uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
- pthread_join(pids[seq],
NULL); - } - } - - tmfree((char*)pids); - tmfree((char*)infos); - - tmfree((char*)pidsOfStable); - tmfree((char*)infosOfStable); - // taos_close(taos); - return 0; -} - -static void setParaFromArg() { - char type[20]; - char length[20]; - if (g_args.host) { - tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); - } else { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } - - if (g_args.user) { - tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); - } - - tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN); - - if (g_args.port) { - g_Dbs.port = g_args.port; - } - - g_Dbs.threadCount = g_args.nthreads; - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - - g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = true; - - tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); - g_Dbs.db[0].dbCfg.replica = g_args.replica; - tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN); - - tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - - g_Dbs.use_metric = g_args.use_metric; - g_args.prepared_rand = min(g_args.insertRows, MAX_PREPARED_RAND); - g_Dbs.aggr_func = g_args.aggr_func; - - char dataString[TSDB_MAX_BYTES_PER_ROW]; - char *data_type = g_args.data_type; - char **dataType = g_args.dataType; - - memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); - - if ((data_type[0] == TSDB_DATA_TYPE_BINARY) - || (data_type[0] == TSDB_DATA_TYPE_BOOL) - || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { - g_Dbs.aggr_func = false; - } - - if (g_args.use_metric) { - g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables; - g_Dbs.threadCount = g_args.nthreads; - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - g_Dbs.asyncMode = g_args.async_mode; - - g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; - g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; - tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, - g_args.tb_prefix, TBNAME_PREFIX_LEN); - tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN); - - if (g_args.iface == INTERFACE_BUT) { - g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE; - } else { - g_Dbs.db[0].superTbls[0].iface = g_args.iface; - } - tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, - "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; - - g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows; - g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; - - g_Dbs.db[0].superTbls[0].columnCount = 0; - for (int i = 0; i < MAX_NUM_COLUMNS; i++) { - if (data_type[i] == TSDB_DATA_TYPE_NULL) { - break; - } - - g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i]; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); - if (1 == regexMatch(dataType[i], "^(NCHAR|BINARY)(\\([1-9][0-9]*\\))$", REG_ICASE | - REG_EXTENDED)) { - sscanf(dataType[i], "%[^(](%[^)]", type, length); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = atoi(length); - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - type, min(DATATYPE_BUFF_LEN, strlen(type) + 1)); - } else { - g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); - } - 
g_Dbs.db[0].superTbls[0].columnCount++; - } - - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount; - } else { - for (int i = g_Dbs.db[0].superTbls[0].columnCount; - i < g_args.columnCount; i++) { - g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT; - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; - g_Dbs.db[0].superTbls[0].columnCount++; - } - } - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, - "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); - g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, - "BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1)); - g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth; - g_Dbs.db[0].superTbls[0].tagCount = 2; - } else { - g_Dbs.threadCountForCreateTbl = g_args.nthreads; - g_Dbs.db[0].superTbls[0].tagCount = 0; - } -} - -/* Function to do regular expression check */ -static int regexMatch(const char *s, const char *reg, int cflags) { - regex_t regex; - char msgbuf[100] = {0}; - - /* Compile regular expression */ - if (regcomp(&regex, reg, cflags) != 0) { - ERROR_EXIT("Fail to compile regex\n"); - } - - /* Execute regular expression */ - int reti = regexec(&regex, s, 0, NULL, 0); - if (!reti) { - regfree(&regex); - return 1; - } else if (reti == REG_NOMATCH) { - regfree(&regex); - return 0; - } else { - regerror(reti, &regex, msgbuf, sizeof(msgbuf)); - regfree(&regex); - printf("Regex match failed: %s\n", msgbuf); - exit(EXIT_FAILURE); - } - return 0; -} - -static int isCommentLine(char *line) { - if (line == NULL) return 1; - - return regexMatch(line, "^\\s*#.*", REG_EXTENDED); -} - -static void querySqlFile(TAOS* taos, char* sqlFile) -{ - FILE *fp = fopen(sqlFile, "r"); - if (fp == NULL) { - printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno)); - return; - } - - int read_len = 0; - char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW); - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - double t = taosGetTimestampMs(); - - while((read_len = tgetline(&line, &line_len, fp)) != -1) { - if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue; - line[--read_len] = '\0'; - - if (read_len == 0 || isCommentLine(line)) { // line starts with # - continue; - } - - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } - - memcpy(cmd + cmd_len, line, read_len); - if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { - errorPrint2("%s() LN%d, queryDbExec %s failed!\n", - __func__, __LINE__, cmd); - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; - } - memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW); - cmd_len = 0; - } - - t = taosGetTimestampMs() - t; - printf("run %s took %.6f second(s)\n\n", sqlFile, t); - - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; -} - -static void testMetaFile() { - if (INSERT_TEST == g_args.test_mode) { - if (g_Dbs.cfgDir[0]) - taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); - - insertTestProcess(); - - } else if (QUERY_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - - queryTestProcess(); - - } else if (SUBSCRIBE_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - - subscribeTestProcess(); - - } else { - ; - } -} - -static void
queryAggrFunc() { - // query data - - pthread_t read_id; - threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo)); - assert(pThreadInfo); - pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 - pThreadInfo->start_table_from = 0; - - if (g_args.use_metric) { - pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(pThreadInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); - } else { - pThreadInfo->ntables = g_args.ntables; - pThreadInfo->end_table_to = g_args.ntables -1; - tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); - } - - pThreadInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - if (pThreadInfo->taos == NULL) { - free(pThreadInfo); - errorPrint2("Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(EXIT_FAILURE); - } - - tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); - - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); - } else { - pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); - } - pthread_join(read_id, NULL); - taos_close(pThreadInfo->taos); - free(pThreadInfo); -} - -static void testCmdLine() { - - if (strlen(configDir)) { - wordexp_t full_path; - if (wordexp(configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", configDir); - return; - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } - - g_args.test_mode = INSERT_TEST; - insertTestProcess(); - - if (g_Dbs.aggr_func) { - queryAggrFunc(); - } -} - -int main(int argc, char *argv[]) { - parse_args(argc, argv, &g_args); - - debugPrint("meta file: %s\n", g_args.metaFile); - - if (g_args.metaFile) { - g_totalChildTables = 0; - - if (false == getInfoFromJsonFile(g_args.metaFile)) { - printf("Failed to read %s\n", g_args.metaFile); - return 1; - } - - testMetaFile(); - } else { - memset(&g_Dbs, 0, sizeof(SDbs)); - g_Dbs.db = calloc(1, sizeof(SDataBase)); - assert(g_Dbs.db); - g_Dbs.db[0].superTbls = calloc(1, sizeof(SSuperTable)); - assert(g_Dbs.db[0].superTbls); - setParaFromArg(); - - if (NULL != g_args.sqlFile) { - TAOS* qtaos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - querySqlFile(qtaos, g_args.sqlFile); - taos_close(qtaos); - - } else { - testCmdLine(); - } - - if (g_dupstr) - free(g_dupstr); - } - postFreeResource(); - - return 0; -} diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index 9e0de204d78cb54bea240a734f2373b709b6c6f9..c7221a6d301ae09e47bd68c76a90599fd85dff2a 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -43,7 +43,7 @@ ELSE () COMMAND git clean -f -d BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 > /dev/null && ./upx taosadapter || : + COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx 
taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter index 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1..88346a2e4e2e9282d2ec8b8c5264ca1ec23698a1 160000 --- a/src/plugins/taosadapter +++ b/src/plugins/taosadapter @@ -1 +1 @@ -Subproject commit 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1 +Subproject commit 88346a2e4e2e9282d2ec8b8c5264ca1ec23698a1 diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 217efffe2b539c7d1ad56704c9572ff1eb8227ff..24356ee095d0a7dd12a650a450a975005ac3a54e 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -76,6 +76,8 @@ extern "C" { #define TSDB_FUNC_BLKINFO 36 +#define TSDB_FUNC_ELAPSED 37 + /////////////////////////////////////////// // the following functions is not implemented. // after implementation, move them before TSDB_FUNC_BLKINFO. also make TSDB_FUNC_BLKINFO the maxium function index diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index be746a697889c53f1ce6ec8940c8aa0a9dc05000..99f1de059a47b52099c5f40cada86afff858ea22 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -230,6 +230,7 @@ typedef struct SQueryAttr { bool diffQuery; // is diff query bool simpleAgg; bool pointInterpQuery; // point interpolation query + bool needTableSeqScan; // need scan table by table bool needReverseScan; // need reverse scan bool distinct; // distinct query or not bool stateWindow; // window State on sub/normal table diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 9fe775c40a7e66d2523c4deb02396953af307765..b4b1cfd0c8fdc88a00256f40a612f0956a830178 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -196,6 +196,12 @@ typedef struct { char *taglists; } SSampleFuncInfo; +typedef struct SElapsedInfo { + int8_t hasResult; + TSKEY min; + TSKEY max; +} SElapsedInfo; + typedef struct { bool valueAssigned; union { @@ -371,6 +377,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *bytes = sizeof(STwaInfo); *interBytes = *bytes; return TSDB_CODE_SUCCESS; + } else if (functionId == TSDB_FUNC_ELAPSED) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SElapsedInfo); + *interBytes = *bytes; + return TSDB_CODE_SUCCESS; } } @@ -471,6 +482,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *bytes = sizeof(SStddevdstInfo); *interBytes = (*bytes); + } else if (functionId == TSDB_FUNC_ELAPSED) { + *type = TSDB_DATA_TYPE_DOUBLE; + *bytes = tDataTypes[*type].bytes; + *interBytes = sizeof(SElapsedInfo); } else { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -480,6 +495,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI // TODO use hash table int32_t isValidFunction(const char* name, int32_t len) { +<<<<<<< HEAD for (int32_t i = 0; i < TSDB_FUNC_SCALAR_MAX_NUM; ++i) { int32_t nameLen = (int32_t) strlen(aScalarFunctions[i].name); @@ -492,7 +508,7 @@ int32_t isValidFunction(const char* name, int32_t len) { } } - for(int32_t i = 0; i <= tListLen(aAggs); ++i) { + for(int32_t i = 0; i <= TSDB_FUNC_ELAPSED; ++i) { int32_t nameLen = (int32_t) strlen(aAggs[i].name); if (len != nameLen) { continue; @@ -3461,7 +3477,7 @@ static void spread_function(SQLFunctionCtx *pCtx) { SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); int32_t numOfElems = 0; - + // 
todo : opt with pre-calculated result // column missing cause the hasNull to be true if (pCtx->preAggVals.isSet) { @@ -3564,7 +3580,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { * the type of intermediate data is binary */ SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - + if (pCtx->currentStage == MERGE_STAGE) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); @@ -4755,6 +4771,120 @@ static void sample_func_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } +static SElapsedInfo * getSElapsedInfo(SQLFunctionCtx *pCtx) { + if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) { + return (SElapsedInfo *)pCtx->pOutput; + } else { + return GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + } +} + +static bool elapsedSetup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { + return false; + } + + SElapsedInfo *pInfo = getSElapsedInfo(pCtx); + pInfo->min = MAX_TS_KEY; + pInfo->max = 0; + pInfo->hasResult = 0; + + return true; +} + +static int32_t elapsedRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { + return BLK_DATA_NO_NEEDED; +} + +static void elapsedFunction(SQLFunctionCtx *pCtx) { + SElapsedInfo *pInfo = getSElapsedInfo(pCtx); + if (pCtx->preAggVals.isSet) { + if (pInfo->min == MAX_TS_KEY) { + pInfo->min = pCtx->preAggVals.statis.min; + pInfo->max = pCtx->preAggVals.statis.max; + } else { + if (pCtx->order == TSDB_ORDER_ASC) { + pInfo->max = pCtx->preAggVals.statis.max; + } else { + pInfo->min = pCtx->preAggVals.statis.min; + } + } + } else { + // 0 == pCtx->size mean this is end interpolation. + if (0 == pCtx->size) { + if (pCtx->order == TSDB_ORDER_DESC) { + if (pCtx->end.key != INT64_MIN) { + pInfo->min = pCtx->end.key; + } + } else { + if (pCtx->end.key != INT64_MIN) { + pInfo->max = pCtx->end.key + 1; + } + } + goto elapsedOver; + } + + int64_t *ptsList = (int64_t *)GET_INPUT_DATA_LIST(pCtx); + // pCtx->start.key == INT64_MIN mean this is first window or there is actual start point of current window. + // pCtx->end.key == INT64_MIN mean current window does not end in current data block or there is actual end point of current window. + if (pCtx->order == TSDB_ORDER_DESC) { + if (pCtx->start.key == INT64_MIN) { + pInfo->max = (pInfo->max < ptsList[pCtx->size - 1]) ? ptsList[pCtx->size - 1] : pInfo->max; + } else { + pInfo->max = pCtx->start.key + 1; + } + + if (pCtx->end.key != INT64_MIN) { + pInfo->min = pCtx->end.key; + } else { + pInfo->min = ptsList[0]; + } + } else { + if (pCtx->start.key == INT64_MIN) { + pInfo->min = (pInfo->min > ptsList[0]) ? 
ptsList[0] : pInfo->min; + } else { + pInfo->min = pCtx->start.key; + } + + if (pCtx->end.key != INT64_MIN) { + pInfo->max = pCtx->end.key + 1; + } else { + pInfo->max = ptsList[pCtx->size - 1]; + } + } + } + +elapsedOver: + SET_VAL(pCtx, pCtx->size, 1); + + if (pCtx->size > 0) { + GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; + pInfo->hasResult = DATA_SET_FLAG; + } +} + +static void elapsedMerge(SQLFunctionCtx *pCtx) { + SElapsedInfo *pInfo = getSElapsedInfo(pCtx); + memcpy(pInfo, pCtx->pInput, (size_t)pCtx->inputBytes); + GET_RES_INFO(pCtx)->hasResult = pInfo->hasResult; +} + +static void elapsedFinalizer(SQLFunctionCtx *pCtx) { + if (GET_RES_INFO(pCtx)->hasResult != DATA_SET_FLAG) { + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); + return; + } + + SElapsedInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + *(double *)pCtx->pOutput = (double)pInfo->max - (double)pInfo->min; + if (pCtx->numOfParams > 0 && pCtx->param[0].i64 > 0) { + *(double *)pCtx->pOutput = *(double *)pCtx->pOutput / pCtx->param[0].i64; + } + GET_RES_INFO(pCtx)->numOfRes = 1; + + doFinalizer(pCtx); +} + ///////////////////////////////////////////////////////////////////////////////////////////// /* * function compatible list. @@ -4775,8 +4905,8 @@ int32_t functionCompatList[] = { 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, // tid_tag, deriv, csum, mavg, sample, 6, 8, -1, -1, -1, - // block_info - 7 + // block_info, elapsed + 7, 1 }; SAggFunctionInfo aAggs[40] = {{ @@ -5179,7 +5309,7 @@ SAggFunctionInfo aAggs[40] = {{ dataBlockRequired, }, { - // 36 + // 33 "csum", TSDB_FUNC_CSUM, TSDB_FUNC_INVALID_ID, @@ -5191,7 +5321,7 @@ SAggFunctionInfo aAggs[40] = {{ dataBlockRequired, }, { - // 37 + // 34 "mavg", TSDB_FUNC_MAVG, TSDB_FUNC_INVALID_ID, @@ -5203,7 +5333,7 @@ SAggFunctionInfo aAggs[40] = {{ dataBlockRequired, }, { - // 38 + // 35 "sample", TSDB_FUNC_SAMPLE, TSDB_FUNC_SAMPLE, @@ -5215,7 +5345,7 @@ SAggFunctionInfo aAggs[40] = {{ dataBlockRequired, }, { - // 39 + // 36 "_block_dist", TSDB_FUNC_BLKINFO, TSDB_FUNC_BLKINFO, @@ -5226,4 +5356,16 @@ SAggFunctionInfo aAggs[40] = {{ block_func_merge, dataBlockRequired, }, + { + // 37 + "elapsed", + TSDB_FUNC_ELAPSED, + TSDB_FUNC_ELAPSED, + TSDB_BASE_FUNC_SO, + elapsedSetup, + elapsedFunction, + elapsedFinalizer, + elapsedMerge, + elapsedRequired, + } }; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 75cab67f38a72e47475ff40810d17e6d241e5360..1cf68f4190443fce6c20506bfc2930e33c56b0e8 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -938,9 +938,10 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; - bool hasAggregates = pCtx[0].preAggVals.isSet; for (int32_t k = 0; k < numOfOutput; ++k) { + bool hasAggregates = pCtx[k].preAggVals.isSet; + pCtx[k].size = forwardStep; pCtx[k].startTs = pWin->skey; @@ -1268,7 +1269,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { int32_t functionId = pCtx[k].functionId; - if (functionId != TSDB_FUNC_TWA && functionId != TSDB_FUNC_INTERP) { + if (functionId != TSDB_FUNC_TWA && functionId != TSDB_FUNC_INTERP && functionId != TSDB_FUNC_ELAPSED) { pCtx[k].start.key = INT64_MIN; continue; } @@ -1311,7 +1312,7 @@ void 
doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes; } } - } else if (functionId == TSDB_FUNC_TWA) { + } else if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_ELAPSED) { assert(curTs != windowKey); if (prevRowIndex == -1) { @@ -1478,7 +1479,6 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul STimeWindow win = getActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); - SResultRow* pResult = NULL; int32_t ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); @@ -1501,23 +1501,22 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul continue; } - STimeWindow w = pRes->win; - ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult, - tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); - if (ret != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } - - assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); + STimeWindow w = pRes->win; + ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult, + tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); + if (ret != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } - doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1, - tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); + assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); - setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); - setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); + doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1, + tsCols[startPos], startPos, QUERY_IS_ASC_QUERY(pQueryAttr) ? 
w.ekey : w.skey, RESULT_ROW_END_INTERP); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); - } + setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); + } // restore current time window ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, @@ -1831,7 +1830,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde pCtx->hasNull = hasNull(pColIndex, pStatis); // set the statistics data for primary time stamp column - if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if ((pCtx->functionId == TSDB_FUNC_SPREAD || pCtx->functionId == TSDB_FUNC_ELAPSED) && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { pCtx->preAggVals.isSet = true; pCtx->preAggVals.statis.min = pSDataBlock->info.window.skey; pCtx->preAggVals.statis.max = pSDataBlock->info.window.ekey; @@ -6239,7 +6238,17 @@ group_finished_exit: return true; } +static void resetInterpolation(SQLFunctionCtx *pCtx, SQueryRuntimeEnv* pRuntimeEnv, int32_t numOfOutput) { + if (!pRuntimeEnv->pQueryAttr->timeWindowInterpo) { + return; + } + for (int32_t i = 0; i < numOfOutput; ++i) { + pCtx[i].start.key = INT64_MIN; + pCtx[i].end.key = INT64_MIN; + } + *(TSKEY *)pRuntimeEnv->prevRow[0] = INT64_MIN; +} static void doTimeEveryImpl(SOperatorInfo* pOperator, SQLFunctionCtx *pCtx, SSDataBlock* pBlock, bool newgroup) { STimeEveryOperatorInfo* pEveryInfo = (STimeEveryOperatorInfo*) pOperator->info; @@ -6467,6 +6476,7 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; + STableId prevId = {0, 0}; while(1) { publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); @@ -6476,6 +6486,12 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) { break; } + if (prevId.tid != pBlock->info.tid || prevId.uid != pBlock->info.uid) { + resetInterpolation(pIntervalInfo->pCtx, pRuntimeEnv, pOperator->numOfOutput); + prevId.uid = pBlock->info.uid; + prevId.tid = pBlock->info.tid; + } + // the pDataBlock are always the same one, no need to call this again STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; @@ -8818,6 +8834,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S pQueryAttr->tsCompQuery = pQueryMsg->tsCompQuery; pQueryAttr->simpleAgg = pQueryMsg->simpleAgg; pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery; + pQueryAttr->needTableSeqScan = pQueryMsg->needTableSeqScan; pQueryAttr->needReverseScan = pQueryMsg->needReverseScan; pQueryAttr->stateWindow = pQueryMsg->stateWindow; pQueryAttr->vgId = vgId; diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index d6b49c53ded48f49fe76d052e27904241e910a98..90b42cc6574d116c18fc46c201b6de9dc95fc989 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -538,7 +538,7 @@ SArray* createTableScanPlan(SQueryAttr* pQueryAttr) { } else { if (pQueryAttr->queryBlockDist) { op = OP_TableBlockInfoScan; - } else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery) { + } else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery || pQueryAttr->needTableSeqScan) { op = OP_TableSeqScan; } else if 
(pQueryAttr->needReverseScan || pQueryAttr->pointInterpQuery) { op = OP_DataBlocksOptScan; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 379b7530fa5a898938b9bf0b552e09ab4fbc70b8..3ab8ad9e7c9e737bb0f94dddbdcf19b198566c7e 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -16,7 +16,7 @@ #include "os.h" #define TAOS_ERROR_C - + typedef struct { int32_t val; const char* str; @@ -232,6 +232,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, "Invalid message lengt TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress") TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, "Too many vnode directories") TAOS_DEFINE_ERROR(TSDB_CODE_DND_EXITING, "Dnode is exiting") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_VNODE_OPEN_FAILED, "Vnode open failed") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, "Action in progress") diff --git a/tests/examples/C#/C#checker/C#checker.csproj b/tests/examples/C#/C#checker/C#checker.csproj new file mode 100644 index 0000000000000000000000000000000000000000..afeeaf3f01301210c0e945c8e02b40790ebec743 --- /dev/null +++ b/tests/examples/C#/C#checker/C#checker.csproj @@ -0,0 +1,13 @@ + + + + Exe + net5.0 + C_checker + + + + + + + diff --git a/tests/examples/C#/TDengineDriver.cs b/tests/examples/C#/TDengineDriver.cs deleted file mode 100644 index 6e86b692f7edf512c751a49590eca3bf74949091..0000000000000000000000000000000000000000 --- a/tests/examples/C#/TDengineDriver.cs +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -using System; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -namespace TDengineDriver -{ - enum TDengineDataType - { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10, // unicode string - TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes - TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() - { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOL"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "TINYINT"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SMALLINT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "BIGINT"; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - return "TINYINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - return "SMALLINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - return "INT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - return "BIGINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } - - [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - 
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } - - [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - - //get precision in restultset - [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] - static extern public int ResultPrecision(IntPtr taos); - - //schemaless API - [DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision); - } -} diff --git a/tests/examples/C#/TDengineTest.cs b/tests/examples/C#/TDengineTest.cs deleted file mode 100644 index f4ee62527feda4d43b21f37e9c513af2053e1f9d..0000000000000000000000000000000000000000 --- a/tests/examples/C#/TDengineTest.cs +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -using System; -using System.Text; -using System.Collections.Generic; -using System.Runtime.InteropServices; -using System.Collections; - -namespace TDengineDriver -{ - class TDengineTest - { - //connect parameters - private string host; - private string configDir; - private string user; - private string password; - private short port = 0; - - //sql parameters - private string dbName; - private string stableName; - private string tablePrefix; - - private bool isInsertData; - private bool isQueryData; - - private long tableCount; - private long totalRows; - private long batchRows; - private long beginTimestamp = 1551369600000L; - - private IntPtr conn = IntPtr.Zero; - private long rowsInserted = 0; - - static void Main(string[] args) - { - TDengineTest tester = new TDengineTest(); - tester.ReadArgument(args); - - Console.WriteLine("---------------------------------------------------------------"); - Console.WriteLine("Starting Testing..."); - Console.WriteLine("---------------------------------------------------------------"); - - tester.InitTDengine(); - tester.ConnectTDengine(); - tester.CreateDbAndTable(); - tester.ExecuteInsert(); - tester.ExecuteQuery(); - tester.CloseConnection(); - - Console.WriteLine("---------------------------------------------------------------"); - Console.WriteLine("Stop Testing..."); - Console.WriteLine("---------------------------------------------------------------"); - - } - - public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) - { - int argc = argv.Length; - for (int i = 0; i < argc; ++i) - { - if (argName != argv[i]) - { - continue; - } - if (i < argc - 1) - { - String tmp = argv[i + 1]; - if (tmp[0] == '-') - { - Console.WriteLine("option {0:G} requires an argument", tmp); - ExitProgram(); - } - - long tmpVal = Convert.ToInt64(tmp); - if (tmpVal < minVal || tmpVal > maxVal) - { - Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); - ExitProgram(); - } - - return tmpVal; - } - } - - return defaultValue; - } - - public String GetArgumentAsString(String[] argv, String argName, String defaultValue) - { - int argc = argv.Length; - for (int i = 0; i < argc; ++i) - { - if (argName != argv[i]) - { - continue; - } - if (i < argc - 1) - { - String tmp = argv[i + 1]; - if (tmp[0] == '-') - { - Console.WriteLine("option {0:G} requires an argument", tmp); - ExitProgram(); - } - return tmp; - } - } - - return defaultValue; - } - - public void PrintHelp(String[] argv) - { - for (int i = 0; i < argv.Length; ++i) - { - if ("--help" == argv[i]) - { - String indent = " "; - Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); - Console.WriteLine("{0:G}{1:G}", indent, "-h"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); - Console.WriteLine("{0:G}{1:G}", indent, "-u"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); - Console.WriteLine("{0:G}{1:G}", indent, "-p"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); - Console.WriteLine("{0:G}{1:G}", indent, "-d"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); - Console.WriteLine("{0:G}{1:G}", indent, "-s"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, 
default is mt"); - Console.WriteLine("{0:G}{1:G}", indent, "-t"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table prefixs, default is t"); - Console.WriteLine("{0:G}{1:G}", indent, "-w"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to insert data"); - Console.WriteLine("{0:G}{1:G}", indent, "-r"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); - Console.WriteLine("{0:G}{1:G}", indent, "-n"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); - Console.WriteLine("{0:G}{1:G}", indent, "-b"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); - Console.WriteLine("{0:G}{1:G}", indent, "-i"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); - Console.WriteLine("{0:G}{1:G}", indent, "-c"); - Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); - - ExitProgram(); - } - } - } - - public void ReadArgument(String[] argv) - { - PrintHelp(argv); - host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); - user = this.GetArgumentAsString(argv, "-u", "root"); - password = this.GetArgumentAsString(argv, "-p", "taosdata"); - dbName = this.GetArgumentAsString(argv, "-d", "db"); - stableName = this.GetArgumentAsString(argv, "-s", "st"); - tablePrefix = this.GetArgumentAsString(argv, "-t", "t"); - isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; - isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; - tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); - batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000, 500); - totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); - configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); - } - - public void InitTDengine() - { - TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); - TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); - TDengine.Init(); - Console.WriteLine("TDengine Initialization finished"); - } - - public void ConnectTDengine() - { - string db = ""; - this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); - if (this.conn == IntPtr.Zero) - { - Console.WriteLine("Connect to TDengine failed"); - ExitProgram(); - } - else - { - Console.WriteLine("Connect to TDengine success"); - } - } - - public void CreateDbAndTable() - { - if (!this.isInsertData) - { - return; - } - - StringBuilder sql = new StringBuilder(); - sql.Append("create database if not exists ").Append(this.dbName); - IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - - sql.Clear(); - sql.Append("use ").Append(this.dbName); - res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - - sql.Clear(); - sql.Append("create table if not exists 
").Append(this.stableName).Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); - res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - - for (int i = 0; i < this.tableCount; i++) - { - sql.Clear(); - sql = sql.Append("create table if not exists ").Append(this.tablePrefix).Append(i) - .Append(" using ").Append(this.stableName).Append(" tags(").Append(i).Append(")"); - res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - else - { - Console.WriteLine(sql.ToString() + " success"); - } - TDengine.FreeResult(res); - } - - Console.WriteLine("create db and table success"); - } - - public void ExecuteInsert() - { - if (!this.isInsertData) - { - return; - } - - System.DateTime start = new System.DateTime(); - long loopCount = this.totalRows / this.batchRows; - - for (int table = 0; table < this.tableCount; ++table) - { - for (long loop = 0; loop < loopCount; loop++) - { - StringBuilder sql = new StringBuilder(); - sql.Append("insert into ").Append(this.tablePrefix).Append(table).Append(" values"); - for (int batch = 0; batch < this.batchRows; ++batch) - { - long rows = loop * this.batchRows + batch; - sql.Append("(") - .Append(this.beginTimestamp + rows) - .Append(", 1, 2, 3,") - .Append(rows) - .Append(", 5, 6, 7, 'abc', 'def')"); - } - IntPtr res = TDengine.Query(this.conn, sql.ToString()); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - } - - int affectRows = TDengine.AffectRows(res); - this.rowsInserted += affectRows; - - TDengine.FreeResult(res); - } - } - - System.DateTime end = new System.DateTime(); - TimeSpan ts = end - start; - - Console.Write("Total {0:G} rows inserted, {1:G} rows failed, time spend {2:G} seconds.\n" - , this.rowsInserted, this.totalRows * this.tableCount - this.rowsInserted, ts.TotalSeconds); - } - - public void ExecuteQuery() - { - if (!this.isQueryData) - { - return; - } - - System.DateTime start = new System.DateTime(); - long queryRows = 0; - - for (int i = 0; i < 1/*this.tableCount*/; ++i) - { - String sql = "select * from " + this.dbName + "." 
+ tablePrefix + i; - Console.WriteLine(sql); - - IntPtr res = TDengine.Query(conn, sql); - if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) - { - Console.Write(sql.ToString() + " failure, "); - if (res != IntPtr.Zero) { - Console.Write("reason: " + TDengine.Error(res)); - } - Console.WriteLine(""); - ExitProgram(); - } - - int fieldCount = TDengine.FieldCount(res); - Console.WriteLine("field count: " + fieldCount); - - List metas = TDengine.FetchFields(res); - for (int j = 0; j < metas.Count; j++) - { - TDengineMeta meta = (TDengineMeta)metas[j]; - Console.WriteLine("index:" + j + ", type:" + meta.type + ", typename:" + meta.TypeName() + ", name:" + meta.name + ", size:" + meta.size); - } - - IntPtr rowdata; - StringBuilder builder = new StringBuilder(); - while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) - { - queryRows++; - for (int fields = 0; fields < fieldCount; ++fields) - { - TDengineMeta meta = metas[fields]; - int offset = IntPtr.Size * fields; - IntPtr data = Marshal.ReadIntPtr(rowdata, offset); - - builder.Append("---"); - - if (data == IntPtr.Zero) - { - builder.Append("NULL"); - continue; - } - - switch ((TDengineDataType)meta.type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - bool v1 = Marshal.ReadByte(data) == 0 ? false : true; - builder.Append(v1); - break; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - byte v2 = Marshal.ReadByte(data); - builder.Append(v2); - break; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - short v3 = Marshal.ReadInt16(data); - builder.Append(v3); - break; - case TDengineDataType.TSDB_DATA_TYPE_INT: - int v4 = Marshal.ReadInt32(data); - builder.Append(v4); - break; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - long v5 = Marshal.ReadInt64(data); - builder.Append(v5); - break; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); - builder.Append(v6); - break; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); - builder.Append(v7); - break; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - string v8 = Marshal.PtrToStringAnsi(data); - builder.Append(v8); - break; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - long v9 = Marshal.ReadInt64(data); - builder.Append(v9); - break; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - string v10 = Marshal.PtrToStringAnsi(data); - builder.Append(v10); - break; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - byte v11 = Marshal.ReadByte(data); - builder.Append(v11); - break; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - ushort v12 = (ushort)Marshal.ReadInt16(data); - builder.Append(v12); - break; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - uint v13 = (uint)Marshal.ReadInt32(data); - builder.Append(v13); - break; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - ulong v14 = (ulong)Marshal.ReadInt64(data); - builder.Append(v14); - break; - } - } - builder.Append("---"); - - if (queryRows <= 10) - { - Console.WriteLine(builder.ToString()); - } - builder.Clear(); - } - - if (TDengine.ErrorNo(res) != 0) - { - Console.Write("Query is not complete, Error {0:G}", - TDengine.ErrorNo(res), TDengine.Error(res)); - } - Console.WriteLine(""); - - TDengine.FreeResult(res); - } - - System.DateTime end = new System.DateTime(); - TimeSpan ts = end - start; - - Console.Write("Total {0:G} rows inserted, {1:G} rows query, time spend {2:G} seconds.\n" - , this.rowsInserted, queryRows, ts.TotalSeconds); - } - - public void CloseConnection() - { - if (this.conn != 
IntPtr.Zero) - { - TDengine.Close(this.conn); - } - } - - static void ExitProgram() - { - TDengine.Cleanup(); - System.Environment.Exit(0); - } - } -} diff --git a/tests/examples/C#/TDengineTest/TDengineTest.cs b/tests/examples/C#/TDengineTest/TDengineTest.cs new file mode 100644 index 0000000000000000000000000000000000000000..89ef57bd41ac8f68ac2bc34e4ebe5ad90d213b17 --- /dev/null +++ b/tests/examples/C#/TDengineTest/TDengineTest.cs @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; + +namespace TDengineDriver +{ + class TDengineTest + { + //connect parameters + private string host; + private string configDir; + private string user; + private string password; + private short port = 0; + + //sql parameters + private string dbName; + private string stableName; + private string tablePrefix; + + private bool isInsertData; + private bool isQueryData; + + private long tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineTest tester = new TDengineTest(); + tester.ReadArgument(args); + + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("Starting Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + + tester.InitTDengine(); + tester.ConnectTDengine(); + tester.CreateDbAndTable(); + tester.ExecuteInsert(); + tester.ExecuteQuery(); + tester.CloseConnection(); + + Console.WriteLine("---------------------------------------------------------------"); + Console.WriteLine("Stop Testing..."); + Console.WriteLine("---------------------------------------------------------------"); + + } + + public long GetArgumentAsLong(String[] argv, String argName, int minVal, int maxVal, int defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + + long tmpVal = Convert.ToInt64(tmp); + if (tmpVal < minVal || tmpVal > maxVal) + { + Console.WriteLine("option {0:G} should in range [{1:G}, {2:G}]", argName, minVal, maxVal); + ExitProgram(); + } + + return tmpVal; + } + } + + return defaultValue; + } + + public String GetArgumentAsString(String[] argv, String argName, String defaultValue) + { + int argc = argv.Length; + for (int i = 0; i < argc; ++i) + { + if (argName != argv[i]) + { + continue; + } + if (i < argc - 1) + { + String tmp = argv[i + 1]; + if (tmp[0] == '-') + { + Console.WriteLine("option {0:G} requires an argument", tmp); + ExitProgram(); + } + return tmp; + } + } + + return defaultValue; + } + + public void 
PrintHelp(String[] argv) + { + for (int i = 0; i < argv.Length; ++i) + { + if ("--help" == argv[i]) + { + String indent = " "; + Console.WriteLine("taosTest is simple example to operate TDengine use C# Language.\n"); + Console.WriteLine("{0:G}{1:G}", indent, "-h"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "TDEngine server IP address to connect"); + Console.WriteLine("{0:G}{1:G}", indent, "-u"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is root"); + Console.WriteLine("{0:G}{1:G}", indent, "-p"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "The TDEngine user name to use when connecting to the server, default is taosdata"); + Console.WriteLine("{0:G}{1:G}", indent, "-d"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Database used to create table or import data, default is db"); + Console.WriteLine("{0:G}{1:G}", indent, "-s"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Super Tables used to create table, default is mt"); + Console.WriteLine("{0:G}{1:G}", indent, "-t"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Table prefixs, default is t"); + Console.WriteLine("{0:G}{1:G}", indent, "-w"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to insert data"); + Console.WriteLine("{0:G}{1:G}", indent, "-r"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Whether to query data"); + Console.WriteLine("{0:G}{1:G}", indent, "-n"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many Tables to create, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-b"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows per insert batch, default is 10"); + Console.WriteLine("{0:G}{1:G}", indent, "-i"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "How many rows to insert, default is 100"); + Console.WriteLine("{0:G}{1:G}", indent, "-c"); + Console.WriteLine("{0:G}{1:G}{2:G}", indent, indent, "Configuration directory"); + + ExitProgram(); + } + } + } + + public void ReadArgument(String[] argv) + { + PrintHelp(argv); + host = this.GetArgumentAsString(argv, "-h", "127.0.0.1"); + user = this.GetArgumentAsString(argv, "-u", "root"); + password = this.GetArgumentAsString(argv, "-p", "taosdata"); + dbName = this.GetArgumentAsString(argv, "-d", "db"); + stableName = this.GetArgumentAsString(argv, "-s", "st"); + tablePrefix = this.GetArgumentAsString(argv, "-t", "t"); + isInsertData = this.GetArgumentAsLong(argv, "-w", 0, 1, 1) != 0; + isQueryData = this.GetArgumentAsLong(argv, "-r", 0, 1, 1) != 0; + tableCount = this.GetArgumentAsLong(argv, "-n", 1, 10000, 10); + batchRows = this.GetArgumentAsLong(argv, "-b", 1, 1000,500 ); + totalRows = this.GetArgumentAsLong(argv, "-i", 1, 10000000, 10000); + configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + TDengine.Init(); + Console.WriteLine("TDengine Initialization finished"); + } + + public void ConnectTDengine() + { + string db = ""; + Console.WriteLine("Host:{0}", this.host); + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("Connect to TDengine failed"); + ExitProgram(); + } + else + { + Console.WriteLine("Connect to TDengine success"); + } + } + + public 
void CreateDbAndTable() + { + if (!this.isInsertData) + { + return; + } + + StringBuilder sql = new StringBuilder(); + sql.Append("create database if not exists ").Append(this.dbName); + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + + sql.Clear(); + sql.Append("use ").Append(this.dbName); + res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + + sql.Clear(); + sql.Append("create table if not exists ").Append(this.stableName).Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); + res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + + for (int i = 0; i < this.tableCount; i++) + { + sql.Clear(); + sql = sql.Append("create table if not exists ").Append(this.tablePrefix).Append(i) + .Append(" using ").Append(this.stableName).Append(" tags(").Append(i).Append(")"); + res = TDengine.Query(this.conn, sql.ToString()); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + + Console.WriteLine("create db and table success"); + } + + public void ExecuteInsert() + { + if (!this.isInsertData) + { + return; + } + + System.DateTime start = new System.DateTime(); + long loopCount = this.totalRows / this.batchRows; + + for (int table = 0; table < this.tableCount; ++table) + { + for (long loop = 0; loop < loopCount; loop++) + { + StringBuilder sql = new StringBuilder(); + sql.Append("insert into ").Append(this.tablePrefix).Append(table).Append(" values"); + for (int batch = 0; batch < this.batchRows; ++batch) + { + + long rows = loop * this.batchRows + batch; + sql.Append("(") + .Append(this.beginTimestamp + rows) + .Append(", 1, 2, 3,") + .Append(rows) + .Append(", 5, 6, 7, 'abc', 'def')"); + } + IntPtr res = TDengine.Query(this.conn,sql.ToString() ); + + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + } + + int affectRows = TDengine.AffectRows(res); + this.rowsInserted += affectRows; + + TDengine.FreeResult(res); + } + } + + System.DateTime end = new System.DateTime(); + TimeSpan ts = end - start; + + Console.Write("Total {0:G} rows inserted, {1:G} rows failed, time spend 
{2:G} seconds.\n" + , this.rowsInserted, this.totalRows * this.tableCount - this.rowsInserted, ts.TotalSeconds); + } + + public void ExecuteQuery() + { + if (!this.isQueryData) + { + return; + } + + System.DateTime start = new System.DateTime(); + long queryRows = 0; + + for (int i = 0; i < 1/*this.tableCount*/; ++i) + { + String sql = "select * from " + this.dbName + "." + tablePrefix + i; + Console.WriteLine(sql); + + IntPtr res = TDengine.Query(conn, sql); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) + { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + + int fieldCount = TDengine.FieldCount(res); + Console.WriteLine("field count: " + fieldCount); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + Console.WriteLine("index:" + j + ", type:" + meta.type + ", typename:" + meta.TypeName() + ", name:" + meta.name + ", size:" + meta.size); + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v11 = Marshal.ReadByte(data); + builder.Append(v11); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v12 = (ushort)Marshal.ReadInt16(data); + builder.Append(v12); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v13 = (uint)Marshal.ReadInt32(data); + builder.Append(v13); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v14 = (ulong)Marshal.ReadInt64(data); + builder.Append(v14); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", + TDengine.ErrorNo(res), 
TDengine.Error(res)); + } + Console.WriteLine(""); + + TDengine.FreeResult(res); + } + + System.DateTime end = new System.DateTime(); + TimeSpan ts = end - start; + + Console.Write("Total {0:G} rows inserted, {1:G} rows query, time spend {2:G} seconds.\n" + , this.rowsInserted, queryRows, ts.TotalSeconds); + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + } + } + + static void ExitProgram() + { + TDengine.Cleanup(); + System.Environment.Exit(0); + } + } +} diff --git a/tests/examples/C#/TDengineTest/TDengineTest.csproj b/tests/examples/C#/TDengineTest/TDengineTest.csproj new file mode 100644 index 0000000000000000000000000000000000000000..211c927d3d36df5941291319e3c85707610c6a8f --- /dev/null +++ b/tests/examples/C#/TDengineTest/TDengineTest.csproj @@ -0,0 +1,12 @@ + + + + Exe + net5.0 + + + + + + + diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs deleted file mode 100644 index e6c3a598adc0bc4bcf5ea84953f649b418199555..0000000000000000000000000000000000000000 --- a/tests/examples/C#/taosdemo/TDengineDriver.cs +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -using System; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -namespace TDengineDriver -{ - enum TDengineDataType - { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10, // unicode string - TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte - TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes - TSDB_DATA_TYPE_UINT = 13, // 4 bytes - TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() - { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOL"; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "TINYINT"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SMALLINT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "BIGINT"; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - return "TINYINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - return "SMALLINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - return "INT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - return "BIGINT UNSIGNED"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return 
"FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } - } - } - - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; - - [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); - - [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); - - [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); - - [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - - [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } - - [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); - - [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } - - [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FetchRows(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); - - [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - //get precisionin parameter restultset - [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] - static extern public int ResultPrecision(IntPtr taos); - } -} diff --git a/tests/examples/C#/taosdemo/taosdemo.csproj b/tests/examples/C#/taosdemo/taosdemo.csproj index 15ec155d45e34aae7276fe596c177619dfddd3e9..8d4b786ba3a99b600783a5b4ee55d99f03e47655 100644 --- 
a/tests/examples/C#/taosdemo/taosdemo.csproj +++ b/tests/examples/C#/taosdemo/taosdemo.csproj @@ -6,4 +6,8 @@ false + + + + diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index e057c5f542cb31570c8b7c810f512634bffc53f6..3359931d5437a6007973b8522bb7cc7fe66fbc78 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -371,6 +371,9 @@ python3 ./test.py -f functions/function_irate.py python3 ./test.py -f functions/function_ceil.py python3 ./test.py -f functions/function_floor.py python3 ./test.py -f functions/function_round.py +python3 ./test.py -f functions/function_elapsed.py +python3 ./test.py -f functions/function_mavg.py +python3 ./test.py -f functions/function_csum.py python3 ./test.py -f insert/unsignedInt.py python3 ./test.py -f insert/unsignedBigint.py diff --git a/tests/pytest/functions/function_all_sample.py b/tests/pytest/functions/function_all_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..de136976ea9a755edd0ca1e90cab5301fb48d764 --- /dev/null +++ b/tests/pytest/functions/function_all_sample.py @@ -0,0 +1,671 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re +import taos + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + ''' + sample function: + + :param sel: string, must be "select", required parameters; + :param func: string, in this case must be "sample(", otherwise return other function, required parameters; + :param col: string, column name, required parameters; + :param m_comm: string, comma between col and k , required parameters; + :param k: int/float,the width of the sliding window, [1,100], required parameters; + :param r_comm: string, must be ")", use with "(" in func, required parameters; + :param alias: string, result column another name,or add other funtion; + :param fr: string, must be "from", required parameters; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :return: sample query statement,default: select sample(c1, 1) from t1 + ''' + + return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}" + + def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""): + # print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition)) + line = sys._getframe().f_back.f_lineno + + if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]): + print(f"case in {line}: ", end='') + return 
tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ","") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + # colname = col if "." not in col else col.split(".")[1] + # col_index = collist.index(colname) + # if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + + if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias]): + if any([ not alias.isalnum(), re.compile('^[a-zA-Z]{1}.*$').match(col) 
is None ]): + # actually, column alias also support "_", but in this case,forbidden that。 + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in {line}: ", end='') + return tdSql.error(self.sample_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub("sample\([a-z0-9 .,]*\)", f"count({col})", self.sample_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + if tdSql.queryRows == 0: + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sample_result = tdSql.queryResult + sample_len = tdSql.queryRows + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"and {tb_condition}='{group_name}' and {col} is not null", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}' and {col} is not null", clear_condition) + + tdSql.query(f"select ts, {col} {alias} from {table_expr} {pre_condition}") + # pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + # pre_sample = np.convolve(pre_data, np.ones(k), "valid")/k + pre_sample = tdSql.queryResult + pre_len = tdSql.queryRows + step = pre_len if pre_len < k else k + # tdSql.query(self.sample_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, 
r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + for i in range(step): + if sample_result[pre_row:pre_row+step][i] not in pre_sample: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + # for j in range(len(pre_sample)): + # print(f"case in {line}:", end='') + # tdSql.checkData(pre_row+j, 1, pre_sample[j]) + pre_row += step + return + elif "union" in condition: + union_sql_0 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + # union_sample_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + # union_sample_1 = tdSql.queryResult + row_union_1 = tdSql.queryRows + + tdSql.query(self.sample_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + # for i in range(tdSql.queryRows): + # print(f"case in {line}: ", end='') + # if i < row_union_0: + # tdSql.checkData(i, 1, union_sample_0[i][1]) + # else: + # tdSql.checkData(i, 1, union_sample_1[i-row_union_0][1]) + if row_union_0 + row_union_1 != sample_len: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + return + + else: + if "where" in condition: + condition = re.sub('where', f"where {col} is not null and ", condition) + else: + condition = f"where {col} is not null" + condition + tdSql.query(f"select ts, {col}, {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + # offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_sample = tdSql.queryResult + # pre_len = tdSql.queryRows + for i in range(sample_len): + if sample_result[pre_row:pre_row + step][i] not in pre_sample: + tdLog.exit(f"case in {line} is failed: sample data is not in {group_name}") + else: + tdLog.info(f"case in {line} is success: sample data is in {group_name}") + + pass + + def sample_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checksample() + case2 = {"col": "c2"} + self.checksample(**case2) + case3 = {"col": "c5"} + self.checksample(**case3) + case4 = {"col": "c7"} + self.checksample(**case4) + case5 = {"col": "c8"} + self.checksample(**case5) + case6 = {"col": "c9"} + self.checksample(**case6) + + # case7~8: nested query + case7 = {"table_expr": "(select c1 from stb1)"} + self.checksample(**case7) + case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"} + self.checksample(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checksample(**case9) + case10 = {"alias": ", _c0"} + self.checksample(**case10) + # case11 = {"alias": ", st1"} + # self.checksample(**case11) + # case12 = {"alias": ", c1"} + # self.checksample(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 
10"} + self.checksample(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checksample(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checksample(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checksample(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checksample(**case17) + # # case18~19: with group by + case19 = { + "table_expr": "stb1", + "condition": "group by tbname" + } + self.checksample(**case19) + + # case20~21: with order by + case20 = {"condition": "order by ts"} + self.checksample(**case20) + case21 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + self.checksample(**case21) + + # case22: with union + case22 = { + "condition": "union all select sample( c1 , 1 ) from t2" + } + self.checksample(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checksample(**case23) + + # case24: value k range[1, 100], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checksample(**case24) + case25 = {"k": 2.999} + self.checksample(**case25) + case26 = {"k": 1000} + self.checksample(**case26) + case27 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + self.checksample(**case27) # with slimit + case28 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + self.checksample(**case28) # with soffset + + pass + + def sample_error_query(self) -> None : + # unusual test + + # form test + err1 = {"col": ""} + self.checksample(**err1) # no col + err2 = {"sel": ""} + self.checksample(**err2) # no select + err3 = {"func": "sample", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checksample(**err3) # no sample condition: select sample from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checksample(**err4) # no sample condition: select sample() from + err5 = {"func": "sample", "r_comm": ""} + self.checksample(**err5) # no brackets: select sample col, k from + err6 = {"fr": ""} + self.checksample(**err6) # no from + err7 = {"k": ""} + self.checksample(**err7) # no k + err8 = {"table_expr": ""} + self.checksample(**err8) # no table_expr + + err9 = {"col": "st1"} + self.checksample(**err9) # col: tag + err10 = {"col": 1} + self.checksample(**err10) # col: value + err11 = {"col": "NULL"} + self.checksample(**err11) # col: NULL + err12 = {"col": "%_"} + self.checksample(**err12) # col: %_ + err13 = {"col": "c3"} + self.checksample(**err13) # col: timestamp col + err14 = {"col": "_c0"} + # self.checksample(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + # self.checksample(**err15) # expr col + err16 = {"col": "c4"} + self.checksample(**err16) # binary col + err17 = {"col": "c10"} + self.checksample(**err17) # nchar col + err18 = {"col": "c6"} + self.checksample(**err18) # bool col + err19 = {"col": "'c1'"} + self.checksample(**err19) # col: string + err20 = {"col": None} + self.checksample(**err20) # col: None + err21 = {"col": "''"} + self.checksample(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checksample(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checksample(**err23) # tbname + err24 = {"col": "stb1"} + self.checksample(**err24) # stbname + err25 = {"col": "db"} + self.checksample(**err25) # datbasename + err26 = {"col": "True"} + self.checksample(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checksample(**err27) # 
col: BOOL 2 + err28 = {"col": "*"} + self.checksample(**err28) # col: all col + err29 = {"func": "sample[", "r_comm": "]"} + self.checksample(**err29) # form: sample[col, k] + err30 = {"func": "sample{", "r_comm": "}"} + self.checksample(**err30) # form: sample{col, k} + err31 = {"col": "[c1]"} + self.checksample(**err31) # form: sample([col], k) + err32 = {"col": "c1, c2"} + self.checksample(**err32) # form: sample(col, col2, k) + err33 = {"col": "c1, 2"} + self.checksample(**err33) # form: sample(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checksample(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checksample(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checksample(**err36) # mix with select function 1 + err37 = {"alias": ", top(c1, 5)"} + self.checksample(**err37) # mix with select function 2 + err38 = {"alias": ", spread(c1)"} + self.checksample(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checksample(**err39) # mix with calculation function 2 + err40 = {"alias": "+ 2"} + self.checksample(**err40) # mix with arithmetic 1 + err41 = {"alias": "+ avg(c1)"} + self.checksample(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checksample(**err42) # mix with other col + err43 = {"table_expr": "stb1"} + self.checksample(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checksample(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + self.checksample(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + # self.checksample(**err46) # group by normal col + + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checksample(**err49) # k: timestamp + err50 = {"k": False} + self.checksample(**err50) # k: False + err51 = {"k": "%"} + self.checksample(**err51) # k: special char + err52 = {"k": ""} + self.checksample(**err52) # k: "" + err53 = {"k": None} + self.checksample(**err53) # k: None + err54 = {"k": "NULL"} + self.checksample(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checksample(**err55) # k: string + err56 = {"k": "c1"} + self.checksample(**err56) # k: sring,col name + err57 = {"col": "c1, 1, c2"} + self.checksample(**err57) # form: sample(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checksample(**err58) # form: sample(col newname, k) + err59 = {"k": "'1'"} + # self.checksample(**err59) # formL sample(colm, "1") + err60 = {"k": "-1-(-2)"} + # self.checksample(**err60) # formL sample(colm, -1-2) + err61 = {"k": 1001} + self.checksample(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + self.checksample(**err62) # k: negative number + err63 = {"k": 0} + self.checksample(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checksample(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checksample(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checksample(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checksample(**err67) # k: left out of [1, 1000] + + pass + + def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, 
{random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def sample_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def sample_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 100 + self.sample_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.sample_current_query() + self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.sample_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.sample_current_query() + # self.sample_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.sample_test_table(tbnum) + self.sample_test_data(tbnum, per_table_rows, nowtime) + self.sample_current_query() + self.sample_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.sample_current_query() + self.sample_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) 
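At this point the dnode has been stopped; the lines that follow bring it back up and re-run the full query matrix, so the sample() results are validated against data replayed from the WAL rather than served from memory. A small reusable helper for this stop/start/re-check pattern might look like the sketch below; it only uses the tdSql and tdDnodes utilities this test already imports, and the helper name itself is illustrative, not part of the test framework:

```python
def restart_first_dnode_and_recheck(self, checks):
    """Hypothetical helper: restart the first dnode and re-run the given check
    functions, so that query results are verified after WAL replay."""
    tdSql.query("show dnodes")
    index = tdSql.getData(0, 0)      # id of the first dnode
    tdDnodes.stop(index)             # stop: in-memory rows must be recoverable from the WAL
    tdDnodes.start(index)            # start: vnode state is rebuilt from the WAL
    for check in checks:             # e.g. [self.sample_current_query, self.sample_error_query]
        check()
```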
+ tdDnodes.start(index) + self.sample_current_query() + self.sample_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.sample_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_csum.py b/tests/pytest/functions/function_csum.py new file mode 100644 index 0000000000000000000000000000000000000000..de2400066b9c606b2352649a394d9c20006ea929 --- /dev/null +++ b/tests/pytest/functions/function_csum.py @@ -0,0 +1,423 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect +import re + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def csum_query_form(self, col="c1", alias="", table_expr="t1", condition=""): + + ''' + csum function: + :param col: string, column name, required parameters; + :param alias: string, result column another name,or add other funtion; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)',or give result column another name, like 'c2' + :return: csum query statement,default: select csum(c1) from t1 + ''' + + return f"select csum({col}) {alias} from {table_expr} {condition}" + + def checkcsum(self,col="c1", alias="", table_expr="t1", condition="" ): + line = sys._getframe().f_back.f_lineno + pre_sql = self.csum_query_form( + col=col, table_expr=table_expr, condition=condition + ).replace("csum", "count") + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "order by tbname" in condition: + tdSql.error(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + return + + if "group" in condition: + + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = 
np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_data) + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for j in range(len(pre_csum)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 1, pre_csum[j]) + pre_row += len(pre_csum) + return + elif "union" in condition: + union_sql_0 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_csum_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_csum_1 = tdSql.queryResult + + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 1, union_csum_0[i][1]) + else: + tdSql.checkData(i, 1, union_csum_1[i-row_union_0][1]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_csum = np.cumsum(pre_result)[offset_val:] + tdSql.query(self.csum_query_form( + col=col, alias=alias, table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + tdSql.checkData(i, 1, pre_csum[i]) + + pass + + def csum_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkcsum() + case2 = {"col": "c2"} + self.checkcsum(**case2) + case3 = {"col": "c5"} + self.checkcsum(**case3) + case4 = {"col": "c7"} + self.checkcsum(**case4) + case5 = {"col": "c8"} + self.checkcsum(**case5) + case6 = {"col": "c9"} + self.checkcsum(**case6) + + # case7~8: nested query + case7 = {"table_expr": "(select c1 from stb1)"} + self.checkcsum(**case7) + case8 = {"table_expr": "(select csum(c1) c1 from stb1 group by tbname)"} + self.checkcsum(**case8) + + # case9~10: mix with tbname/ts/tag/col + case9 = {"alias": ", tbname"} + self.checkcsum(**case9) + case10 = {"alias": ", _c0"} + self.checkcsum(**case10) + case11 = {"alias": ", st1"} + self.checkcsum(**case11) + case12 = {"alias": ", c1"} + # self.checkcsum(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkcsum(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkcsum(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkcsum(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkcsum(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkcsum(**case17) + # # case18~19: with group by + # case18 = { + # "table_expr": "t1", + # "condition": "group by c6" + # } + # self.checkcsum(**case18) + case19 = { + "table_expr": "stb1", + "condition": "group by tbname" + } + self.checkcsum(**case19) + + # case20~21: with order by + case20 = {"condition": "order by ts"} + 
self.checkcsum(**case20) + + # case22: with union + case22 = { + "condition": "union all select csum(c1) from t2" + } + self.checkcsum(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkcsum(**case23) + # case24 = { + # "table_expr": "stb1", + # "condition": "group by tbname slimit 1 soffset 1" + # } + # self.checkcsum(**case24) + + pass + + def csum_error_query(self) -> None : + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.csum_query_form(col="")) # no col + tdSql.error("csum(c1) from stb1") # no select + tdSql.error("select csum from t1") # no csum condition + tdSql.error("select csum c1 from t1") # no brackets + tdSql.error("select csum(c1) t1") # no from + tdSql.error("select csum( c1 ) from ") # no table_expr + tdSql.error(self.csum_query_form(col="st1")) # tag col + tdSql.error(self.csum_query_form(col=1)) # col is a value + tdSql.error(self.csum_query_form(col="'c1'")) # col is a string + tdSql.error(self.csum_query_form(col=None)) # col is NULL 1 + tdSql.error(self.csum_query_form(col="NULL")) # col is NULL 2 + tdSql.error(self.csum_query_form(col='""')) # col is "" + tdSql.error(self.csum_query_form(col='c%')) # col is spercial char 1 + tdSql.error(self.csum_query_form(col='c_')) # col is spercial char 2 + tdSql.error(self.csum_query_form(col='c.')) # col is spercial char 3 + tdSql.error(self.csum_query_form(col='c3')) # timestamp col + tdSql.error(self.csum_query_form(col='ts')) # Primary key + tdSql.error(self.csum_query_form(col='avg(c1)')) # expr col + tdSql.error(self.csum_query_form(col='c6')) # bool col + tdSql.error(self.csum_query_form(col='c4')) # binary col + tdSql.error(self.csum_query_form(col='c10')) # nachr col + tdSql.error(self.csum_query_form(col='c10')) # not table_expr col + tdSql.error(self.csum_query_form(col='t1')) # tbname + tdSql.error(self.csum_query_form(col='stb1')) # stbname + tdSql.error(self.csum_query_form(col='db')) # datbasename + tdSql.error(self.csum_query_form(col=True)) # col is BOOL 1 + tdSql.error(self.csum_query_form(col='True')) # col is BOOL 2 + tdSql.error(self.csum_query_form(col='*')) # col is all col + tdSql.error("select csum[c1] from t1") # sql form error 1 + tdSql.error("select csum{c1} from t1") # sql form error 2 + tdSql.error(self.csum_query_form(col="[c1]")) # sql form error 3 + tdSql.error(self.csum_query_form(col="c1, c2")) # sql form error 3 + tdSql.error(self.csum_query_form(col="c1, 2")) # sql form error 3 + tdSql.error(self.csum_query_form(alias=", count(c1)")) # mix with aggregate function 1 + tdSql.error(self.csum_query_form(alias=", avg(c1)")) # mix with aggregate function 2 + tdSql.error(self.csum_query_form(alias=", min(c1)")) # mix with select function 1 + tdSql.error(self.csum_query_form(alias=", top(c1, 5)")) # mix with select function 2 + tdSql.error(self.csum_query_form(alias=", spread(c1)")) # mix with calculation function 1 + tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2 + tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1 + tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 + tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 + tdSql.error(self.csum_query_form(table_expr="stb1")) # select stb directly + stb_join = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where 
stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + tdSql.error(self.csum_query_form(**stb_join)) # stb join + interval_sql = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + tdSql.error(self.csum_query_form(**interval_sql)) # interval + group_normal_col = { + "table_expr": "t1", + "condition": "group by c6" + } + tdSql.error(self.csum_query_form(**group_normal_col)) # group by normal col + slimit_soffset_sql = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # tdSql.error(self.csum_query_form(**slimit_soffset_sql)) + order_by_tbname_sql = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" + } + tdSql.error(self.csum_query_form(**order_by_tbname_sql)) + + pass + + def csum_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def csum_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def csum_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 100 + self.csum_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + self.csum_test_table(tbnum) + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) 
values " + f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + self.csum_current_query() + self.csum_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.csum_test_table(tbnum) + self.csum_test_data(tbnum, per_table_rows, nowtime) + self.csum_current_query() + self.csum_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.csum_current_query() + self.csum_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.csum_current_query() + self.csum_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.csum_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_elapsed.py b/tests/pytest/functions/function_elapsed.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc54bfc1c7fc173bf9447da1a9b0aa4aba3e525 --- /dev/null +++ b/tests/pytest/functions/function_elapsed.py @@ -0,0 +1,97 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +from functions.function_elapsed_case import * + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def genTime(self, no): + h = int(no / 60) + hs = "%d" %h + if h < 10: + hs = "0%d" %h + + m = int(no % 60) + ms = "%d" %m + if m < 10: + ms = "0%d" %m + + return hs, ms + + def general(self): + # normal table + tdSql.execute("create database wxy_db minrows 10 maxrows 200") + tdSql.execute("use wxy_db") + tdSql.execute("create table t1(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp)") + for i in range(1, 1001): + hs, ms = self.genTime(i) + if i < 500: + ret = tdSql.execute("insert into t1(ts, i, b) values (\"2021-11-22 %s:%s:00\", %d, 1)" % (hs, ms, i)) + else: + ret = tdSql.execute("insert into t1(ts, i, b) values (\"2021-11-22 %s:%s:00\", %d, 0)" % (hs, ms, i)) + tdSql.query("select count(*) from t1") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 1000) + + # empty normal table + tdSql.execute("create table t2(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp)") + + tdSql.execute("create database wxy_db_ns precision \"ns\"") + tdSql.execute("use wxy_db_ns") + tdSql.execute("create table t1 (ts timestamp, f float)") + tdSql.execute("insert into t1 values('2021-11-18 00:00:00.000000100', 1)" + "('2021-11-18 00:00:00.000000200', 2)" + "('2021-11-18 00:00:00.000000300', 3)" + "('2021-11-18 00:00:00.000000500', 4)") + + # super table + tdSql.execute("use wxy_db") + tdSql.execute("create stable st1(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)") + tdSql.execute("create table st1s1 using st1 tags(1)") + tdSql.execute("create table st1s2 using st1 tags(2)") + for i in range(1, 1001): + hs, ms = self.genTime(i) + if 0 == i % 2: + ret = tdSql.execute("insert into st1s1(ts, i) values (\"2021-11-22 %s:%s:00\", %d)" % (hs, ms, i)) + else: + ret = tdSql.execute("insert into st1s2(ts, i) values (\"2021-11-22 %s:%s:00\", %d)" % (hs, ms, i)) + tdSql.query("select count(*) from st1s1") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 500) + tdSql.query("select count(*) from st1s2") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 500) + # empty super table + tdSql.execute("create stable st2(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)") + tdSql.execute("create table st2s1 using st1 tags(1)") + tdSql.execute("create table st2s2 using st1 tags(2)") + + tdSql.execute("create stable st3(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)") + + def run(self): + tdSql.prepare() + self.general() + ElapsedCase().run() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git 
a/tests/pytest/functions/function_elapsed_case.py b/tests/pytest/functions/function_elapsed_case.py new file mode 100644 index 0000000000000000000000000000000000000000..56610a9347c3ab90a9addc64dd62a6ed60758abf --- /dev/null +++ b/tests/pytest/functions/function_elapsed_case.py @@ -0,0 +1,374 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + +class ElapsedCase: + def __init__(self, restart = False): + self.restart = restart + + def selectTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkRows(1) + tdSql.checkCols(1) + + tdSql.query("select elapsed(ts, 1m) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 999) + + tdSql.query("select elapsed(ts), elapsed(ts, 1m), elapsed(ts, 10m) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkEqual(int(tdSql.getData(0, 1)), 999) + tdSql.checkEqual(int(tdSql.getData(0, 2)), 99) + + tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), stddev(f), leastsquares(f, 1, 1), " + "min(f), max(f), first(f), last(f), percentile(i, 20), apercentile(i, 30), last_row(i), spread(i) " + "from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkRows(1) + tdSql.checkCols(16) + tdSql.checkEqual(int(tdSql.getData(0, 1)), 1000) + + tdSql.query("select elapsed(ts) + 10, elapsed(ts) - 20, elapsed(ts) * 0, elapsed(ts) / 10, elapsed(ts) / elapsed(ts, 1m) from t1 " + "where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.checkRows(1) + tdSql.checkCols(5) + tdSql.checkEqual(int(tdSql.getData(0, 2)), 0) + + tdSql.query("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(2) # append tbname + + tdSql.query("select elapsed(ts, 10m) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkEqual(int(tdSql.getData(0, 0)), 99) + tdSql.checkEqual(int(tdSql.getData(1, 0)), 99) + + tdSql.query("select elapsed(ts), elapsed(ts, 10m), elapsed(ts, 100m) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkEqual(int(tdSql.getData(0, 1)), 99) + tdSql.checkEqual(int(tdSql.getData(0, 2)), 9) + # stddev(f), + tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), min(f), max(f), first(f), last(f), apercentile(i, 30), last_row(i), spread(i) " + "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(14) # append tbname + tdSql.checkEqual(int(tdSql.getData(0, 1)), 500) + + tdSql.query("select elapsed(ts) + 10, elapsed(ts) - 20, elapsed(ts) * 0, elapsed(ts) / 10, elapsed(ts) / elapsed(ts, 1m) " + "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by 
tbname") + tdSql.checkRows(2) + tdSql.checkCols(6) # append tbname + tdSql.checkEqual(int(tdSql.getData(0, 2)), 0) + + tdSql.query("select elapsed(ts), tbname from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.checkRows(2) + tdSql.checkCols(3) # additional append tbname + + tdSql.execute("use wxy_db_ns") + tdSql.query("select elapsed(ts, 1b), elapsed(ts, 1u) from t1") + tdSql.checkRows(1) + tdSql.checkCols(2) + + self.selectIllegalTest() + + # It has little to do with the elapsed function, so just simple test. + def whereTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' and id = 1 group by tbname") + tdSql.checkRows(1) + tdSql.checkCols(2) # append tbname + + # It has little to do with the elapsed function, so just simple test. + def sessionTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' session(ts, 10s)") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' session(ts, 70s)") + tdSql.checkRows(1) + + # It has little to do with the elapsed function, so just simple test. + def stateWindowTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' state_window(i)") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' state_window(b)") + tdSql.checkRows(2) + + def intervalTest(self): + tdSql.execute("use wxy_db") + + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m)") + tdSql.checkRows(1000) + + # The first window has 9 records, and the last window has 1 record. + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(10m)") + tdSql.checkRows(101) + tdSql.checkEqual(int(tdSql.getData(0, 1)), 9 * 60 * 1000) + tdSql.checkEqual(int(tdSql.getData(100, 1)), 0) + + # Skip windows without data. + tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(35s)") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts), count(*), avg(f), twa(f), irate(f), sum(f), stddev(f), leastsquares(f, 1, 1), " + "min(f), max(f), first(f), last(f), percentile(i, 20), apercentile(i, 30), last_row(i), spread(i) " + "from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(20m)") + tdSql.checkRows(51) # ceil(1000/50) + 1(last point), window is half-open interval. + tdSql.checkCols(17) # front push timestamp + + tdSql.query("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname") + tdSql.checkRows(1000) + + tdSql.query("select elapsed(ts) + 10, elapsed(ts) - 20, elapsed(ts) * 0, elapsed(ts) / 10, elapsed(ts) / elapsed(ts, 1m) " + "from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30m) group by tbname") + tdSql.checkRows(68) # ceil(1000/30) + tdSql.checkCols(7) # front push timestamp and append tbname + + # It has little to do with the elapsed function, so just simple test. 
+    def fillTest(self):
+        tdSql.execute("use wxy_db")
+
+        tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(value, 1000)")
+        tdSql.checkRows(2880) # The time range of the condition is 24 hours.
+        tdSql.checkEqual(int(tdSql.getData(0, 1)), 1000)
+
+        tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(prev)")
+        tdSql.checkRows(2880) # The time range of the condition is 24 hours.
+        tdSql.checkData(0, 1, None)
+
+        tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(null)")
+        tdSql.checkRows(2880) # The time range of the condition is 24 hours.
+        tdSql.checkData(0, 1, None)
+
+        tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(linear)")
+        tdSql.checkRows(2880) # The time range of the condition is 24 hours.
+
+        tdSql.query("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(30s) fill(next)")
+        tdSql.checkRows(2880) # The time range of the condition is 24 hours.
+
+    # Elapsed only supports group by tbname. The supported cases are covered in selectTest().
+    def groupbyTest(self):
+        tdSql.execute("use wxy_db")
+
+        tdSql.error("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by i")
+        tdSql.error("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by i")
+
+    def orderbyCheck(self, sql, elapsedCol):
+        resultAsc = tdSql.getResult(sql)
+        resultdesc = tdSql.getResult(sql + " order by ts desc")
+        resultRows = len(resultAsc)
+        for i in range(resultRows):
+            tdSql.checkEqual(resultAsc[i][elapsedCol], resultdesc[resultRows - i - 1][elapsedCol])
+
+    def splitStableResult(self, sql, elapsedCol, tbnameCol):
+        subtable = {}
+        result = tdSql.getResult(sql)
+        for i in range(len(result)):
+            if None == subtable.get(result[i][tbnameCol]):
+                subtable[result[i][tbnameCol]] = [result[i][elapsedCol]]
+            else:
+                subtable[result[i][tbnameCol]].append(result[i][elapsedCol])
+        return subtable
+
+    def doOrderbyCheck(self, resultAsc, resultdesc):
+        resultRows = len(resultAsc)
+        for i in range(resultRows):
+            tdSql.checkEqual(resultAsc[i], resultdesc[resultRows - i - 1])
+
+    def orderbyForStableCheck(self, sql, elapsedCol, tbnameCol):
+        subtableAsc = self.splitStableResult(sql, elapsedCol, tbnameCol)
+        subtableDesc = self.splitStableResult(sql + " order by ts desc", elapsedCol, tbnameCol)
+        for kv in subtableAsc.items():
+            descValue = subtableDesc.get(kv[0])
+            if None == descValue:
+                tdLog.exit("%s failed: subtable %s does not exist" % (sql, kv[0]))
+            else:
+                self.doOrderbyCheck(kv[1], descValue)
+
+    # The order by clause only changes the output order and has no effect on the calculated results.
+ def orderbyTest(self): + tdSql.execute("use wxy_db") + + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", 0) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(10m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(222m)", 1) + self.orderbyCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1000m)", 1) + + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname", 0, 1) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(10m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(222m) group by tbname", 1, 2) + self.orderbyForStableCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1000m) group by tbname", 1, 2) + + def slimitCheck(self, sql): + tdSql.checkEqual(tdSql.query(sql + " slimit 0"), 0) + tdSql.checkEqual(tdSql.query(sql + " slimit 1 soffset 0"), tdSql.query(sql + " slimit 0, 1")) + tdSql.checkEqual(tdSql.query(sql + " slimit 1, 1"), tdSql.query(sql) / 2) + tdSql.checkEqual(tdSql.query(sql + " slimit 10"), tdSql.query(sql)) + + # It has little to do with the elapsed function, so just simple test. 
+ def slimitTest(self): + tdSql.execute("use wxy_db") + + self.slimitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + self.slimitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname") + + def limitCheck(self, sql, groupby = 0): + rows = tdSql.query(sql) + if rows > 0: + tdSql.checkEqual(tdSql.query(sql + " limit 0"), 0) + if 1 == groupby: + tdSql.checkEqual(tdSql.query(sql + " limit 1"), 2) + tdSql.checkEqual(tdSql.query(sql + " limit %d offset %d" % (rows / 2, rows / 3)), tdSql.query(sql + " limit %d, %d" % (rows / 3, rows / 2))) + tdSql.checkEqual(tdSql.query(sql + " limit %d" % (rows / 2)), rows) + else: + tdSql.checkEqual(tdSql.query(sql + " limit 1"), 1) + tdSql.checkEqual(tdSql.query(sql + " limit %d offset %d" % (rows / 2, rows / 3)), tdSql.query(sql + " limit %d, %d" % (rows / 3, rows / 2))) + tdSql.checkEqual(tdSql.query(sql + " limit %d" % (rows + 1)), rows) + + # It has little to do with the elapsed function, so just simple test. + def limitTest(self): + tdSql.execute("use wxy_db") + + self.limitCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + self.limitCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)") + + self.limitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname", 1) + self.limitCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s) group by tbname", 1) + + def fromCheck(self, sqlTemplate, table): + tdSql.checkEqual(tdSql.getResult(sqlTemplate % table), tdSql.getResult(sqlTemplate % ("(select * from %s)" % table))) + tdSql.query(sqlTemplate % ("(select last(ts) from %s interval(10s))" % table)) + tdSql.query(sqlTemplate % ("(select elapsed(ts) from %s interval(10s))" % table)) + + # It has little to do with the elapsed function, so just simple test. + def fromTest(self): + tdSql.execute("use wxy_db") + + self.fromCheck("select elapsed(ts) from %s where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", "t1") + self.fromCheck("select elapsed(ts) from %s where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)", "t1") + tdSql.query("select * from (select elapsed(ts) from t1 interval(10s)) where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.query("select * from (select elapsed(ts) from t1)") + # empty table test + tdSql.checkEqual(tdSql.query("select elapsed(ts) from t2"), 0) + tdSql.checkEqual(tdSql.query("select elapsed(ts) from st2 group by tbname"), 0) + tdSql.checkEqual(tdSql.query("select elapsed(ts) from st3 group by tbname"), 0) + # Tags not allowed for table query, so there is no need to test super table. + tdSql.error("select elapsed(ts) from (select * from st1)") + + def joinCheck(self, sqlTemplate, rtable): + tdSql.checkEqual(tdSql.getResult(sqlTemplate % (rtable, "")), tdSql.getResult(sqlTemplate % ("t1, %s t2" % rtable, "t1.ts = t2.ts and "))) + + # It has little to do with the elapsed function, so just simple test. + def joinTest(self): + tdSql.execute("use wxy_db") + + # st1s1 is a subset of t1. 
+ self.joinCheck("select elapsed(ts) from %s where %s ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", "st1s1") + self.joinCheck("select elapsed(ts) from %s where %s ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m)", "st1s1") + # join query does not support group by, so there is no need to test super table. + + def unionAllCheck(self, sql1, sql2): + rows1 = tdSql.query(sql1) + rows2 = tdSql.query(sql2) + tdSql.checkEqual(tdSql.query(sql1 + " union all " + sql2), rows1 + rows2) + + # It has little to do with the elapsed function, so just simple test. + def unionAllTest(self): + tdSql.execute("use wxy_db") + + self.unionAllCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'", + "select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-22 01:00:00'") + self.unionAllCheck("select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(40s)", + "select elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(150m)") + self.unionAllCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname", + "select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-22 02:00:00' group by tbname") + self.unionAllCheck("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(1m) group by tbname", + "select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' interval(222m) group by tbname") + + # It has little to do with the elapsed function, so just simple test. + def continuousQueryTest(self): + tdSql.execute("use wxy_db") + + if (self.restart): + tdSql.execute("drop table elapsed_t") + tdSql.execute("drop table elapsed_st") + tdSql.execute("create table elapsed_t as select elapsed(ts) from t1 interval(1m) sliding(30s)") + tdSql.execute("create table elapsed_st as select elapsed(ts) from st1 interval(1m) sliding(30s) group by tbname") + + def selectIllegalTest(self): + tdSql.execute("use wxy_db") + tdSql.error("select elapsed(1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed('2021-11-18 00:00:10') from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(now) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(b) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(f) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(d) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(bin) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(s) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(t) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(bl) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(n) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") 
+ tdSql.error("select elapsed(*) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, '1s') from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + #tdSql.error("select elapsed(ts, now) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts + 1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, 1b) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts, 1u) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(max(ts)) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select distinct elapsed(ts) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select distinct elapsed(ts) from st1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00' group by tbname") + tdSql.error("select elapsed(ts), i from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), ts from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), _c0 from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), top(i, 1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), bottom(i, 1) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), inerp(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), diff(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), derivative(i, 1s, 0) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), ceil(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), floor(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + tdSql.error("select elapsed(ts), round(i) from t1 where ts > '2021-11-22 00:00:00' and ts < '2021-11-23 00:00:00'") + + def run(self): + self.selectTest() + self.whereTest() + self.sessionTest() + self.stateWindowTest() + self.intervalTest() + self.fillTest() + self.groupbyTest() + self.orderbyTest() + self.slimitTest() + self.limitTest() + self.fromTest() + self.joinTest() + self.unionAllTest() + self.continuousQueryTest() diff --git a/tests/pytest/functions/function_elapsed_restart.py b/tests/pytest/functions/function_elapsed_restart.py new file mode 100644 index 0000000000000000000000000000000000000000..8b492267abdd8ea2d2b2fc27ee2e957e1038f48d --- /dev/null +++ b/tests/pytest/functions/function_elapsed_restart.py @@ -0,0 +1,35 @@ +################################################################### +# Copyright (c) 2020 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+from functions.function_elapsed_case import *
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+    def run(self):
+        tdSql.prepare()
+        ElapsedCase(True).run()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/functions/function_mavg.py b/tests/pytest/functions/function_mavg.py
new file mode 100644
index 0000000000000000000000000000000000000000..0760b203b809ae1a1a05b061ed6f2fbc8659b740
--- /dev/null
+++ b/tests/pytest/functions/function_mavg.py
@@ -0,0 +1,676 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+import re
+import taos
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+    def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+        '''
+        mavg function:
+
+        :param sel: string, must be "select", required parameter;
+        :param func: string, in this case must be "mavg(", otherwise another function is returned, required parameter;
+        :param col: string, column name, required parameter;
+        :param m_comm: string, comma between col and k, required parameter;
+        :param k: int/float, the width of the sliding window, range [1, 1000], required parameter;
+        :param r_comm: string, must be ")", used together with "(" in func, required parameter;
+        :param alias: string, an alias for the result column, or an additional appended expression;
+        :param fr: string, must be "from", required parameter;
+        :param table_expr: string or expression, data source (e.g. table/stable name, or a subquery result set), required parameter;
+        :param condition: expression;
+        :return: mavg query statement, default: select mavg(c1, 1) from t1
+        '''
+
+        return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}"
+
+    def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+        # print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
+        #     table_expr=table_expr, condition=condition))
+        line = sys._getframe().f_back.f_lineno
+
+        if not all([sel , func , col , m_comm , k , r_comm , fr , table_expr]):
+            print(f"case in {line}: ", end='')
+            return tdSql.error(self.mavg_query_form(
+                sel=sel, func=func,
col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + sql = "select * from t1" + collist = tdSql.getColNameList(sql) + + if not isinstance(col, str): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if len([x for x in col.split(",") if x.strip()]) != 1: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + col = col.replace(",", "").replace(" ", "") + + if any([re.compile('^[a-zA-Z]{1}.*$').match(col) is None , not col.replace(".","").isalnum()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + # if all(["," in col , len(col.split(",")) != 2]): + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # + # if ("," in col): + # if (not col.split(",")[0].strip()) ^ (not col.split(",")[1].strip()): + # col = col.strip().split(",")[0] if not col.split(",")[1].strip() else col.strip().split(",")[1] + # else: + # print(f"case in {line}: ", end='') + # return tdSql.error(self.mavg_query_form( + # sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + # table_expr=table_expr, condition=condition + # )) + # pass + + if '.' in col: + if any([col.split(".")[0] not in table_expr, col.split(".")[1] not in collist]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if "." not in col: + if col not in collist: + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + colname = col if "." 
not in col else col.split(".")[1] + col_index = collist.index(colname) + if any([tdSql.cursor.istype(col_index, "TIMESTAMP"), tdSql.cursor.istype(col_index, "BOOL")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any([tdSql.cursor.istype(col_index, "BINARY") , tdSql.cursor.istype(col_index,"NCHAR")]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if any( [func != "mavg(" , r_comm != ")" , fr != "from", sel != "select"]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["(" not in table_expr, "stb" in table_expr, "group" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if "order by tbname" in condition.lower(): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all(["group" in condition.lower(), "tbname" not in condition.lower()]): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + alias_list = ["tbname", "_c0", "st", "ts"] + if all([alias, "," not in alias, not alias.isalnum()]): + # actually, column alias also support "_", but in this case,forbidden that。 + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + + if all([alias, "," in alias]): + if all(parm != alias.lower().split(",")[1].strip() for parm in alias_list): + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + condition_exception = [ "-", "+", "/", "*", "~", "^", "insert", "distinct", + "count", "avg", "twa", "irate", "sum", "stddev", "leastquares", + "min", "max", "first", "last", "top", "bottom", "percentile", + "apercentile", "last_row", "interp", "diff", "derivative", + "spread", "ceil", "floor", "round", "interval", "fill", "slimit", "soffset"] + if "union" not in condition.lower(): + if any(parm in condition.lower().strip() for parm in condition_exception): + + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + pass + + if not any([isinstance(k, int) , isinstance(k, float)]) : + print(f"case in {line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + if not(1 <= k < 1001): + print(f"case in 
{line}: ", end='') + return tdSql.error(self.mavg_query_form( + col=col, k=k, alias=alias, table_expr=table_expr, condition=condition + )) + + k = int(k // 1) + pre_sql = re.sub("mavg\([a-z0-9 .,]*\)", f"count({col})", self.mavg_query_form( + col=col, table_expr=table_expr, condition=condition + )) + tdSql.query(pre_sql) + + if tdSql.queryRows == 0: + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + print(f"case in {line}: ", end='') + tdSql.checkRows(0) + return + + if "group" in condition: + tb_condition = condition.split("group by")[1].split(" ")[1] + tdSql.query(f"select distinct {tb_condition} from {table_expr}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + clear_condition = re.sub('order by [0-9a-z]*|slimit [0-9]*|soffset [0-9]*', "", condition) + + pre_row = 0 + for i in range(query_rows): + group_name = query_result[i][0] + if "where" in clear_condition: + pre_condition = re.sub('group by [0-9a-z]*', f"{tb_condition}='{group_name}'", clear_condition) + else: + pre_condition = "where " + re.sub('group by [0-9a-z]*',f"{tb_condition}='{group_name}'", clear_condition) + + tdSql.query(f"select {col} {alias} from {table_expr} {pre_condition}") + pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_mavg = np.convolve(pre_data, np.ones(k), "valid")/k + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for j in range(len(pre_mavg)): + print(f"case in {line}:", end='') + tdSql.checkData(pre_row+j, 1, pre_mavg[j]) + pre_row += len(pre_mavg) + return + elif "union" in condition: + union_sql_0 = self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[0] + + union_sql_1 = self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + ).split("union all")[1] + + tdSql.query(union_sql_0) + union_mavg_0 = tdSql.queryResult + row_union_0 = tdSql.queryRows + + tdSql.query(union_sql_1) + union_mavg_1 = tdSql.queryResult + + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + if i < row_union_0: + tdSql.checkData(i, 1, union_mavg_0[i][1]) + else: + tdSql.checkData(i, 1, union_mavg_1[i-row_union_0][1]) + return + + else: + tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}") + offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0 + pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + pre_mavg = pre_mavg = np.convolve(pre_result, np.ones(k), "valid")[offset_val:]/k + tdSql.query(self.mavg_query_form( + sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr, + table_expr=table_expr, condition=condition + )) + for i in range(tdSql.queryRows): + print(f"case in {line}: ", end='') + tdSql.checkData(i, 1, pre_mavg[i]) + + pass + + def mavg_current_query(self) : + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 
tinyint, c10 nchar(16) + + # case1~6: numeric col:int/bigint/tinyint/smallint/float/double + self.checkmavg() + case2 = {"col": "c2"} + self.checkmavg(**case2) + case3 = {"col": "c5"} + self.checkmavg(**case3) + case4 = {"col": "c7"} + self.checkmavg(**case4) + case5 = {"col": "c8"} + self.checkmavg(**case5) + case6 = {"col": "c9"} + self.checkmavg(**case6) + + # case7~8: nested query + case7 = {"table_expr": "(select c1 from stb1)"} + self.checkmavg(**case7) + case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"} + self.checkmavg(**case8) + + # case9~10: mix with tbname/ts/tag/col + # case9 = {"alias": ", tbname"} + # self.checkmavg(**case9) + case10 = {"alias": ", _c0"} + self.checkmavg(**case10) + # case11 = {"alias": ", st1"} + # self.checkmavg(**case11) + # case12 = {"alias": ", c1"} + # self.checkmavg(**case12) + + # case13~15: with single condition + case13 = {"condition": "where c1 <= 10"} + self.checkmavg(**case13) + case14 = {"condition": "where c6 in (0, 1)"} + self.checkmavg(**case14) + case15 = {"condition": "where c1 between 1 and 10"} + self.checkmavg(**case15) + + # case16: with multi-condition + case16 = {"condition": "where c6=1 or c6 =0"} + self.checkmavg(**case16) + + # case17: only support normal table join + case17 = { + "col": "t1.c1", + "table_expr": "t1, t2", + "condition": "where t1.ts=t2.ts" + } + self.checkmavg(**case17) + # # case18~19: with group by + case19 = { + "table_expr": "stb1", + "condition": "group by tbname" + } + self.checkmavg(**case19) + + # case20~21: with order by + case20 = {"condition": "order by ts"} + self.checkmavg(**case20) + #case21 = { + # "table_expr": "stb1", + # "condition": "group by tbname order by tbname" + #} + #self.checkmavg(**case21) + + # case22: with union + case22 = { + "condition": "union all select mavg( c1 , 1 ) from t2" + } + self.checkmavg(**case22) + + # case23: with limit/slimit + case23 = { + "condition": "limit 1" + } + self.checkmavg(**case23) + + # case24: value k range[1, 100], can be int or float, k = floor(k) + case24 = {"k": 3} + self.checkmavg(**case24) + case25 = {"k": 2.999} + self.checkmavg(**case25) + case26 = {"k": 1000} + self.checkmavg(**case26) + + pass + + def mavg_error_query(self) -> None : + # unusual test + + # form test + err1 = {"col": ""} + self.checkmavg(**err1) # no col + err2 = {"sel": ""} + self.checkmavg(**err2) # no select + err3 = {"func": "mavg", "col": "", "m_comm": "", "k": "", "r_comm": ""} + self.checkmavg(**err3) # no mavg condition: select mavg from + err4 = {"col": "", "m_comm": "", "k": ""} + self.checkmavg(**err4) # no mavg condition: select mavg() from + err5 = {"func": "mavg", "r_comm": ""} + self.checkmavg(**err5) # no brackets: select mavg col, k from + err6 = {"fr": ""} + self.checkmavg(**err6) # no from + err7 = {"k": ""} + self.checkmavg(**err7) # no k + err8 = {"table_expr": ""} + self.checkmavg(**err8) # no table_expr + + err9 = {"col": "st1"} + self.checkmavg(**err9) # col: tag + err10 = {"col": 1} + self.checkmavg(**err10) # col: value + err11 = {"col": "NULL"} + self.checkmavg(**err11) # col: NULL + err12 = {"col": "%_"} + self.checkmavg(**err12) # col: %_ + err13 = {"col": "c3"} + self.checkmavg(**err13) # col: timestamp col + err14 = {"col": "_c0"} + self.checkmavg(**err14) # col: Primary key + err15 = {"col": "avg(c1)"} + self.checkmavg(**err15) # expr col + err16 = {"col": "c4"} + self.checkmavg(**err16) # binary col + err17 = {"col": "c10"} + self.checkmavg(**err17) # nchar col + err18 = {"col": "c6"} + self.checkmavg(**err18) # bool col 
+ err19 = {"col": "'c1'"} + self.checkmavg(**err19) # col: string + err20 = {"col": None} + self.checkmavg(**err20) # col: None + err21 = {"col": "''"} + self.checkmavg(**err21) # col: '' + err22 = {"col": "tt1.c1"} + self.checkmavg(**err22) # not table_expr col + err23 = {"col": "t1"} + self.checkmavg(**err23) # tbname + err24 = {"col": "stb1"} + self.checkmavg(**err24) # stbname + err25 = {"col": "db"} + self.checkmavg(**err25) # datbasename + err26 = {"col": "True"} + self.checkmavg(**err26) # col: BOOL 1 + err27 = {"col": True} + self.checkmavg(**err27) # col: BOOL 2 + err28 = {"col": "*"} + self.checkmavg(**err28) # col: all col + err29 = {"func": "mavg[", "r_comm": "]"} + self.checkmavg(**err29) # form: mavg[col, k] + err30 = {"func": "mavg{", "r_comm": "}"} + self.checkmavg(**err30) # form: mavg{col, k} + err31 = {"col": "[c1]"} + self.checkmavg(**err31) # form: mavg([col], k) + err32 = {"col": "c1, c2"} + self.checkmavg(**err32) # form: mavg(col, col2, k) + err33 = {"col": "c1, 2"} + self.checkmavg(**err33) # form: mavg(col, k1, k2) + err34 = {"alias": ", count(c1)"} + self.checkmavg(**err34) # mix with aggregate function 1 + err35 = {"alias": ", avg(c1)"} + self.checkmavg(**err35) # mix with aggregate function 2 + err36 = {"alias": ", min(c1)"} + self.checkmavg(**err36) # mix with select function 1 + err37 = {"alias": ", top(c1, 5)"} + self.checkmavg(**err37) # mix with select function 2 + err38 = {"alias": ", spread(c1)"} + self.checkmavg(**err38) # mix with calculation function 1 + err39 = {"alias": ", diff(c1)"} + self.checkmavg(**err39) # mix with calculation function 2 + err40 = {"alias": "+ 2"} + self.checkmavg(**err40) # mix with arithmetic 1 + err41 = {"alias": "+ avg(c1)"} + self.checkmavg(**err41) # mix with arithmetic 2 + err42 = {"alias": ", c1"} + self.checkmavg(**err42) # mix with other col + err43 = {"table_expr": "stb1"} + self.checkmavg(**err43) # select stb directly + err44 = { + "col": "stb1.c1", + "table_expr": "stb1, stb2", + "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + } + self.checkmavg(**err44) # stb join + err45 = { + "condition": "where ts>0 and ts < now interval(1h) fill(next)" + } + self.checkmavg(**err45) # interval + err46 = { + "table_expr": "t1", + "condition": "group by c6" + } + self.checkmavg(**err46) # group by normal col + err47 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 " + } + # self.checkmavg(**err47) # with slimit + err48 = { + "table_expr": "stb1", + "condition": "group by tbname slimit 1 soffset 1" + } + # self.checkmavg(**err48) # with soffset + err49 = {"k": "2021-01-01 00:00:00.000"} + self.checkmavg(**err49) # k: timestamp + err50 = {"k": False} + self.checkmavg(**err50) # k: False + err51 = {"k": "%"} + self.checkmavg(**err51) # k: special char + err52 = {"k": ""} + self.checkmavg(**err52) # k: "" + err53 = {"k": None} + self.checkmavg(**err53) # k: None + err54 = {"k": "NULL"} + self.checkmavg(**err54) # k: null + err55 = {"k": "binary(4)"} + self.checkmavg(**err55) # k: string + err56 = {"k": "c1"} + self.checkmavg(**err56) # k: sring,col name + err57 = {"col": "c1, 1, c2"} + self.checkmavg(**err57) # form: mavg(col1, k1, col2, k2) + err58 = {"col": "c1 cc1"} + self.checkmavg(**err58) # form: mavg(col newname, k) + err59 = {"k": "'1'"} + # self.checkmavg(**err59) # formL mavg(colm, "1") + err60 = {"k": "-1-(-2)"} + # self.checkmavg(**err60) # formL mavg(colm, -1-2) + err61 = {"k": 1001} + self.checkmavg(**err61) # k: right out of [1, 1000] + err62 = {"k": -1} + 
self.checkmavg(**err62) # k: negative number + err63 = {"k": 0} + self.checkmavg(**err63) # k: 0 + err64 = {"k": 2**63-1} + self.checkmavg(**err64) # k: max(bigint) + err65 = {"k": 1-2**63} + # self.checkmavg(**err65) # k: min(bigint) + err66 = {"k": -2**63} + self.checkmavg(**err66) # k: NULL + err67 = {"k": 0.999999} + self.checkmavg(**err67) # k: left out of [1, 1000] + err68 = { + "table_expr": "stb1", + "condition": "group by tbname order by tbname" # order by tbname not supported + } + self.checkmavg(**err68) + + pass + + def mavg_test_data(self, tbnum:int, data_row:int, basetime:int) -> None : + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def mavg_test_table(self,tbnum: int) -> None : + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + pass + + def mavg_test_run(self) : + tdLog.printNoPrefix("==========TD-10594==========") + tbnum = 10 + nowtime = int(round(time.time() * 1000)) + per_table_rows = 100 + self.mavg_test_table(tbnum) + + tdLog.printNoPrefix("######## no data test:") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert only NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})") + self.mavg_current_query() + self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") + # self.mavg_test_table(tbnum) + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") + # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " + # f"({nowtime - (per_table_rows + 2) * 10}, 
{1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") + # self.mavg_current_query() + # self.mavg_error_query() + + tdLog.printNoPrefix("######## insert data without NULL data test:") + self.mavg_test_table(tbnum) + self.mavg_test_data(tbnum, per_table_rows, nowtime) + self.mavg_current_query() + self.mavg_error_query() + + + tdLog.printNoPrefix("######## insert data mix with NULL test:") + for i in range(tbnum): + tdSql.execute(f"insert into t{i}(ts) values ({nowtime})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})") + tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})") + self.mavg_current_query() + self.mavg_error_query() + + + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.mavg_current_query() + self.mavg_error_query() + + def run(self): + import traceback + try: + # run in develop branch + self.mavg_test_run() + pass + except Exception as e: + traceback.print_exc() + raise e + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/insertJSONPayload.py b/tests/pytest/insert/insertJSONPayload.py index a2e773328a7a346f17d8c256cce79d7beb9628e4..b151aa946c4f6c6a808df70e921adb1f6f11f5df 100644 --- a/tests/pytest/insert/insertJSONPayload.py +++ b/tests/pytest/insert/insertJSONPayload.py @@ -129,7 +129,7 @@ class TDTestCase: print("schemaless_insert result {}".format(code)) tdSql.query("describe stb0_3") - tdSql.checkData(1, 1, "BINARY") + tdSql.checkData(1, 1, "NCHAR") payload = [''' { @@ -835,7 +835,7 @@ class TDTestCase: code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value) print("schemaless_insert result {}".format(code)) - tdSql.query("describe `stable`") + tdSql.query("describe `STABLE`") tdSql.checkRows(9) #tdSql.query("select * from `key`") diff --git a/tests/pytest/insert/insertTelnetLines.py b/tests/pytest/insert/insertTelnetLines.py index 149e62c362ab802fbbc4f2d939b3bf149cbf7e1b..774027ffed1199de643957970e3cd8122a25905c 100644 --- a/tests/pytest/insert/insertTelnetLines.py +++ b/tests/pytest/insert/insertTelnetLines.py @@ -333,7 +333,7 @@ class TDTestCase: tdSql.query('describe `!@#$.%^&*()`') tdSql.checkRows(9) - tdSql.query('describe `stable`') + tdSql.query('describe `STABLE`') tdSql.checkRows(9) #tdSql.query('select * from `123`') diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py index acc43d80e719065706aaee95c5cdbaf5235ae04b..d95df3a8491f73f7279e583afd446a7182adf823 100644 --- a/tests/pytest/insert/line_insert.py +++ b/tests/pytest/insert/line_insert.py @@ -86,6 +86,67 @@ class TDTestCase: #tdSql.query('select tbname, * from childtable') #tdSql.checkRows(1) + ###Test when tag is omitted + lines3 = [ "sti c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "sti c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000" + ] + + code = self._conn.schemaless_insert(lines3, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from sti') + tdSql.checkRows(2) + + tdSql.query('select tbname from sti') + tdSql.checkRows(1) + + lines4 = [ "stp c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "stp 
c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000" + ] + code = self._conn.schemaless_insert([ lines4[0] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + code = self._conn.schemaless_insert([ lines4[1] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from stp') + tdSql.checkRows(2) + + tdSql.query('select tbname from stp') + tdSql.checkRows(1) + + lines5 = [ "stq c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "stq,t1=abc c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000", + "stq,t2=abc c1=3i64,c3=L\"passitagin\",c4=5f64,c5=5f64,c6=true 1626006833640000000" + ] + code = self._conn.schemaless_insert([ lines5[0] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + code = self._conn.schemaless_insert([ lines5[1] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + code = self._conn.schemaless_insert([ lines5[2] ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from stq') + tdSql.checkRows(3) + + tdSql.query('select tbname from stq') + tdSql.checkRows(3) + + lines6 = [ "str c1=4i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "str,t1=abc c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000", + "str,t2=abc c1=3i64,c3=L\"passitagin\",c4=5f64,c5=5f64,c6=true 1626006833640000000" + ] + code = self._conn.schemaless_insert(lines6, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value) + print("schemaless_insert result {}".format(code)) + + tdSql.query('select * from str') + tdSql.checkRows(3) + + tdSql.query('select tbname from str') + tdSql.checkRows(3) + ###Special Character and keyss self._conn.schemaless_insert([ "1234,id=3456,abc=4i64,def=3i64 123=3i64,int=2i64,bool=false,into=5f64,column=7u64,!@#$.%^&*()=false 1626006933641", @@ -112,7 +173,7 @@ class TDTestCase: tdSql.query('describe `!@#$.%^&*()`') tdSql.checkRows(9) - tdSql.query('describe `stable`') + tdSql.query('describe `STABLE`') tdSql.checkRows(9) #tdSql.query('select * from `3456`') diff --git a/tests/pytest/query/queryLike.py b/tests/pytest/query/queryLike.py index 2bcd5906a8eaa505e1702cefce7f8b2594f53f43..b3916ed84db1d558e4b95f62c2def19deee75944 100644 --- a/tests/pytest/query/queryLike.py +++ b/tests/pytest/query/queryLike.py @@ -92,13 +92,13 @@ class TDTestCase: tdSql.query("select * from st where tbname like 'tb_\_'") tdSql.checkRows(1) tdSql.query("select * from st where tbname like 'tb___'") - tdSql.checkRows(4) - tdSql.query("select * from st where tbname like 'tb_\__'") tdSql.checkRows(3) + tdSql.query("select * from st where tbname like 'tb_\__'") + tdSql.checkRows(2) tdSql.query("select * from st where tbname like 'tb_\_\_'") tdSql.checkRows(1) tdSql.query("select * from st where tbname like 'tb\__\_'") - tdSql.checkRows(1) + tdSql.checkRows(2) tdSql.query("select * from st where tbname like 'tb\__\__'") tdSql.checkRows(2) tdSql.query("select * from st where tbname like 'tb\__\_\_'") @@ -116,9 +116,9 @@ class TDTestCase: tdSql.query("select * from st where name like 'tbname\__';") tdSql.checkRows(3) tdSql.query("select * from st where name like 'tbname___';") - tdSql.checkRows(4) - tdSql.query("select * from 
st where name like 'tbname_\__';") tdSql.checkRows(3) + tdSql.query("select * from st where name like 'tbname_\__';") + tdSql.checkRows(2) tdSql.query("select * from st where name like 'tbname_\_\_';") tdSql.checkRows(1) tdSql.query("select * from st where name like 'tbname\_\__';") @@ -132,7 +132,8 @@ class TDTestCase: tdSql.query("select * from st where name like 'tbname\_\__\_';") tdSql.checkRows(2) tdSql.query("select name from st where name like 'tbname\_\_\__';") - tdSql.checkData(0,0 "tbname____") + tdSql.checkRows(1) + tdSql.checkData(0,0, "tbname____") # check escape about tags tdSql.query("select * from st where tagg like 'tag\_';") @@ -142,9 +143,9 @@ class TDTestCase: tdSql.query("select * from st where tagg like 'tag\__';") tdSql.checkRows(3) tdSql.query("select * from st where tagg like 'tag___';") - tdSql.checkRows(4) - tdSql.query("select * from st where tagg like 'tag_\__';") tdSql.checkRows(3) + tdSql.query("select * from st where tagg like 'tag_\__';") + tdSql.checkRows(2) tdSql.query("select * from st where tagg like 'tag_\_\_';") tdSql.checkRows(1) tdSql.query("select * from st where tagg like 'tag\_\__';") @@ -158,7 +159,7 @@ class TDTestCase: tdSql.query("select * from st where tagg like 'tag\_\__\_';") tdSql.checkRows(2) tdSql.query("select * from st where tagg like 'tag\_\__\_';") - tdSql.checkData(0,0 "tag__a_") + tdSql.checkData(0,0, "tag__a_") os.system("rm -rf ./*.py.sql") diff --git a/tests/script/general/compute/csum.sim b/tests/script/general/compute/csum.sim index b350e4f403a02702741e0f10ab91fb9799e776d3..e7a2c2065029ab58e92c0d5643262a01875843be 100644 --- a/tests/script/general/compute/csum.sim +++ b/tests/script/general/compute/csum.sim @@ -101,9 +101,16 @@ if $data11 != -2 then endi print ==========>TD10758 +sql create database groupby_tbname +sql use groupby_tbname sql create stable st(ts timestamp, c1 int) tags(t int); sql create table ct1 using st tags(1) sql insert into ct1 values(now, 1)(now+1s, 2)(now+2s, 3) +sql create table ct2 using st tags(2) +sql insert into ct2 values(now, 21)(now+1s, 22)(now+2s, 23) +sql create table ct3 using st tags(3) +sql insert into ct3 values(now, 31)(now+1s, 32)(now+2s, 33) + sql select csum(c1),ts,tbname,t from ct1 print $data10 , $data11 , $data12, $data13, $data14 if $data13 != ct1 then @@ -169,6 +176,81 @@ if $data14 != 1 then return -1 endi +sql select mavg(c1,2),tbname from st group by tbname +print $data10 , $data11 , $data12 , $data13 +if $data12 != ct1 then + return -1 +endi +if $data13 != ct1 then + return -1 +endi + +sql select diff(c1),tbname from st group by tbname +print $data10 , $data11 , $data12 , $data13 +if $data12 != ct1 then + return -1 +endi +if $data13 != ct1 then + return -1 +endi + +sql select csum(c1),tbname from st group by tbname +print $data10 , $data11 , $data12, $data13, $data14 +print $data10 , $data11 , $data12 , $data13 +if $data12 != ct1 then + return -1 +endi +if $data13 != ct1 then + return -1 +endi + +sql select csum(c1),t,tbname from st group by tbname limit 2 +print $data10 , $data11 , $data12 , $data13 , $data14 +print $data30 , $data31 , $data32 , $data33 , $data34 +if $data13 != ct1 then + return -1 +endi +if $data14 != ct1 then + return -1 +endi +if $data33 != ct2 then + return -1 +endi +if $data34 != ct2 then + return -1 +endi + +sql select mavg(c1,2),t,tbname from st group by tbname limit 2 +print $data10 , $data11 , $data12 , $data13 , $data14 +print $data30 , $data31 , $data32 , $data33 , $data34 +if $data13 != ct1 then + return -1 +endi +if $data14 != ct1 then + return 
-1 +endi +if $data33 != ct2 then + return -1 +endi +if $data34 != ct2 then + return -1 +endi +sql select diff(c1),t,tbname from st group by tbname limit 2 +print $data10 , $data11 , $data12 , $data13 , $data14 +print $data30 , $data31 , $data32 , $data33 , $data34 +if $data13 != ct1 then + return -1 +endi +if $data14 != ct1 then + return -1 +endi +if $data33 != ct2 then + return -1 +endi +if $data34 != ct2 then + return -1 +endi +sql drop database groupby_tbname print =============== clear sql drop database $db diff --git a/tests/script/unique/dnode/alternativeRole.sim b/tests/script/unique/dnode/alternativeRole.sim index 14a6e92f064f6077d549ad2c48c5ada3da83995a..7e647925d1d3d66d21f279ace852e3fc12496510 100644 --- a/tests/script/unique/dnode/alternativeRole.sim +++ b/tests/script/unique/dnode/alternativeRole.sim @@ -30,35 +30,50 @@ sql create dnode $hostname2 system sh/exec.sh -n dnode2 -s start sql create dnode $hostname3 system sh/exec.sh -n dnode3 -s start -sleep 5000 +sleep 3000 + +$x = 0 +show1: + $x = $x + 1 + sleep 1000 + if $x == 30 then + return -1 + endi sql show dnodes print dnode1 $data5_1 -print dnode1 $data5_2 -print dnode1 $data5_3 +print dnode2 $data5_2 +print dnode3 $data5_3 if $data5_1 != mnode then - return -1 + goto show1 endi if $data5_2 != vnode then - return -1 + goto show1 endi if $data5_3 != any then - return -1 + goto show1 endi +show2: + $x = $x + 1 + sleep 1000 + if $x == 30 then + return -1 + endi + sql show mnodes print dnode1 ==> $data2_1 print dnode2 ==> $data2_2 print dnode3 ==> $data2_3 if $data2_1 != master then - return -1 + goto show2 endi if $data2_2 != null then - return -1 + goto show2 endi if $data2_3 != slave then - return -1 + goto show2 endi print ========== step2 @@ -72,26 +87,28 @@ sql create table d1.t6 (ts timestamp, i int) sql create table d1.t7 (ts timestamp, i int) sql create table d1.t8 (ts timestamp, i int) +show3: + $x = $x + 1 + sleep 1000 + if $x == 30 then + return -1 + endi + sql show dnodes print dnode1 $data2_1 print dnode2 $data2_2 print dnode3 $data2_3 if $data2_1 != 0 then - return -1 + goto show3 endi if $data2_2 != 1 then - return -1 + goto show3 endi if $data2_3 != 1 then - return -1 + goto show3 endi system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT -system sh/exec.sh -n dnode3 -s stop -x SIGINT -system sh/exec.sh -n dnode4 -s stop -x SIGINT -system sh/exec.sh -n dnode5 -s stop -x SIGINT -system sh/exec.sh -n dnode6 -s stop -x SIGINT -system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file