diff --git a/.gitmodules b/.gitmodules index 57cf1a87a14f638527478e2d8a993d8c0d329601..df28582f94518d5f3bd54770ccac2e33360b4e54 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,6 +10,7 @@ [submodule "deps/TSZ"] path = deps/TSZ url = https://github.com/taosdata/TSZ.git + branch = master [submodule "src/kit/taos-tools"] path = src/kit/taos-tools url = https://github.com/taosdata/taos-tools @@ -20,3 +21,6 @@ path = tests url = https://github.com/taosdata/tests branch = develop +[submodule "examples/rust"] + path = examples/rust + url = https://github.com/songtianyi/tdengine-rust-bindings.git \ No newline at end of file diff --git a/README.md b/README.md index 4ea1c833f2d4afbe3986315c774f98de8793f0f2..8f1eb8ddc925ca7f0fc35c17f7a19943bfe7c66c 100644 --- a/README.md +++ b/README.md @@ -277,7 +277,7 @@ If TDengine shell connects the server successfully, welcome messages and version ## Install TDengine by apt-get -If you use Debian or Ubuntu system, you can use 'apt-get' command to intall TDengine from official repository. Please use following commands to setup: +If you use Debian or Ubuntu system, you can use 'apt-get' command to install TDengine from official repository. Please use following commands to setup: ``` wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - diff --git a/cmake/input.inc b/cmake/input.inc index bc79de48a482539660e6166b642144d754fc94a4..9d411e382b4a9b07de238cc1caaa246dcbda57bc 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -96,11 +96,12 @@ IF (${VERBOSE} MATCHES "true") SET(TD_BUILD_VERBOSE TRUE) ENDIF () -IF (${TSZ_ENABLED} MATCHES "true") - # define add +# build TSZ by default +IF ("${TSZ_ENABLED}" MATCHES "false") + set(VAR_TSZ "" CACHE INTERNAL "global variant empty" ) +ELSE() + # define add MESSAGE(STATUS "build with TSZ enabled") ADD_DEFINITIONS(-DTD_TSZ) set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" ) -ELSE() - set(VAR_TSZ "" CACHE INTERNAL "global variant empty" ) -ENDIF() +ENDIF() \ No newline at end of file diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index a8b4fd288ea83676c98fa9db5acc464b42f51992..d4526a8839dc41d91fd717d542c543b806db5fce 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -45,6 +45,6 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) ENDIF () -IF (${TSZ_ENABLED} MATCHES "true") +IF (NOT "${TSZ_ENABLED}" MATCHES "false") ADD_SUBDIRECTORY(TSZ) ENDIF() diff --git a/deps/TSZ b/deps/TSZ new file mode 160000 index 0000000000000000000000000000000000000000..11c1060d4f917dd799ae628b131db5d6a5ef6954 --- /dev/null +++ b/deps/TSZ @@ -0,0 +1 @@ +Subproject commit 11c1060d4f917dd799ae628b131db5d6a5ef6954 diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index df1fd197559c25b61e186860da6bdf973fae1f83..62d06bf64cb337f0bbb0152b4111452ec513ec49 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -87,12 +87,14 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [taosAdapter](/tools/adapter): TDengine 集群和应用之间的 RESTful 接口适配服务。 * [TDinsight](/tools/insight): 监控 TDengine 集群的 Grafana 面板集合。 * [taosdump](/tools/taosdump): TDengine 数据备份工具。使用 taosdump 请安装 taosTools。 -* [taosBenchmark](/tools/taosbenchmark): TDengine 压力测试工具。使用 taosBenchmark 请安装 taosTools。 +* [taosBenchmark](/tools/taosbenchmark): TDengine 压力测试工具。 ## [与其他工具的连接](/connections) * [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据 * [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过IDEA 数据库管理工具可视化使用 TDengine +* 
[TDengineGUI](https://github.com/skye0207/TDengineGUI):基于Electron开发的跨平台TDengine图形化管理工具
+* [DataX](https://www.taosdata.com/blog/2021/10/26/3156.html):支持 TDengine 和其他数据库之间进行数据迁移的工具

## [TDengine集群的安装、管理](/cluster)

@@ -132,14 +134,6 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [devops](/devops/collectd):使用 TDengine + collectd_statsd + Grafana 快速搭建 IT 运维系统
* [最佳实践](/devops/immigrate):OpenTSDB 应用迁移到 TDengine 的最佳实践

-## 常用工具
-
-* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
-* [TDengine写入性能测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
-* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
-* [基于Electron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
-* [基于DataX的TDeninge数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)
-
## TDengine与其他数据库的对比测试

* [用InfluxDB开源的性能测试工具对比InfluxDB和TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html)
diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
index 64200f17ff5912d4741ea69f7e4dffaa99f7c5c3..7e48000737cbf4acd710af03fd371022295896b7 100644
--- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
@@ -1,16 +1,15 @@
- 如何使用 taosBenchmark 进行性能测试
-==
-
+# 如何使用 taosBenchmark 进行性能测试

自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。

-运行 taosBenchmark 很简单,通过下载 TDengine 安装包( https://www.taosdata.com/cn/all-downloads/ )或者自行下载 TDengine 代码( https://github.com/taosdata/TDengine )编译都可以在安装目录或者编译结果目录中找到并运行。
+运行 taosBenchmark 很简单,通过下载 [TDengine 安装包](https://www.taosdata.com/cn/all-downloads/)或者自行下载 [TDengine 代码](https://github.com/taosdata/TDengine)编译都可以在安装目录或者编译结果目录中找到并运行。

接下来本文为大家讲解 taosBenchmark 的使用介绍及注意事项。

-使用 taosBenchmark 进行写入测试
---
+## 使用 taosBenchmark 进行写入测试
+
不使用任何参数的情况下执行 taosBenchmark 命令,输出如下:
+
```
$ taosBenchmark

@@ -58,7 +57,9 @@ column[0]:FLOAT column[1]:INT column[2]:FLOAT
Press enter key to continue or Ctrl-C to stop
```
+
这里显示的是接下来 taosBenchmark 进行数据写入的各项参数。默认不输入任何命令行参数的情况下 taosBenchmark 将模拟生成一个电力行业典型应用的电表数据采集场景数据。即建立一个名为 test 的数据库,并创建一个名为 meters 的超级表,其中表结构为:
+
```
taos> describe test.meters;
Field | Type | Length | Note |
ts | TIMESTAMP | 8 | |
current | FLOAT | 4 | |
voltage | INT | 4 | |
phase | FLOAT | 4 | |
groupid | INT | 4 | TAG |
location | BINARY | 64 | TAG |
Query OK, 6 row(s) in set (0.002972s)
```
+
按任意键后 taosBenchmark 将建立数据库 test 和超级表 meters,并按照 TDengine 数据建模的最佳实践,以 meters 超级表为模板生成一万个子表,代表一万个独立上报数据的电表设备。
+
```
taos> use test;
Database changed.
@@ -82,7 +85,9 @@ taos> show stables;
meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001740s)
```
+
然后 taosBenchmark 为每个电表设备模拟生成一万条记录:
+
```
...
====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second====
@@ -99,9 +104,11 @@ Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1
insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms
```
+
以上信息是在一台具备 8个CPU 64G 内存的普通 PC 服务器上进行实测的结果。显示 taosBenchmark 用了 18 秒的时间插入了 100000000 (一亿)条记录,平均每秒钟插入 552 万 9千零49 条记录。

TDengine 还提供性能更好的参数绑定接口,而在同样的硬件上使用参数绑定接口 (taosBenchmark -I stmt )进行相同数据量的写入,结果如下:
+
```
...
@@ -136,12 +143,13 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16 insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms ``` -显示 taosBenchmark 用了 6 秒的时间插入了一亿条记录,每秒钟插入性能高达 1659 万 5 千 590 条记录。 +显示 taosBenchmark 用了 6 秒的时间插入了一亿条记录,每秒钟插入性能高达 1659 万 5 千 590 条记录。 由于 taosBenchmark 使用起来非常方便,我们又对 taosBenchmark 做了更多的功能扩充,使其支持更复杂的参数设置,便于进行快速原型开发的样例数据准备和验证工作。 完整的 taosBenchmark 命令行参数列表可以通过 taosBenchmark --help 显示如下: + ``` $ taosBenchmark --help @@ -188,51 +196,70 @@ Report bugs to . ``` taosBenchmark 的参数是为了满足数据模拟的需求来设计的。下面介绍几个常用的参数: + ``` -I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'. ``` + 前面介绍 taosBenchmark 不同接口的性能差异已经提到, -I 参数为选择不同的接口,目前支持 taosc、stmt 和 rest 几种。其中 taosc 为使用 SQL 语句方式进行数据写入;stmt 为使用参数绑定接口进行数据写入;rest 为使用 RESTful 协议进行数据写入。 + ``` -T, --threads=NUMBER The number of threads. Default is 8. ``` + -T 参数设置 taosBenchmark 使用多少个线程进行数据同步写入,通过多线程可以尽最大可能压榨硬件的处理能力。 + ``` -b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT. -w, --binwidth=WIDTH The width of data_type 'BINARY' or 'NCHAR'. Default is 64 - + -l, --columns=COLUMNS The number of columns per record. Demo mode by default is 3 (float, int, float). Max values is 4095 ``` + 前文提到,taosBenchmark 默认创建一个典型电表数据采集应用场景,每个设备包含电流电压相位3个采集量。对于需要定义不同的采集量,可以使用 -b 参数。TDengine 支持 BOOL、TINYINT、SMALLINT、INT、BIGINT、FLOAT、DOUBLE、BINARY、NCHAR、TIMESTAMP 等多种数据类型。通过 -b 加上以“ , ”(英文逗号)分割定制类型的列表可以使 taosBenchmark 建立对应的超级表和子表并插入相应模拟数据。通过 -w 参数可以指定 BINARY 和 NCHAR 数据类型的列的宽度(默认为 64 )。-l 参数可以在 -b 参数指定数据类型的几列之后补充以 INT 型的总的列数,特别多列的情况下可以减少手工输入的过程,最多支持到 4095 列。 + ``` -r, --rec-per-req=NUMBER The number of records per request. Default is 30000. ``` + 为了达到 TDengine 性能极限,可以使用多客户端、多线程以及一次插入多条数据来进行数据写入。 -r 参数为设置一次写入请求可以拼接的记录条数,默认为30000条。有效的拼接记录条数还和客户端缓冲区大小有关,目前的缓冲区为 1M Bytes,如果记录的列宽度比较大,最大拼接记录条数可以通过 1M 除以列宽(以字节为单位)计算得出。 + ``` -t, --tables=NUMBER The number of tables. Default is 10000. -n, --records=NUMBER The number of records per table. Default is 10000. -M, --random The value of records generated are totally random. The default is to simulate power equipment senario. ``` + 前面提到 taosBenchmark 默认创建 10000 个表,每个表写入 10000 条记录。可以通过 -t 和 -n 设置表的数量和每个表的记录的数量。默认无参数生成的数据为模拟真实场景,模拟生成的数据为电流电压相位值增加一定的抖动,可以更真实表现 TDengine 高效的数据压缩能力。如果需要模拟生成完全随机数据,可以通过 -M 参数。 + ``` -y, --answer-yes Default input yes for prompt. ``` + 前面我们可以看到 taosBenchmark 默认在进行创建数据库或插入数据之前输出将要进行操作的参数列表,方便使用者在插入之前了解即将进行的数据写入的内容。为了方便进行自动测试,-y 参数可以使 taosBenchmark 输出参数后立刻进行数据写入操作。 + ``` -O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order. -R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000. ``` + 在某些场景,接收到的数据并不是完全按时间顺序到来,而是包含一定比例的乱序数据,TDengine 也能进行很好的处理。为了模拟乱序数据的写入,taosBenchmark 提供 -O 和 -R 参数进行设置。-O 参数为 0 和不使用 -O 参数相同为完全有序数据写入。1 到 50 为数据中包含乱序数据的比例。-R 参数为乱序数据时间戳偏移的范围,默认为 1000 毫秒。另外注意,时序数据以时间戳为唯一标识,所以乱序数据可能会生成和之前已经写入数据完全相同的时间戳,这样的数据会根据数据库创建的 update 值或者被丢弃(update 0)或者覆盖已有数据(update 1 或 2),而总的数据条数可能和期待的条数不一致的情况。 + ``` -g, --debug Print debug info. ``` + 如果对 taosBenchmark 写入数据过程感兴趣或者数据写入结果不符合预期,可以使用 -g 参数使 taosBenchmark 打印执行过程中间调试信息到屏幕上,或通过 Linux 重定向命令导入到另外一个文件,方便找到发生问题的原因。另外 taosBenchmark 在执行失败后也会把相应执行的语句和调试原因输出到屏幕。可以搜索 reason 来找到 TDengine 服务端返回的错误原因信息。 + ``` -x, --aggr-func Test aggregation funtions after insertion. 
``` + TDengine 不仅仅是插入性能非常强大,由于其先进的数据库引擎设计使查询性能也异常强大。taosBenchmark 提供一个 -x 函数,可以在插入数据结束后进行常用查询操作并输出查询消耗时间。以下为在前述服务器上进行插入一亿条记录后进行常用查询的结果。 可以看到 select * 取出一亿条记录(不输出到屏幕)操作仅消耗1.26秒。而对一亿条记录进行常用的聚合函数操作通常仅需要二十几毫秒,时间最长的 count 函数也不到四十毫秒。 + ``` taosBenchmark -I stmt -T 48 -y -x ... @@ -254,7 +281,9 @@ select min(current) took 0.025812 second(s) select first(current) took 0.024105 second(s) ... ``` + 除了命令行方式, taosBenchmark 还支持接受指定一个 JSON 文件做为传入参数的方式来提供更丰富的设置。一个典型的 JSON 文件内容如下: + ``` { "filetype": "insert", @@ -263,17 +292,17 @@ select first(current) took 0.024105 second(s) "port": 6030, "user": "root", "password": "taosdata", - "thread_count": 4, - "thread_count_create_tbl": 4, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "interlace_rows": 100, + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, "num_of_records_per_req": 100, "databases": [{ "dbinfo": { "name": "db", - "drop": "yes", + "drop": "yes", "replica": 1, "days": 10, "cache": 16, @@ -291,39 +320,41 @@ select first(current) took 0.024105 second(s) }, "super_tables": [{ "name": "stb", - "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "stb_", - "auto_create_table": "no", - "batch_create_tbl_num": 5, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100000, - "childtable_limit": 10, - "childtable_offset":100, - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 10, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100000, + "childtable_limit": 10, + "childtable_offset":100, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] } ``` + 例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。 -使用 taosBenchmark 进行查询和订阅测试 --- +## 使用 taosBenchmark 进行查询和订阅测试 + taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅功能。但一个 taosBenchmark 实例只能支持其中的一种功能,不能同时支持三种功能,通过配置文件来指定进行哪种功能的测试。 以下为一个典型查询 JSON 示例文件内容: + ``` { "filetype": "query", @@ -363,7 +394,9 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅 } } ``` + 以下为 JSON 文件中和查询相关的特有参数含义: + ``` "query_times": 每种查询类型的查询次数 "query_mode": 查询数据接口,"taosc":调用TDengine的c接口;“resetful”:使用restfule接口。可选项。缺省是“taosc”。 @@ -382,6 +415,7 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅 ``` 以下为一个典型订阅 JSON 示例文件内容: + ``` { "filetype":"subscribe", @@ -394,34 +428,36 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅 "confirm_parameter_prompt": "no", "specified_table_query": { - "concurrent":1, - "mode":"sync", - "interval":0, - "restart":"yes", 
+ "concurrent":1, + "mode":"sync", + "interval":0, + "restart":"yes", "keepProgress":"yes", "sqls": [ { - "sql": "select * from stb00_0 ;", + "sql": "select * from stb00_0 ;", "result": "./subscribe_res0.txt" }] }, - "super_table_query": + "super_table_query": { "stblname": "stb0", - "threads":1, - "mode":"sync", - "interval":10000, - "restart":"yes", + "threads":1, + "mode":"sync", + "interval":10000, + "restart":"yes", "keepProgress":"yes", "sqls": [ { - "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", "result": "./subscribe_res1.txt" }] } } ``` + 以下为订阅功能相关的特有参数含义: + ``` "interval": 执行订阅的间隔,单位是秒。可选项,缺省是0。 "restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。(请注意执行用户需要对 dataDir 目录有读写权限) @@ -429,16 +465,15 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅 "resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 ``` -结语 --- + +## 结语 + TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。TDengine 由于数据库内核中创新的数据存储和查询引擎设计,展现出远超同类产品的高效性能。并且由于支持 SQL 语法和多种编程语言的连接器(目前支持 Java, Python, Go, C#, NodeJS, Rust 等),易用性极强,学习成本为零。为了便于运维需求,我们还提供数据迁移和监控功能等相关生态工具软件。 为了刚接触 TDengine 的使用者方便进行技术评估和压力测试,我们为 taosBenchmark 开发了丰富的特性。本文即为对 taosBenchmark 的一个简单介绍,随着 TDengine 新功能的不断增加,taosBenchmark 也会继续演化和改进。taosBenchmark 的代码做为 TDengine 的一部分在 GitHub 上完全开源。欢迎就 taosBenchmark 或 TDengine 的使用或实现在 GitHub 或者涛思数据的用户群提出建议或批评。 +## 附录 - 完整 taosBenchmark 参数介绍 - -附录 - 完整 taosBenchmark 参数介绍 --- taosBenchmark支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 一、命令行参数 @@ -505,12 +540,12 @@ taosBenchmark支持两种配置参数的模式,一种是命令行参数,一 --help: 打印命令参数列表。 - 二、JSON 格式的配置文件中所有参数说明 taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一个taosBenchmark实例不能同时支持三种功能,一个 taosBenchmark 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 1、插入功能测试的 JSON 配置文件 + ``` { "filetype": "insert", @@ -519,17 +554,17 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "port": 6030, "user": "root", "password": "taosdata", - "thread_count": 4, - "thread_count_create_tbl": 4, - "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "interlace_rows": 100, + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, "num_of_records_per_req": 100, "databases": [{ "dbinfo": { "name": "db", - "drop": "yes", + "drop": "yes", "replica": 1, "days": 10, "cache": 16, @@ -547,27 +582,27 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 }, "super_tables": [{ "name": "stb", - "child_table_exists":"no", - "childtable_count": 100, - "childtable_prefix": "stb_", - "auto_create_table": "no", - "batch_create_tbl_num": 5, - "data_source": "rand", - "insert_mode": "taosc", - "insert_rows": 100000, - "childtable_limit": 10, - "childtable_offset":100, - "interlace_rows": 0, - "insert_interval":0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, - "timestamp_step": 10, - "start_timestamp": "2020-10-01 00:00:00.000", - "sample_format": "csv", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100000, + "childtable_limit": 10, + "childtable_offset":100, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + 
"sample_format": "csv", "sample_file": "./sample.csv", "use_sameple_ts": "no", - "tags_file": "", + "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] @@ -700,6 +735,7 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 }] 2、查询功能测试的 JSON 配置文件 + ``` { "filetype": "query", @@ -784,12 +820,12 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 - 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。 3、订阅功能测试的 JSON 配置文件 + ``` { "filetype":"subscribe", @@ -802,28 +838,28 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "confirm_parameter_prompt": "no", "specified_table_query": { - "concurrent":1, - "mode":"sync", - "interval":0, - "restart":"yes", + "concurrent":1, + "mode":"sync", + "interval":0, + "restart":"yes", "keepProgress":"yes", "sqls": [ { - "sql": "select * from stb00_0 ;", + "sql": "select * from stb00_0 ;", "result": "./subscribe_res0.txt" }] }, - "super_table_query": + "super_table_query": { "stblname": "stb0", - "threads":1, - "mode":"sync", - "interval":10000, - "restart":"yes", + "threads":1, + "mode":"sync", + "interval":10000, + "restart":"yes", "keepProgress":"yes", "sqls": [ { - "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", "result": "./subscribe_res1.txt" }] } diff --git a/documentation20/cn/02.getting-started/03.install/docs.md b/documentation20/cn/02.getting-started/03.install/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..208271bf54f87c4fbb4d681653cbf53dd6e318c7 --- /dev/null +++ b/documentation20/cn/02.getting-started/03.install/docs.md @@ -0,0 +1,183 @@ + +# TDengine 安装包的安装和卸载 + +TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 等系统,rpm 支持 CentOS/RHEL/SUSE 等系统。同时我们也为企业用户提供 tar.gz 格式安装包。 + +## deb 包的安装和卸载 + +### 安装 deb + +1、从官网下载获得deb安装包,比如TDengine-server-2.0.0.0-Linux-x64.deb; +2、进入到TDengine-server-2.0.0.0-Linux-x64.deb安装包所在目录,执行如下的安装命令: + +``` +plum@ubuntu:~/git/taosv16$ sudo dpkg -i TDengine-server-2.0.0.0-Linux-x64.deb + +Selecting previously unselected package tdengine. +(Reading database ... 233181 files and directories currently installed.) +Preparing to unpack TDengine-server-2.0.0.0-Linux-x64.deb ... +Failed to stop taosd.service: Unit taosd.service not loaded. +Stop taosd service success! +Unpacking tdengine (2.0.0.0) ... +Setting up tdengine (2.0.0.0) ... +Start to install TDEngine... +Synchronizing state of taosd.service with SysV init with /lib/systemd/systemd-sysv-install... +Executing /lib/systemd/systemd-sysv-install enable taosd +insserv: warning: current start runlevel(s) (empty) of script `taosd' overrides LSB defaults (2 3 4 5). +insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `taosd' overrides LSB defaults (0 1 6). +Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one : +To configure TDengine : edit /etc/taos/taos.cfg +To start TDengine : sudo systemctl start taosd +To access TDengine : use taos in shell +TDengine is installed successfully! 
+``` + +注:当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。 + +后续两种安装包也是同样的操作。 + +### 卸载 deb + +卸载命令如下: + +``` + plum@ubuntu:~/git/tdengine/debs$ sudo dpkg -r tdengine + (Reading database ... 233482 files and directories currently installed.) + Removing tdengine (2.0.0.0) ... + TDEngine is removed successfully! +``` + +## rpm包的安装和卸载 + +### 安装 rpm + +1、从官网下载获得rpm安装包,比如TDengine-server-2.0.0.0-Linux-x64.rpm; +2、进入到TDengine-server-2.0.0.0-Linux-x64.rpm安装包所在目录,执行如下的安装命令: + +``` + [root@bogon x86_64]# rpm -iv TDengine-server-2.0.0.0-Linux-x64.rpm + Preparing packages... + TDengine-2.0.0.0-3.x86_64 + Start to install TDEngine... + Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service. + Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one : + To configure TDengine : edit /etc/taos/taos.cfg + To start TDengine : sudo systemctl start taosd + To access TDengine : use taos in shell + TDengine is installed successfully! +``` + +### 卸载 rpm + +卸载命令如下: + +``` + [root@bogon x86_64]# rpm -e tdengine + TDEngine is removed successfully! +``` + +## tar.gz 格式安装包的安装和卸载 + +### 安装 tar.gz 安装包 + +1、从官网下载获得tar.gz安装包,比如TDengine-server-2.0.0.0-Linux-x64.tar.gz; +2、进入到TDengine-server-2.0.0.0-Linux-x64.tar.gz安装包所在目录,先解压文件后,进入子目录,执行其中的install.sh安装脚本: + +``` + plum@ubuntu:~/git/tdengine/release$ sudo tar -xzvf TDengine-server-2.0.0.0-Linux-x64.tar.gz + plum@ubuntu:~/git/tdengine/release$ ll + total 3796 + drwxr-xr-x 3 root root 4096 Aug 9 14:20 ./ + drwxrwxr-x 11 plum plum 4096 Aug 8 11:03 ../ + drwxr-xr-x 5 root root 4096 Aug 8 11:03 TDengine-server/ + -rw-r--r-- 1 root root 3871844 Aug 8 11:03 TDengine-server-2.0.0.0-Linux-x64.tar.gz + plum@ubuntu:~/git/tdengine/release$ cd TDengine-server/ + plum@ubuntu:~/git/tdengine/release/TDengine-server$ ll + total 2640 + drwxr-xr-x 5 root root 4096 Aug 8 11:03 ./ + drwxr-xr-x 3 root root 4096 Aug 9 14:20 ../ + drwxr-xr-x 5 root root 4096 Aug 8 11:03 connector/ + drwxr-xr-x 2 root root 4096 Aug 8 11:03 driver/ + drwxr-xr-x 8 root root 4096 Aug 8 11:03 examples/ + -rwxr-xr-x 1 root root 13095 Aug 8 11:03 install.sh* + -rw-r--r-- 1 root root 2651954 Aug 8 11:03 taos.tar.gz + plum@ubuntu:~/git/tdengine/release/TDengine-server$ sudo ./install.sh + This is ubuntu system + verType=server interactiveFqdn=yes + Start to install TDengine... + Synchronizing state of taosd.service with SysV init with /lib/systemd/systemd-sysv-install... + Executing /lib/systemd/systemd-sysv-install enable taosd + insserv: warning: current start runlevel(s) (empty) of script `taosd' overrides LSB defaults (2 3 4 5). + insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `taosd' overrides LSB defaults (0 1 6). + Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one :hostname.taosdata.com:7030 + To configure TDengine : edit /etc/taos/taos.cfg + To start TDengine : sudo systemctl start taosd + To access TDengine : use taos in shell + Please run: taos -h hostname.taosdata.com:7030 to login into cluster, then execute : create dnode 'newDnodeFQDN:port'; in TAOS shell to add this new node into the clsuter + TDengine is installed successfully! 
+```
+
+说明:install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 ./install.sh -h 指令可以查看所有参数的详细说明信息。
+
+### tar.gz 安装后的卸载
+
+卸载命令如下:
+
+```
+ plum@ubuntu:~/git/tdengine/release/TDengine-server$ rmtaos
+ TDEngine is removed successfully!
+```
+
+## 安装目录说明
+
+TDengine成功安装后,主安装目录是/usr/local/taos,目录内容如下:
+
+```
+ plum@ubuntu:/usr/local/taos$ cd /usr/local/taos
+ plum@ubuntu:/usr/local/taos$ ll
+ total 36
+ drwxr-xr-x 9 root root 4096 7月 30 19:20 ./
+ drwxr-xr-x 13 root root 4096 7月 30 19:20 ../
+ drwxr-xr-x 2 root root 4096 7月 30 19:20 bin/
+ drwxr-xr-x 2 root root 4096 7月 30 19:20 cfg/
+ lrwxrwxrwx 1 root root 13 7月 30 19:20 data -> /var/lib/taos/
+ drwxr-xr-x 2 root root 4096 7月 30 19:20 driver/
+ drwxr-xr-x 8 root root 4096 7月 30 19:20 examples/
+ drwxr-xr-x 2 root root 4096 7月 30 19:20 include/
+ drwxr-xr-x 2 root root 4096 7月 30 19:20 init.d/
+ lrwxrwxrwx 1 root root 13 7月 30 19:20 log -> /var/log/taos/
+```
+
+- 自动生成配置文件目录、数据库目录、日志目录。
+- 配置文件缺省目录:/etc/taos/taos.cfg, 软链接到/usr/local/taos/cfg/taos.cfg;
+- 数据库缺省目录:/var/lib/taos, 软链接到/usr/local/taos/data;
+- 日志缺省目录:/var/log/taos, 软链接到/usr/local/taos/log;
+- /usr/local/taos/bin目录下的可执行文件,会软链接到/usr/bin目录下;
+- /usr/local/taos/driver目录下的动态库文件,会软链接到/usr/lib目录下;
+- /usr/local/taos/include目录下的头文件,会软链接到/usr/include目录下;
+
+## 卸载和更新文件说明
+
+卸载安装包的时候,将保留配置文件、数据库文件和日志文件,即 /etc/taos/taos.cfg 、 /var/lib/taos 、 /var/log/taos 。如果用户确认后不需保留,可以手工删除,但一定要慎重,因为删除后,数据将永久丢失,不可以恢复!
+
+如果是更新安装,当缺省配置文件( /etc/taos/taos.cfg )存在时,仍然使用已有的配置文件,安装包中携带的配置文件修改为taos.cfg.org保存在 /usr/local/taos/cfg/ 目录,可以作为设置配置参数的参考样例;如果不存在配置文件,就使用安装包中自带的配置文件。
+
+## 注意事项
+
+- TDengine提供了多种安装包,但最好不要在一个系统上同时使用 tar.gz 安装包和 deb 或 rpm 安装包。否则会相互影响,导致在使用时出现问题。
+
+- deb 包安装后,如果部分安装目录被手工误删,会出现卸载或重新安装不成功的情况。此时,需要清除 tdengine 包的安装信息,执行如下命令:
+
+```
+ plum@ubuntu:~/git/tdengine/$ sudo rm -f /var/lib/dpkg/info/tdengine*
+```
+
+然后再重新进行安装就可以了。
+
+- rpm 包安装后,如果部分安装目录被手工误删,会出现卸载或重新安装不成功的情况。此时,需要清除 tdengine 包的安装信息,执行如下命令:
+
+```
+ [root@bogon x86_64]# rpm -e --noscripts tdengine
+```
+
+然后再重新进行安装就可以了。
diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index a6d8d89ba3e7d7084fc4488ac94b0926eaf97478..6cf41c65eba732f64a40156a9917875bcb28bfd7 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -2,15 +2,11 @@

## 快捷安装

-TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd,其中 2.4 之后版本默认使用单独运行的独立组件 taosAdapter 提供 http 服务,之前版本使用内置 http 服务。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。
+TDengine 包括服务端、客户端和周边生态工具软件,目前 2.0 版服务端仅在 Linux 系统上安装和运行,后续将支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。在任何操作系统上的应用都可以使用 RESTful 接口连接服务端程序 taosd,其中 2.4 之后版本默认使用单独运行的独立组件 taosAdapter 提供 http 服务和更多数据写入方式。taosAdapter 需要手动启动。而之前版本 TDengine 使用内置 http 服务。

-### 通过源码安装
-
-请参考我们的 [TDengine github 主页](https://github.com/taosdata/TDengine) 下载源码并安装.
+
+TDengine 支持 X64/ARM64/MIPS64/Alpha64 硬件平台,后续将支持 ARM32、RISC-V 等 CPU 架构。

-### 通过 Docker 容器运行
-
-暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OS X 和 Windows 环境下尝试 TDengine。
+### 通过 Docker 容器安装

```
docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
@@ -18,65 +14,55 @@ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengin

详细操作方法请参照 [通过 Docker 快速体验 TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。

+注:暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OS X 和 Windows 环境下尝试 TDengine。
+
### 通过安装包安装

-TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。为方便使用,标准的服务端安装包包含了客户端程序、各种编程语言的连接器和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,那么也可以仅下载 lite 版本的安装包。在安装包格式上,我们提供 rpm、deb、tar.gz 三种,以方便在特定操作系统上使用。版本还分稳定版和Beta版,Beta版含有更多新功能,正式上线或测试,建议安装稳定版。您可以根据需要选择下载:
+TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。为方便使用,标准的服务端安装包包含了客户端程序和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。在安装包格式上,我们提供 rpm 和 deb 格式,也为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。发布版本包括稳定版和 Beta 版,Beta版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载:

-具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
-
-## taosBenchmark 详细功能列表
+具体的安装方法,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/cn/getting-started/install) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。

-taosBenchmark (曾命名 taosdemo)命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosBenchmark --help` 详细列出。您可以设置不同参数进行体验。
-taosBenchmark 详细使用方法请参照 [如何使用taosBenchmark对TDengine进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
-
-## 客户端
+**请点击[这里](https://github.com/taosdata/TDengine/releases)查看 release notes。**

-如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。下载时请注意,所选择的客户端版本号应该和在上面下载的服务端版本号精确匹配。Linux 和 Windows 安装包如下(其中 lite 版本的安装包仅带有 C/C++ 语言的连接支持,而标准版本的安装包还包含 Java、Python、Go、Node.js 等编程语言的连接器支持和示例代码):
-
-
-
-## taosTools
-
-taosTools 是多个用于 TDengine 的辅助工具软件集合。
+### 使用 apt-get 安装

-推荐下载 deb 或 rpm 安装包,方便安装依赖软件。如果使用 tar.gz 格式安装包,需要自行安装依赖包。其中:
-
-* Debian/Ubuntu 系统需要安装 libjansson4 和 libsnappy1v5
-* CentOS/RHEL 系统需要安装 jansson 和 snappy
-
-以及 TDengine server 或 TDengine client 安装包
-
-
-
-
-## 使用 apt-get 安装
-
-如果使用 Debian 或 Ubuntu 系统,也可以使用 apt-get 从官方仓库安装,设置方法为:
+如果使用 Debian 或 Ubuntu 系统,也可以使用 apt-get 工具从官方仓库安装,设置方法为:

```
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
-[ beta 版安装包仓库为可选安装项 ] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
+[ 如需安装 Beta 版,请添加此安装包仓库 ] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
sudo apt-get update
apt-cache policy tdengine
sudo apt-get install tdengine
```
-
-## 轻松启动
+### 仅安装客户端
+
+如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。下载时请注意,所选择的客户端版本号应该和在上面下载的服务端版本号严格匹配。Linux 和 Windows 安装包如下(其中 lite 版本的安装包仅带有 C/C++ 语言的连接支持,而标准版本的安装包还包含连接器和示例代码):
+
+
+
+### 通过源码安装
+
+如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine github 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装。
+
+**下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/cn/all-downloads/)**

## 轻松启动

安装成功后,用户可使用 `systemctl` 命令来启动 TDengine 的服务进程。

```bash
-$ systemctl start taosd
+systemctl start taosd
```

检查服务是否正常工作:

```bash
-$ systemctl status taosd
+systemctl status taosd
```

如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。

@@ -88,30 +74,29 @@ $ systemctl status taosd

- TDengine 采用 FQDN (一般就是 hostname )作为节点的 ID,为保证正常运行,需要给运行 taosd 的服务器配置好 hostname,在客户端应用运行的机器配置好 DNS 服务或 hosts 文件,保证 FQDN 能够解析。
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。

-* TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 Linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:
+TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 Linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:

```bash
- $ which systemctl
+ which systemctl
```

如果系统中不支持 `systemd`,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。

-
-## TDengine 命令行程序
+## 使用 TDengine 客户端程序

-执行 TDengine 命令行程序,您只要在 Linux 终端执行 `taos` 即可。
+执行 TDengine 客户端程序,您只要在 Linux 终端执行 `taos` 即可。

```bash
-$ taos
+taos
```

-如果 TDengine 终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](https://www.taosdata.com/cn/documentation/faq/) 来解决终端连接服务端失败的问题)。TDengine 终端的提示符号如下:
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](https://www.taosdata.com/cn/documentation/faq/) 来解决终端连接服务端失败的问题)。客户端的提示符号如下:

```cmd
taos>
```

-在 TDengine 终端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
+在 TDengine 客户端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:

```mysql
create database demo;
@@ -127,23 +112,23 @@ select * from t;
Query OK, 2 row(s) in set (0.003128s)
```

-除执行 SQL 语句外,系统管理员还可以从 TDengine 终端进行检查系统运行状态、添加删除用户账号等操作。
+除执行 SQL 语句外,系统管理员还可以从 TDengine 客户端进行检查系统运行状态、添加删除用户账号等操作。

### 命令行参数

-您可通过配置命令行参数来改变 TDengine 终端的行为。以下为常用的几个命令行参数:
+您可通过配置命令行参数来改变 TDengine 客户端的行为。以下为常用的几个命令行参数:

- -c, --config-dir: 指定配置文件目录,默认为 `/etc/taos`
- -h, --host: 指定服务的 FQDN 地址或 IP 地址,默认为连接本地服务
- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
-- -u, --user: 连接 TDengine 服务器的用户名,缺省为 root
-- -p, --password: 连接TDengine服务器的密码,缺省为 taosdata
+- -u, --user: 连接 TDengine 服务端的用户名,缺省为 root
+- -p, --password: 连接 TDengine 服务端的密码,缺省为 taosdata
- -?, --help: 打印出所有命令行参数

示例:

```bash
-$ taos -h h1.taos.com -s "use db; show tables;"
+taos -h h1.taos.com -s "use db; show tables;"
```

### 运行 SQL 命令脚本

@@ -165,15 +150,25 @@ taos> source ;

## TDengine 极速体验

-启动 TDengine 的服务,在 Linux 终端执行 taosBenchmark (曾命名为 taosdemo,在 2.4 之后的版本请按照独立的 taosTools 软件包):
+### 使用 taosBenchmark 体验写入速度
+
+启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark` (曾命名为 taosdemo)。taosBenchmark 在 TDengine 2.4.0.7 及之前的版本中随 taosTools 安装包发布,在后续版本中将随 TDengine 标准安装包发布。

```bash
-$ taosBenchmark
+taosBenchmark
```

该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。

-执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
+这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器上往往也仅需十几秒。
+
+#### taosBenchmark 详细功能列表
+
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosBenchmark --help` 详细列出。您可以设置不同参数进行体验。
+
+taosBenchmark 详细使用方法请参照 [如何使用taosBenchmark对TDengine进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
+
+### 使用 taos shell 体验查询速度

在 TDengine 客户端输入查询命令,体验查询速度。

@@ -206,11 +201,10 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group
```mysql taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); ``` - ## 支持平台列表 -### TDengine 服务器支持的平台列表 +### TDengine 服务端支持的平台列表 | | **CentOS 7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | @@ -248,3 +242,4 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); 请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。 + diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index 8ad0f5e2c66e2fd897203649d889338842bda0d0..ff22c1ae0dede9af739ced37ff7bb6dada6cf81e 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -313,16 +313,6 @@ TCollector 是一个在客户侧收集本地收集器并发送数据到 OpenTSDB taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## 使用 Bailongma 2.0 接入 Prometheus 数据写入 - -**注意:** -TDengine 新版本(2.4.0.4+)包含 taosAdapter 组件,提供更简便的 Prometheus 数据写入以及其他更强大的功能,Bailongma v2 及之前版本将逐步不再维护。 - -## 使用 Bailongma 2.0 接入 Telegraf 数据写入 - -**注意:** -TDengine 新版本(2.3.0.0+)包含 taosAdapter 组件,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 及之前版本将逐步不再维护。 - ## EMQ Broker 直接写入 MQTT是流行的物联网数据传输协议,[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务的方式保存数据到 TDEngine,也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 0f29a6cec9547057c32193b477dc4bfdcc7106bf..9756a9d85403b3434fe9eedbab5aeea18041d29e 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -15,11 +15,13 @@ Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * 示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,并且一个 DB 中有 10 万张表,标签总长度是 256 字节,则这个 DB 总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。 在实际的系统运维中,我们通常会更关心 TDengine 服务进程(taosd)会占用的内存量。 + ``` taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存 ``` 其中: + 1. “vnode 内存”指的是集群中所有的 Database 存储分摊到当前 taosd 节点上所占用的内存资源。可以按上文“Database Memory Size”计算公式估算每个 DB 的内存占用量进行加总,再按集群中总共的 TDengine 节点数做平均(如果设置为多副本,则还需要乘以对应的副本倍数)。 2. “mnode 内存”指的是集群中管理节点所占用的资源。如果一个 taosd 节点上分布有 mnode 管理节点,则内存消耗还需要增加“0.2KB * 集群中数据表总数”。 3. 
“查询内存”指的是服务端处理查询请求时所需要占用的内存。单条查询语句至少会占用“0.2KB * 查询涉及的数据表总数”的内存量。 @@ -33,11 +35,13 @@ taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存 客户端应用采用 taosc 客户端驱动连接服务端,会有内存需求的开销。 客户端的内存开销主要由写入过程中的 SQL 语句、表的元数据信息缓存、以及结构性开销构成。系统最大容纳的表数量为 N(每个通过超级表创建的表的 meta data 开销约 256 字节),最大并行写入线程数量 T,最大 SQL 语句长度 S(通常是 1 Mbytes)。由此可以进行客户端内存开销的估算(单位 MBytes): + ``` M = (T * S * 3 + (N / 4096) + 100) ``` 举例如下:用户最大并发写入线程数 100,子表数总数 10,000,000,那么客户端的内存最低要求是: + ``` 100 * 3 + (10000000 / 4096) + 100 = 2741 (MBytes) ``` @@ -310,6 +314,7 @@ ALTER DNODE > debugFlag < 131 | 135 | 143 > 设置debugFlag为131、135或者143 例如: + ``` alter dnode 1 debugFlag 135; ``` @@ -347,25 +352,33 @@ taos -C 或 taos --dump-config 如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。 在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如: + ``` locale zh_CN.UTF-8 ``` + 在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,taos默认设置为字符编码为CP936。其等效在配置文件中添加如下配置: + ``` charset CP936 ``` + 如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。 在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且locale和charset的不一致,后设置的值将覆盖前面设置的值。 + ``` locale zh_CN.UTF-8 charset GBK ``` + 则charset的有效值是GBK。 + ``` charset GBK locale zh_CN.UTF-8 ``` + charset的有效值是UTF-8。 日志的配置参数,与server 的配置参数完全一样。 @@ -373,29 +386,37 @@ taos -C 或 taos --dump-config - timezone 默认值:动态获取当前客户端运行系统所在的时区。 - + 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。 在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如: + ``` timezone UTC-8 timezone GMT-8 timezone Asia/Shanghai ``` + 均是合法的设置东八区时区的格式。但需注意,Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`。 时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: + ```sql SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; ``` + 在东八区,SQL语句等效于 + ```sql SELECT count(*) FROM table_name WHERE TS<1554955268000; ``` + 在UTC时区,SQL语句等效于 + ```sql SELECT count(*) FROM table_name WHERE TS<1554984068000; ``` + 为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 启动taos时,也可以从命令行指定一个taosd实例的end point,否则就从taos.cfg读取。 @@ -457,6 +478,7 @@ TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。C ```mysql insert into tb1 file 'path/data.csv'; ``` + **注意:如果CSV文件首行存在描述信息,请手动删除后再导入。如某列为空,填NULL,无引号。** 例如,现在存在一个子表d1001, 其表结构如下: @@ -472,6 +494,7 @@ taos> DESCRIBE d1001 location | BINARY | 64 | TAG | groupid | INT | 4 | TAG | ``` + 要导入的data.csv的格式如下: ```csv @@ -485,6 +508,7 @@ taos> DESCRIBE d1001 '2018-10-11 06:38:05.000',17.30000,219,0.32000 '2018-10-12 06:38:05.000',18.30000,219,0.31000 ``` + 那么可以用如下命令导入数据: ```mysql @@ -494,7 +518,7 @@ Query OK, 9 row(s) affected (0.004763s) **taosdump工具导入** -TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html)。 +TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见:[TDengine 数据备份工具: taosdump](/tools/taosdump)。 ## 数据导出 @@ -578,12 +602,14 @@ chmod +x TDinsight.sh 准备: 1. TDengine Server 信息: - * TDengine RESTful 服务:对本地而言,可以是 http://localhost:6041 ,使用参数 `-a`。 + + * TDengine RESTful 服务:对本地而言,可以是 `http://localhost:6041`,使用参数 `-a`。 * TDengine 用户名和密码,使用 `-u` `-p` 参数设置。 2. 
Grafana 告警通知 + * 使用已经存在的Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。 - + ```bash sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E ``` @@ -602,7 +628,7 @@ chmod +x TDinsight.sh -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' ``` -运行程序并重启 Grafana 服务,打开面板:http://localhost:3000/d/tdinsight。 +运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。 更多使用场景和限制请参考[TDinsight](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsight.md) 文档。 @@ -654,7 +680,7 @@ TDengine 使用 Linux 系统的 systemd/systemctl/service 来管理系统的启 以 systemctl 为例,命令如下: -- 启动服务进程:`systemctl start taosd` +- 启动服务进程:`systemctl start taosd` - 停止服务进程:`systemctl stop taosd` @@ -663,15 +689,17 @@ TDengine 使用 Linux 系统的 systemd/systemctl/service 来管理系统的启 - 查看服务状态:`systemctl status taosd` 如果服务进程处于活动状态,则 status 指令会显示如下的相关信息: + ``` ...... -Active: active (running) +Active: active (running) ...... ``` 如果后台服务进程处于停止状态,则 status 指令会显示如下的相关信息: + ``` ...... @@ -681,6 +709,7 @@ Active: inactive (dead) ``` 卸载 TDengine,只需要执行如下命令: + ``` rmtaos ``` @@ -771,11 +800,12 @@ rmtaos | COPY | IF | NOW | STABLES | WHERE | ## 转义字符说明 + - 转义字符表(转义符的功能从 2.4.0.4 版本开始) | 字符序列 | **代表的字符** | | :--------: | ------- | - | `\'` | 单引号' | + | `\'` | 单引号' | | `\"` | 双引号" | | \n | 换行符 | | \r | 回车符 | @@ -791,6 +821,7 @@ rmtaos 2. 数据里有转义字符 1. 遇到上面定义的转义字符会转义(%和_见下面说明),如果没有匹配的转义字符会忽略掉转义符\。 2. 对于%和_,因为在like里这两个字符是通配符,所以在模式匹配like里用`\%`%和`\_`表示字符里本身的%和_,如果在like模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是%和_。 + ## 诊断及其他 #### 网络连接诊断 @@ -864,7 +895,7 @@ rmtaos 针对多台服务器组成的集群,当服务启动过程耗时较长时,可通过该命令行来诊断每台服务器的 taosd 实例的启动状态,以准确定位问题。 -`taos -n rpc -h ` +`taos -n rpc -h ` 该命令用来诊断已经启动的 taosd 实例的端口是否可正常访问。如果 taosd 程序异常或者失去响应,可以通过 `taos -n rpc -h ` 来发起一个与指定 fqdn 的 rpc 通信,看看 taosd 是否能收到,以此来判定是网络问题还是 taosd 程序异常问题。 @@ -909,7 +940,9 @@ taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往 - taosdlog 服务器端生成的日志,记录taosinfo中全部信息外,还根据设置的日志输出级别,记录DEBUG(日志级别135)、TRACE(日志级别是 143)。 ### 客户端日志 + 每个独立运行的客户端(一个进程)生成一个独立的客户端日志,其命名方式采用 taoslog+<序号> 的方式命名。文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。 + - taoslog 客户端(driver)生成的日志,默认记录客户端INFO/ERROR/WARNING 级别日志,还根据设置的日志输出级别,记录DEBUG(日志级别135)、TRACE(日志级别是 143)。 其中,日志文件最大长度由 numOfLogLines 来进行配置,一个 taosd 实例最多保留两个文件。 diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index ac722648fefe6cbc1b6ac2fef57b06bb8ced2156..c0bd283bcdcb58d8abd6cde761817f9a4c1268c0 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -1,6 +1,7 @@ # TDengine Documentation -TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. In addition to this document, you should also download and read the technology white paper. +TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. 
In addition to this document, you should also download and read the technology white paper. + ## [TDengine Introduction](/evaluation) * [TDengine Introduction and Features](/evaluation#intro) @@ -20,7 +21,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [Data Model](/architecture#model): relational database model, but one table for one data collection point with static tags - [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL architecture, high availability and horizontal scalability -- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, sharded by vnodes and partitioned by time +- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, sharded by vnodes and partitioned by time - [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement sent back to client, while supporting data replications - [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio - [Data Query](/architecture#query): support various SQL functions, downsampling, interpolation, and multi-table aggregation @@ -84,7 +85,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series * [taosAdapter](/tools/adapter): a bridge/adapter between TDengine cluster and applications. * [TDinsight](/tools/insight): monitoring TDengine cluster with Grafana. * [taosdump](/tools/taosdump): backup tool for TDengine. Please install `taosTools` package for it. -* [taosBenchmark](/tools/taosbenchmark): stress test tool for TDengine. Please install `taosTools` package for it. +* [taosBenchmark](/tools/taosbenchmark): stress test tool for TDengine. 
## [Connections with Other Tools](/connections)
@@ -92,11 +93,13 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [MATLAB](/connections#matlab): access data stored in TDengine server via JDBC configured within MATLAB
- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
+- [TDengineGUI](https://github.com/skye0207/TDengineGUI): a cross-platform graphical management tool for TDengine built with Electron
+- [DataX](https://github.com/taosdata/datax): a data migration tool that supports TDengine

## [Installation and Management of TDengine Cluster](/cluster)

- [Preparation](/cluster#prepare): important steps before deploying TDengine for production usage
-- [Create the First Node](/cluster#node-one): just follow the steps in quick start
+- [Create the First Node](/cluster#node-one): just follow the steps in quick start
- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through replicas
@@ -118,6 +121,12 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
- [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords

+## Rapidly build an IT DevOps system with TDengine
+
+* [devops](/devops/telegraf): Rapidly build an IT DevOps system with TDengine + Telegraf + Grafana
+* [devops](/devops/collectd): Rapidly build an IT DevOps system with TDengine + collectd/StatsD + Grafana
+* [immigration](/devops/immigrate): Best practices for migrating from OpenTSDB to TDengine
+
## Performance: TDengine vs Others

- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/09/12/710.html)
diff --git a/documentation20/en/02.getting-started/01.docker/docs.md b/documentation20/en/02.getting-started/01.docker/docs.md
index f5d48db8abe99b96bc565bc5daf03b4f758060c7..a5ad96e940ba78786ab2341b1051a8a18661367b 100644
--- a/documentation20/en/02.getting-started/01.docker/docs.md
+++ b/documentation20/en/02.getting-started/01.docker/docs.md
@@ -1,6 +1,6 @@
-# Quickly experience TDengine with Docker
+# Quickly Experience TDengine with Docker

-While it is not recommended to deploy TDengine services via Docker in a production environment, Docker tools do a good job of shielding the environmental differences in the underlying operating system and are well suited for use in development testing or first-time experience with the toolset for installing and running TDengine. In particular, Docker makes it relatively easy to try TDengine on Mac OSX and Windows systems without having to install a virtual machine or rent an additional Linux server. In addition, starting from version 2.0.14.0, TDengine provides images that support both X86-64, X86, arm64, and arm32 platforms, so non-mainstream computers that can run docker, such as NAS, Raspberry Pi, and embedded development boards, can also easily experience TDengine based on this document.
+While it is not recommended to deploy TDengine services via Docker in a production environment, Docker tools do a good job of shielding the environmental differences in the underlying operating system and are well suited for use in development testing or first-time experience with the toolset for installing and running TDengine. In particular, Docker makes it relatively easy to try TDengine on Mac OSX and Windows systems without having to install a virtual machine or rent an additional Linux server. In addition, starting from version 2.0.14.0, TDengine provides images that support both X86-64, X86, arm64, and arm32 platforms, so non-mainstream computers that can run docker, such as NAS, Raspberry Pi, and embedded development boards, can also easily experience TDengine based on this document.

The following article explains how to quickly build a single-node TDengine runtime environment via Docker to support development and testing through a Step by Step style introduction.
diff --git a/documentation20/en/02.getting-started/02.taosdemo/docs.md b/documentation20/en/02.getting-started/02.taosdemo/docs.md
index 2fd09ef3d3774d1bc47091c9eaa4020d6f937bc0..9a3ad5a0c1e431a0667e8c8980b5e0e5f6840095 100644
--- a/documentation20/en/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/en/02.getting-started/02.taosdemo/docs.md
@@ -1,11 +1,11 @@
Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark customized parameters.
+Running taosBenchmark is very simple. Just download the [TDengine installation package](https://www.taosdata.com/cn/all-downloads/) or compile the [TDengine code](https://github.com/taosdata/TDengine). taosBenchmark can then be found and run in the installation directory or in the build output directory.

-Running taosBenchmark is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compiling the TDengine code yourself (https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory.
+# To run an insertion test with taosBenchmark

-To run an insertion test with taosBenchmark
---
Executing taosBenchmark without any parameters results in the following output.
+
```
$ taosBenchmark

@@ -70,6 +70,7 @@ Query OK, 6 row(s) in set (0.002972s)
```
After pressing any key taosBenchmark will create the database test and super table meters and generate 10,000 sub-tables representing 10,000 individule meter devices that report data. That means they independently using the super table meters as a template according to TDengine data modeling best practices.
+
```
taos> use test;
Database changed.
@@ -91,7 +92,9 @@ taos> show stables;
meters | 2021-08-27 11:21:01.209 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001740s)
```
+
Then taosBenchmark generates 10,000 records for each meter device.
+
```
...
====thread[3] completed total inserted rows: 6250000, total affected rows: 6250000. 347626.22 records/second====
@@ -108,9 +111,11 @@ Spent 18.0863 seconds to insert rows: 100000000, affected rows: 100000000 with 1
insert delay, avg: 28.64ms, max: 112.92ms, min: 9.35ms
```
+
The above information is the result of a real test on a normal PC server with 8 CPUs and 64G RAM. It shows that taosBenchmark inserted 100,000,000 (no need to count, 100 million) records in 18 seconds, or an average of 552,909,049 records per second.

TDengine also offers a parameter-bind interface for better performance, and using the parameter-bind interface (taosBenchmark -I stmt) on the same hardware for the same amount of data writes, the results are as follows.
+
```
...

@@ -145,12 +150,13 @@ Spent 6.0257 seconds to insert rows: 100000000, affected rows: 100000000 with 16
insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms
```

-It shows that taosBenchmark inserted 100 million records in 6 seconds, with a much more higher insertion performance, 1,659,590 records wer inserted per second.
+It shows that taosBenchmark inserted 100 million records in 6 seconds, with much higher insertion performance: 16,595,590 records were inserted per second.

Because taosBenchmark is so easy to use, so we have extended it with more features to support more complex parameter settings for sample data preparation and validation for rapid prototyping.

The complete list of taosBenchmark command-line arguments can be displayed via taosBenchmark --help as follows.
+
```
$ taosBenchmark --help

@@ -197,52 +203,70 @@ Report bugs to .
```
taosBenchmark's parameters are designed to meet the needs of data simulation. A few commonly used parameters are described below.
+
```
-I, --interface=INTERFACE The interface (taosc, rest, and stmt) taosBenchmark uses. Default is 'taosc'.
```
+
The performance difference between different interfaces of taosBenchmark has been mentioned earlier, the -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. The -I parameter is used to select different interfaces, currently taosc, stmt and rest are supported. taosc uses SQL statements to write data, stmt uses parameter binding interface to write data, and rest uses RESTful protocol to write data.
```
-T, --threads=NUMBER The number of threads. Default is 8.
```
+
The -T parameter sets how many threads taosBenchmark uses to synchronize data writes, so that multiple threads can squeeze as much processing power out of the hardware as possible.
+
```
-b, --data-type=DATATYPE The data_type of columns, default: FLOAT, INT, FLOAT.

-w, --binwidth=WIDTH The width of data_type 'BINARY' or 'NCHAR'. Default is 64
-
+
-l, --columns=COLUMNS The number of columns per record. Demo mode by default is 3 (float, int, float). Max values is 4095
```
+
As mentioned earlier, tadosdemo creates a typical meter data reporting scenario by default, with each device containing three columns. They are current, voltage and phases. TDengine supports BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, BINARY, NCHAR, TIMESTAMP data types. By using -b with a list of types allows you to specify the column list with customized data type. Using -w to specify the width of the columns of the BINARY and NCHAR data types (default is 64).
The -l parameter can be added to the columns of the data type specified by the -b parameter with the total number of columns of the INT type, which reduces the manual input process in case of a particularly large number of columns, up to 4095 columns. + ``` -r, --rec-per-req=NUMBER The number of records per request. Default is 30000. ``` + To reach TDengine performance limits, data insertion can be executed by using multiple clients, multiple threads, and batch data insertions at once. The -r parameter sets the number of records batch that can be stitched together in a single write request, the default is 30,000. The effective number of spliced records is also related to the client buffer size, which is currently 1M Bytes. If the record column width is large, the maximum number of spliced records can be calculated by dividing 1M by the column width (in bytes). + ``` -t, --tables=NUMBER The number of tables. Default is 10000. -n, --records=NUMBER The number of records per table. Default is 10000. -M, --random The value of records generated are totally random. The default is to simulate power equipment scenario. ``` + As mentioned earlier, taosBenchmark creates 10,000 tables by default, and each table writes 10,000 records. taosBenchmark can set the number of tables and the number of records in each table by -t and -n. The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter. + ``` -y, --answer-yes Default input yes for prompt. ``` + As we can see above, taosBenchmark outputs a list of parameters for the upcoming operation by default before creating a database or inserting data, so that the user can know what data is about to be written before inserting. To facilitate automatic testing, the -y parameter allows taosBenchmark to write data immediately after outputting the parameters. + ``` -O, --disorder=NUMBER Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order. -R, --disorder-range=NUMBER Out of order data's range, ms, default is 1000. ``` + In some scenarios, the received data does not arrive in exact order, but contains a certain percentage of out-of-order data, which TDengine can also handle very well. In order to simulate the writing of out-of-order data, tadosdemo provides -O and -R parameters to be set. The -O parameter is the same as the -O parameter for fully ordered data writes. 1 to 50 is the percentage of data that contains out-of-order data. The -R parameter is the range of the timestamp offset of the out-of-order data, default is 1000 milliseconds. Also note that temporal data is uniquely identified by a timestamp, so garbled data may generate the exact same timestamp as previously written data, and such data may either be discarded (update 0) or overwrite existing data (update 1 or 2) depending on the update value created by the database, and the total number of data entries may not match the expected number of entries. + ``` -g, --debug Print debug info. 
 ```
+
 If you are interested in the taosBenchmark insertion process, or if the data insertion result is not as expected, you can use the -g parameter to make taosBenchmark print debugging information during execution to the screen, or redirect it to a file with the Linux redirect command to easily find the cause of a problem. In addition, taosBenchmark also outputs the corresponding executed statements and debugging reasons to the screen after an execution fails. You can search for the word "reason" to find the error information returned by the TDengine server.
+
 ```
 -x, --aggr-func Test aggregation funtions after insertion.
 ```
+
 TDengine is not only very powerful in insertion performance, but also in query performance, due to its advanced database engine design. taosBenchmark provides the -x option, which performs the usual query operations and outputs the query consumption time after the data insertion. The following is the result of common queries after inserting 100 million rows on the aforementioned server. You can see that the `select *` fetch of 100 million rows (not output to the screen) consumes only 1.26 seconds. Most normal aggregation functions over 100 million records take only about 20 milliseconds, and even the longest count function takes less than 40 milliseconds.
+
 ```
 taosBenchmark -I stmt -T 48 -y -x
 ...
@@ -264,7 +288,9 @@
 select min(current) took 0.025812 second(s)
 select first(current) took 0.024105 second(s)
 ...
 ```
+
 In addition to the command-line approach, taosBenchmark also supports taking a JSON file as an input parameter to provide a richer set of settings. A typical JSON file would look like this.
+
 ```
 {
     "filetype": "insert",
@@ -273,17 +299,17 @@ In addition to the command line approach, taosBenchmark also supports take a JSO
     "port": 6030,
     "user": "root",
     "password": "taosdata",
-    "thread_count": 4,
-    "thread_count_create_tbl": 4,
-    "result_file": "./insert_res.txt",
-    "confirm_parameter_prompt": "no",
-    "insert_interval": 0,
-    "interlace_rows": 100,
+    "thread_count": 4,
+    "thread_count_create_tbl": 4,
+    "result_file": "./insert_res.txt",
+    "confirm_parameter_prompt": "no",
+    "insert_interval": 0,
+    "interlace_rows": 100,
     "num_of_records_per_req": 100,
     "databases": [{
         "dbinfo": {
             "name": "db",
-            "drop": "yes",
+            "drop": "yes",
             "replica": 1,
             "days": 10,
             "cache": 16,
@@ -301,39 +327,41 @@ In addition to the command line approach, taosBenchmark also supports take a JSO
         },
         "super_tables": [{
             "name": "stb",
-            "child_table_exists":"no",
-            "childtable_count": 100,
-            "childtable_prefix": "stb_",
-            "auto_create_table": "no",
-            "batch_create_tbl_num": 5,
-            "data_source": "rand",
-            "insert_mode": "taosc",
-            "insert_rows": 100000,
-            "childtable_limit": 10,
-            "childtable_offset":100,
-            "interlace_rows": 0,
-            "insert_interval":0,
-            "max_sql_len": 1024000,
-            "disorder_ratio": 0,
-            "disorder_range": 1000,
-            "timestamp_step": 10,
-            "start_timestamp": "2020-10-01 00:00:00.000",
-            "sample_format": "csv",
-            "sample_file": "./sample.csv",
-            "tags_file": "",
+            "child_table_exists":"no",
+            "childtable_count": 100,
+            "childtable_prefix": "stb_",
+            "auto_create_table": "no",
+            "batch_create_tbl_num": 5,
+            "data_source": "rand",
+            "insert_mode": "taosc",
+            "insert_rows": 100000,
+            "childtable_limit": 10,
+            "childtable_offset":100,
+            "interlace_rows": 0,
+            "insert_interval":0,
+            "max_sql_len": 1024000,
+            "disorder_ratio": 0,
+            "disorder_range": 1000,
+            "timestamp_step": 10,
+            "start_timestamp": "2020-10-01 00:00:00.000",
+            "sample_format": "csv",
+            "sample_file": "./sample.csv",
+            "tags_file": "",
             "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
             "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
         }]
     }]
 }
 ```
+
 For example, we can specify different numbers of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to have multiple taosBenchmark processes (even on different computers) write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file".
 
-Use taosBenchmark for query and subscription testing
---
+## Use taosBenchmark for query and subscription testing
+
 taosBenchmark can not only write data, but also perform query and subscription operations. However, a taosBenchmark instance can only support one of these functions, not all three; a configuration file is used to specify which function to test.
 
 The following is the content of a typical query JSON example file.
+
 ```
 {
   "filetype": "query",
@@ -373,7 +401,9 @@ The following is the content of a typical query JSON example file.
   }
 }
 ```
+
 The following parameters are specific to the query in the JSON file.
+
 ```
 "query_times": the number of queries per query type
 "query_mode": query data interface, "taosc": call TDengine's C interface; "restful": use the RESTful interface. Optional. Default is "taosc".
@@ -392,6 +422,7 @@ The following parameters are specific to the query in the JSON file.
 ```
 
 The following is a typical subscription JSON example file content.
+
 ```
 {
   "filetype":"subscribe",
@@ -404,34 +435,36 @@ The following is a typical subscription JSON example file content.
   "confirm_parameter_prompt": "no",
   "specified_table_query":
     {
-      "concurrent":1,
-      "mode":"sync",
-      "interval":0,
-      "restart":"yes",
+      "concurrent":1,
+      "mode":"sync",
+      "interval":0,
+      "restart":"yes",
       "keepProgress":"yes",
       "sqls": [
         {
-          "sql": "select * from stb00_0 ;",
+          "sql": "select * from stb00_0 ;",
           "result": "./subscribe_res0.txt"
         }]
     },
-    "super_table_query":
+    "super_table_query":
     {
       "stblname": "stb0",
-      "threads":1,
-      "mode":"sync",
-      "interval":10000,
-      "restart":"yes",
+      "threads":1,
+      "mode":"sync",
+      "interval":10000,
+      "restart":"yes",
       "keepProgress":"yes",
       "sqls": [
       {
-        "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
+        "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
        "result": "./subscribe_res1.txt"
       }]
    }
 }
 ```
+
 The following are the meanings of the parameters specific to the subscription function.
+
 ```
 "interval": interval for executing subscriptions, in seconds. Optional, default is 0.
 "restart": subscription restart. "yes": restart the subscription if it already exists, "no": continue the previous subscription. (Please note that the executing user needs to have read/write access to the dataDir directory)
@@ -439,11 +472,12 @@ The following are the meanings of the parameters specific to the subscription fu
 "resubAfterConsume": Used in conjunction with keepProgress to call unsubscribe after the subscription has been consumed the appropriate number of times and to subscribe again.
 "result": the name of the file to which the query result is written. Optional; default is null, which means the query result will not be written to a file. Note: the file that saves the result of each SQL statement cannot be renamed, and the thread number is appended to the file name when the result file is generated.
 ```
 
-Conclusion
---
+
+## Conclusion
+
 TDengine is a big data platform designed and optimized for IoT, Telematics, Industrial Internet, DevOps, etc. TDengine shows a high performance that far exceeds similar products due to the innovative data storage and query engine design in the database kernel. And with SQL syntax support and connectors for multiple programming languages (currently Java, Python, Go, C#, NodeJS, Rust, etc. are supported), it is extremely easy to use and has zero learning cost. To facilitate operation and maintenance needs, we also provide data migration and monitoring functions and other related ecological tools and software.
 
-For users who are new to TDengine, we have developed rich features for taosBenchmark to facilitate technical evaluation and stress testing. This article is a brief introduction to taosBenchmark, which will continue to evolve and improve as new features are added to TDengine.
+For users who are new to TDengine, we have developed rich features for taosBenchmark to facilitate technical evaluation and stress testing. This article is a brief introduction to taosBenchmark, which will continue to evolve and improve as new features are added to TDengine. As part of TDengine, taosBenchmark's source code is fully open on GitHub. Suggestions or advice about the use or implementation of taosBenchmark or TDengine are welcome on GitHub or in the Taos Data user group.
diff --git a/documentation20/en/02.getting-started/03.install/docs.md b/documentation20/en/02.getting-started/03.install/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..d12619cd3a79d6d83c93d9cd31a2a6b7fc296c6b
--- /dev/null
+++ b/documentation20/en/02.getting-started/03.install/docs.md
@@ -0,0 +1,185 @@
+
+# How to install/uninstall TDengine with installation package
+
+TDengine open source version provides `deb` and `rpm` format installation packages. Our users can choose the appropriate installation package according to their own running environment. The `deb` package supports Debian/Ubuntu etc. and the `rpm` package supports CentOS/RHEL/SUSE etc. We also provide `tar.gz` format installers for enterprise users.
+
+## Install and uninstall deb package
+
+### Install deb package
+
+- Download and obtain the deb installation package from the official website, such as TDengine-server-2.0.0.0-Linux-x64.deb.
+- Go to the directory where the TDengine-server-2.0.0.0-Linux-x64.deb installation package is located and execute the following installation command.
+
+```
+plum@ubuntu:~/git/taosv16$ sudo dpkg -i TDengine-server-2.0.0.0-Linux-x64.deb
+
+Selecting previously unselected package tdengine.
+(Reading database ... 233181 files and directories currently installed.)
+Preparing to unpack TDengine-server-2.0.0.0-Linux-x64.deb ...
+Failed to stop taosd.service: Unit taosd.service not loaded.
+Stop taosd service success!
+Unpacking tdengine (2.0.0.0) ...
+Setting up tdengine (2.0.0.0) ...
+Start to install TDEngine...
+Synchronizing state of taosd.service with SysV init with /lib/systemd/systemd-sysv-install...
+Executing /lib/systemd/systemd-sysv-install enable taosd
+insserv: warning: current start runlevel(s) (empty) of script `taosd' overrides LSB defaults (2 3 4 5).
+insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `taosd' overrides LSB defaults (0 1 6).
+Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one :
+To configure TDengine : edit /etc/taos/taos.cfg
+To start TDengine : sudo systemctl start taosd
+To access TDengine : use taos in shell
+TDengine is installed successfully!
+```
+
+Note: When the "Enter FQDN:" prompt appears during installation of the first node, nothing needs to be entered. Only when installing the second or later nodes is it necessary to enter the FQDN of any available node in the existing cluster, so that the new node can join the cluster. It is of course also possible to leave it blank and instead configure it in the new node's configuration file before the node starts.
+
+The same operation applies to the other installation package formats.
+
+### Uninstall deb
+
+The uninstall command is as follows:
+
+```
+ plum@ubuntu:~/git/tdengine/debs$ sudo dpkg -r tdengine
+ (Reading database ... 233482 files and directories currently installed.)
+ Removing tdengine (2.0.0.0) ...
+ TDEngine is removed successfully!
+```
+
+## Install and uninstall rpm package
+
+### Install rpm
+
+- Download and obtain the rpm installation package from the official website, such as TDengine-server-2.0.0.0-Linux-x64.rpm.
+- Go to the directory where the TDengine-server-2.0.0.0-Linux-x64.rpm installation package is located and execute the following installation command.
+
+```
+ [root@bogon x86_64]# rpm -iv TDengine-server-2.0.0.0-Linux-x64.rpm
+ Preparing packages...
+ TDengine-2.0.0.0-3.x86_64
+ Start to install TDEngine...
+ Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service.
+ Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one :
+ To configure TDengine : edit /etc/taos/taos.cfg
+ To start TDengine : sudo systemctl start taosd
+ To access TDengine : use taos in shell
+ TDengine is installed successfully!
+```
+
+### Uninstall rpm
+
+The uninstall command is as follows:
+
+```
+ [root@bogon x86_64]# rpm -e tdengine
+ TDEngine is removed successfully!
+```
+
+## Install and uninstall tar.gz
+
+### Install tar.gz
+
+- Download and obtain the tar.gz installation package from the official website, such as `TDengine-server-2.0.0.0-Linux-x64.tar.gz`.
+- Go to the directory where the `TDengine-server-2.0.0.0-Linux-x64.tar.gz` installation package is located, unzip the file, then enter the subdirectory and execute the install.sh installation script, as follows:
+
+```
+ plum@ubuntu:~/git/tdengine/release$ sudo tar -xzvf TDengine-server-2.0.0.0-Linux-x64.tar.gz
+ plum@ubuntu:~/git/tdengine/release$ ll
+ total 3796
+ drwxr-xr-x 3 root root 4096 Aug 9 14:20 ./
+ drwxrwxr-x 11 plum plum 4096 Aug 8 11:03 ../
+ drwxr-xr-x 5 root root 4096 Aug 8 11:03 TDengine-server/
+ -rw-r--r-- 1 root root 3871844 Aug 8 11:03 TDengine-server-2.0.0.0-Linux-x64.tar.gz
+ plum@ubuntu:~/git/tdengine/release$ cd TDengine-server/
+ plum@ubuntu:~/git/tdengine/release/TDengine-server$ ll
+ total 2640
+ drwxr-xr-x 5 root root 4096 Aug 8 11:03 ./
+ drwxr-xr-x 3 root root 4096 Aug 9 14:20 ../
+ drwxr-xr-x 5 root root 4096 Aug 8 11:03 connector/
+ drwxr-xr-x 2 root root 4096 Aug 8 11:03 driver/
+ drwxr-xr-x 8 root root 4096 Aug 8 11:03 examples/
+ -rwxr-xr-x 1 root root 13095 Aug 8 11:03 install.sh*
+ -rw-r--r-- 1 root root 2651954 Aug 8 11:03 taos.tar.gz
+ plum@ubuntu:~/git/tdengine/release/TDengine-server$ sudo ./install.sh
+ This is ubuntu system
+ verType=server interactiveFqdn=yes
+ Start to install TDengine...
+ Synchronizing state of taosd.service with SysV init with /lib/systemd/systemd-sysv-install...
+ Executing /lib/systemd/systemd-sysv-install enable taosd
+ insserv: warning: current start runlevel(s) (empty) of script `taosd' overrides LSB defaults (2 3 4 5).
+ insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `taosd' overrides LSB defaults (0 1 6).
+ Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one :hostname.taosdata.com:7030
+ To configure TDengine : edit /etc/taos/taos.cfg
+ To start TDengine : sudo systemctl start taosd
+ To access TDengine : use taos in shell
+ Please run: taos -h hostname.taosdata.com:7030 to login into cluster, then execute : create dnode 'newDnodeFQDN:port'; in TAOS shell to add this new node into the clsuter
+ TDengine is installed successfully!
+```
+
+Note: The install.sh script asks for some configuration information through an interactive command-line interface during execution. If you prefer a non-interactive installation, you can execute the install.sh script with the -e no parameter. Run the ./install.sh -h command to see detailed information about all parameters.
+
+### Uninstall TDengine after tar.gz package installation
+
+The uninstall command is as follows:
+
+```
+ plum@ubuntu:~/git/tdengine/release/TDengine-server$ rmtaos
+ TDEngine is removed successfully!
+```
+
+## Installation directory description
+
+After TDengine is successfully installed, the main installation directory is /usr/local/taos, and the directory contents are as follows:
+
+```
+ plum@ubuntu:/usr/local/taos$ cd /usr/local/taos
+ plum@ubuntu:/usr/local/taos$ ll
+ total 36
+ drwxr-xr-x 9 root root 4096 7 30 19:20 ./
+ drwxr-xr-x 13 root root 4096 7 30 19:20 ../
+ drwxr-xr-x 2 root root 4096 7 30 19:20 bin/
+ drwxr-xr-x 2 root root 4096 7 30 19:20 cfg/
+ lrwxrwxrwx 1 root root 13 7 30 19:20 data -> /var/lib/taos/
+ drwxr-xr-x 2 root root 4096 7 30 19:20 driver/
+ drwxr-xr-x 8 root root 4096 7 30 19:20 examples/
+ drwxr-xr-x 2 root root 4096 7 30 19:20 include/
+ drwxr-xr-x 2 root root 4096 7 30 19:20 init.d/
+ lrwxrwxrwx 1 root root 13 7 30 19:20 log -> /var/log/taos/
+```
+
+- The configuration file directory, database directory, and log directory are generated automatically during installation.
+- Configuration file default directory: /etc/taos/taos.cfg, softlinked to /usr/local/taos/cfg/taos.cfg.
+- Database default directory: /var/lib/taos, softlinked to /usr/local/taos/data.
+- Log default directory: /var/log/taos, softlinked to /usr/local/taos/log.
+- Executables are in the /usr/local/taos/bin directory, soft-linked to the /usr/bin directory.
+- Dynamic library files are in the /usr/local/taos/driver directory, soft-linked to the /usr/lib directory.
+- Header files are in the /usr/local/taos/include directory, soft-linked to the /usr/include directory.
+
+## Uninstall and update file instructions
+
+When uninstalling the installation package, the configuration files, database files and log files will be kept, i.e. /etc/taos/taos.cfg, /var/lib/taos, /var/log/taos. If users confirm that they do not need to keep them, they can delete them manually, but be careful: once deleted, the data is permanently lost and cannot be recovered!
+
+When updating an installation, if the default configuration file (/etc/taos/taos.cfg) exists, the existing configuration file is still used, and the configuration file carried in the installation package is renamed to taos.cfg.org and saved in the /usr/local/taos/cfg/ directory, where it can be used as a reference sample for setting configuration parameters; if the configuration file does not exist, the configuration file that comes with the installation package is used.
+
+## Caution
+
+- TDengine provides several installers, but it is best not to use both the tar.gz installer and the deb or rpm installer on one system. Otherwise, they may affect each other and cause problems.
+
+- For a deb package installation, if the installation directory is manually deleted by mistake, uninstallation or reinstallation cannot succeed. In this case, you need to clear the installation information of the tdengine package by executing the following command:
+
+```
+ plum@ubuntu:~/git/tdengine/$ sudo rm -f /var/lib/dpkg/info/tdengine*
+```
+
+Then just reinstall it.
+
+- Similarly for an rpm package: if the installation directory is manually deleted by mistake, uninstallation or reinstallation cannot succeed. In this case, you need to clear the installation information of the tdengine package by executing the following command:
+
+```
+ [root@bogon x86_64]# rpm -e --noscripts tdengine
+```
+
+Then just reinstall it.
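To tie the two cautions above together, the following is a minimal recovery sketch, assuming the package name `tdengine` and the example installer file names used throughout this guide; adjust both for your actual version:

```bash
#!/bin/bash
# Hedged recovery sketch: clear leftover package metadata, then reinstall.
# The installer file names below are the examples used in this guide.
if command -v dpkg >/dev/null 2>&1 && dpkg -s tdengine >/dev/null 2>&1; then
    sudo rm -f /var/lib/dpkg/info/tdengine*        # clear stale dpkg install info
    sudo dpkg -i TDengine-server-2.0.0.0-Linux-x64.deb
elif command -v rpm >/dev/null 2>&1 && rpm -q tdengine >/dev/null 2>&1; then
    sudo rpm -e --noscripts tdengine               # remove without running scriptlets
    sudo rpm -iv TDengine-server-2.0.0.0-Linux-x64.rpm
fi
```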
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index c3f08b49432a9abd3e29d73a68ead06ec47dcf9b..53cb2f2b194d93b653b2821356cd0e2f180e7c8d 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -2,31 +2,33 @@
 ## Quick Install
 
-TDengine software consists of 3 parts: server, client, and alart module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. From 2.4 and later version, TDengine use a stand-alone software, taosAdapteer to provide http service. The early version uses the http server embedded in the taosd. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
+TDengine includes server, client, and ecological software and peripheral tools. Currently, version 2.0 of the server can only be installed and run on Linux and will support Windows, macOS, and other OSes in the future. The client can be installed and run on Windows or Linux. Applications on any operating system can use the RESTful interface to connect to the taosd server. After 2.4, TDengine includes taosAdapter to provide an easy-to-use and efficient way to ingest data, including a RESTful service. taosAdapter needs to be started manually as a stand-alone component. Earlier versions use an embedded HTTP component to provide the RESTful interface.
 
-### Install from Source
-
-Please visit our [TDengine github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
-
-### Install from Docker Container
+TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms and will support ARM32, RISC-V, and other CPU architectures in the future.
 
-For the time being, it is not recommended to use Docker to deploy the client or server side of TDengine in production environments, but it is convenient to use Docker to deploy in development environments or when trying it for the first time. In particular, with Docker, it is easy to try TDengine in Mac OS X and Windows environments.
+### Install with Docker Container
 
 ```
 docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
 ```
 
-Please refer to [Quickly experience TDengine with Docker](https://www.taosdata.com/en/documentation/getting-started/docker) for the details.
+Please refer to [Quickly Taste TDengine with Docker](https://www.taosdata.com/en/documentation/getting-started/docker) for the details.
+
+For the time being, using Docker to deploy the client or server of TDengine for production environments is not recommended. However, it is a convenient way to deploy TDengine for development purposes. In particular, it is easy to try TDengine in Mac OS X and Windows environments with Docker.
 
 ### Install from Package
 
-Three different packages for TDengine server are provided, please pick up the one you like. (Lite packages only have execution files and connector of C/C++, but standard packages support connectors of nearly all programming languages.) Beta version has more features, but we suggest you to install stable version for production or testing.
+TDengine is very easy to install, from download to successful installation in just a few seconds. For ease of use, the standard server installation package includes the client application and sample code; if you only need the server application and C/C++ language support for the client connection, you can also download the lite version of the installation package. The installation packages are available in `rpm` and `deb` formats, as well as `tar.gz` format for enterprise customers who need to use it on specific operating systems. Releases include both stable and beta releases. We recommend the stable release for production use or testing. The beta release may contain more new features. You can choose to download from the following as needed:
+
+For detailed installation steps, please refer to [How to install/uninstall TDengine with installation package](https://www.taosdata.com/getting-started/install).
 
-Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package.
+**Click [here](https://github.com/taosdata/TDengine/releases) for release notes.**
 
 ### Install TDengine by apt-get
 
-If you use Debian or Ubuntu system you can use 'apt-get' command to install TDengine from official repository. Please use following commands to setup:
+If you use a Debian or Ubuntu system, you can use the `apt-get` command to install TDengine from the official repository. Please use the following commands to set it up:
 
 ```
 wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
@@ -37,18 +39,30 @@ apt-get policy tdengine
 sudo apt-get install tdengine
 ```
 
+### Install client only
+
+If the client and server are running on different computers, you can install the client separately. When downloading, please note that the selected client version number should strictly match the server version number downloaded above. Linux and Windows installation packages are as follows (the lite version of the installer comes with connection support for the C/C++ language only, while the standard version of the installer also contains sample code):
+
+### Install from Source
+
+If you want to contribute to TDengine, please visit [TDengine GitHub page](https://github.com/taosdata/TDengine) for detailed instructions on build and installation from the source code.
+
+**To download other components, beta versions, or early releases, please click [here](https://www.taosdata.com/en/all-downloads/).**
+
 ## Quick Launch
 
 After installation, you can start the TDengine service by the `systemctl` command.
 
 ```bash
-$ systemctl start taosd
+systemctl start taosd
 ```
 
 Then check if the service is working now.
 
 ```bash
-$ systemctl status taosd
+systemctl status taosd
 ```
 
 If the service is running successfully, you can play around through TDengine shell `taos`.
@@ -56,25 +70,25 @@ If the service is running successfully, you can play around through TDengine she
 **Note:**
 
 - The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
-- To get better product feedback and improve our solution, TDengine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off.
-- TDengine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved.
-- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service management, and uses `which systemctl` command to detect whether `systemd` packages exist in the system:
+- To get better product feedback and improve our solution, TDengine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file `taos.cfg`, and set it to 0 to turn it off.
+- TDengine uses FQDN (usually hostname) as the node ID. To ensure normal operation, you need to set the host's name for the server running `taosd`, and configure DNS service or hosts file for the machine running the client application, to ensure the FQDN can be resolved.
+- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service management and uses `which systemctl` command to detect whether `systemd` packages exist in the system:
 
   ```bash
-  $ which systemctl
+  which systemctl
  ```
 
 If `systemd` is not supported in the system, TDengine service can also be launched via `/usr/local/taos/bin/taosd` manually.
 
 ## TDengine Shell Command Line
 
-To launch TDengine shell, the command line interface, in a Linux terminal, type:
+To launch TDengine shell, the command-line interface, in a Linux terminal, type:
 
 ```bash
-$ taos
+taos
 ```
 
-The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
+The welcome message is printed if the shell connects to the TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
 
 ```cmd
 taos>
@@ -110,49 +124,59 @@ Besides the SQL commands, the system administrator can check system status, add
 ### Shell Command Line Parameters
 
-You can configure command parameters to change how TDengine shell executes. Some frequently used options are listed below:
+You can configure command parameters to change how the TDengine shell executes. Some frequently used options are listed below:
 
 - -c, --config-dir: set the configuration directory. It is */etc/taos* by default.
 - -h, --host: set the IP address of the server it will connect to. Default is localhost.
 - -s, --commands: set the command to run without entering the shell.
-- -u, -- user: user name to connect to server. Default is root.
+- -u, --user: user name to connect to the server/cluster. Default is root.
 - -p, --password: password. Default is 'taosdata'.
 - -?, --help: get a full list of supported options.
 
 Examples:
 
 ```bash
-$ taos -h 192.168.0.1 -s "use db; show tables;"
+taos -h 192.168.0.1 -s "use db; show tables;"
 ```
 
 ### Run SQL Command Scripts
 
-Inside TDengine shell, you can run SQL scripts in a file with source command.
+Inside TDengine shell, you can run SQL scripts in a file with the `source` command.
 
 ```mysql
 taos> source <filename>;
 ```
 
-### Shell Tips
+### taos shell tips
 
-- Use up/down arrow key to check the command history
-- To change the default password, use "alter user" command
+- Use the up/down arrow key to check the command history
+- To change the default password, use the `alter user` command
 - Use ctrl+c to interrupt any queries
-- To clean the schema of local cached tables, execute command `RESET QUERY CACHE`
+- To clean the schema of locally cached tables, execute the command `RESET QUERY CACHE`
+
+## Taste TDengine’s Lightning Speed
 
-## Experience TDengine’s Lightning Speed
+### Taste insertion speed with taosBenchmark
 
-After starting the TDengine server, you can execute the command `taosBenchmark` (was named `taosdemo`, please install taosTools package if you use TDengine 2.4 or later version) in the Linux terminal.
+Once the TDengine server has started, you can execute the command `taosBenchmark` (which was formerly named `taosdemo`) in the Linux terminal. In the 2.4.0.7 and earlier releases, taosBenchmark is distributed within the taosTools package. In later releases, taosBenchmark will be included within TDengine again.
 
 ```bash
-$ taosBenchmark
+taosBenchmark
 ```
 
-Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`. The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".
+Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `d0` to `d9999`. In each table, there are 100k rows of records, each row with the columns `ts`, `current`, `voltage`, and `phase`. The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `location` and `groupId`: `groupId` is set from 1 to 10, `location` is set to "beijing" or "shanghai".
+
+Once execution is finished, 1 billion rows of records will be inserted. It usually takes about a dozen seconds to execute this command on a normal PC server, but the time may differ depending on the particular hardware platform's performance.
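For a quicker (or heavier) run than the default scenario, the data size can be scaled with the options that `taosBenchmark --help` lists; the following is a hedged sketch, and defaults may differ between versions:

```bash
# Illustrative sketch: 1,000 tables with 10,000 rows each, 16 insert threads,
# and -y to answer the confirmation prompt automatically.
taosBenchmark -t 1000 -n 10000 -T 16 -y
```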
+
+### Using taosBenchmark in detail
+
+You can run the command `taosBenchmark` with many options, like the number of tables, rows of records, and so on. To know more about these options, you can execute `taosBenchmark --help` and then try it out with different options.
 
-It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted.
+For more details on how to use taosBenchmark, please refer to [How to use taosBenchmark to test the performance of TDengine](https://tdengine.com/2021/10/09/3114.html).
 
-In the TDengine client, enter sql query commands and then experience our lightning query speed.
+### Taste query speed with taos shell
+
+In the TDengine client, enter SQL query commands and then taste our lightning query speed.
 
 - query total rows of records:
 
@@ -160,7 +184,7 @@ In the TDengine client, enter sql query commands and then experience our lightni
 taos> select count(*) from test.meters;
 ```
 
-- query average, max and min of the total 1 billion records:
+- query average, max, and min of the total 1 billion records:
 
 ```mysql
 taos> select avg(f1), max(f2), min(f3) from test.meters;
 ```
@@ -184,11 +208,6 @@
 taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
 
 taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
 ```
 
-### Using taosBenchmark in detail
-
-you can run command `taosBenchmark` with many options, like number of tables, rows of records and so on. To know more about these options, you can execute `taosBenchmark --help` and then take a try using different options.
-Please refer to [How to use taosBenchmark to test the performance of TDengine](https://tdengine.com/2021/10/09/3114.html) for detail.
-
 ## List of Supported Platforms
 
 List of platforms supported by TDengine server
@@ -209,7 +228,7 @@ Note: ● has been verified by official tests; ○ has been verified by unoffici
 
 List of platforms supported by TDengine client and connectors
 
-At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and operating system such as Linux/Win64/Win32.
+At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and operating systems such as Linux/Win64/Win32.
 
 Comparison matrix as following:
@@ -227,3 +246,5 @@
 Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
 
 Please visit Connectors section for more detailed information.
+
+
diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md
index ed7aae32a75295c0f6405a14c9bcca76c3b1b602..65098044a6cedfe740c0829798a6ff0f30fb0139 100644
--- a/documentation20/en/05.insert/docs.md
+++ b/documentation20/en/05.insert/docs.md
@@ -304,16 +304,6 @@ TCollector is a client-side process that gathers data from local collectors and
 
 Please find taosAdapter configuration and usage from `taosadapter --help` output.
 
-## Insert Prometheus data via Bailongma 2.0
-
-**Notice:**
-TDengine 2.4.0.4+ provides taosAdapter to support Prometheus data writing. Bailongma v2 will be abandoned and no more maintained.
-
-## Insert data via Bailongma 2.0 and Telegraf
-
-**Notice:**
-TDengine 2.3.0.0+ provides taosAdapter to support Telegraf data writing. Bailongma v2 will be abandoned and no more maintained.
- ## Data Writing via EMQ Broker [EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software, with no need of coding, only to use "rules" in EMQ Dashboard for simple configuration, and MQTT data can be directly written into TDengine. EMQ X supports storing data to the TDengine by sending it to a Web service, and also provides a native TDengine driver on Enterprise Edition for direct data store. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details. diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index 20936f22ad3aef8d4314d866948317154438640f..e8b1f7ded774e8ef6e649158f64ad543f88c0aa8 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -175,39 +175,44 @@ Client configuration parameters: - secondEp: when taos starts, if unable to connect to firstEp, it will try to connect to secondEp. - locale Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API - + TDengine provides a special field type nchar for storing non-ASCII encoded wide characters such as Chinese, Japanese and Korean. The data written to the nchar field will be uniformly encoded in UCS4-LE format and sent to the server. It should be noted that the correctness of coding is guaranteed by the client. Therefore, if users want to normally use nchar fields to store non-ASCII characters such as Chinese, Japanese, Korean, etc., it’s needed to set the encoding format of the client correctly. - + The characters inputted by the client are all in the current default coding format of the operating system, mostly UTF-8 on Linux systems, and some Chinese system codes may be GB18030 or GBK, etc. The default encoding in the docker environment is POSIX. In the Chinese versions of Windows system, the code is CP936. The client needs to ensure that the character set it uses is correctly set, that is, the current encoded character set of the operating system running by the client, in order to ensure that the data in nchar is correctly converted into UCS4-LE encoding format. - + The naming rules of locale in Linux are: < language > _ < region >. < character set coding >, such as: zh_CN.UTF-8, zh stands for Chinese, CN stands for mainland region, and UTF-8 stands for character set. Character set encoding provides a description of encoding transformations for clients to correctly parse local strings. Linux system and Mac OSX system can determine the character encoding of the system by setting locale. Because the locale used by Windows is not the POSIX standard locale format, another configuration parameter charset is needed to specify the character encoding under Windows. You can also use charset to specify character encoding in Linux systems. - charset Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API - + If charset is not set in the configuration file, in Linux system, when taos starts up, it automatically reads the current locale information of the system, and parses and extracts the charset encoding format from the locale information. 
If the automatic reading of locale information fails, an attempt is made to read the charset configuration, and if the reading of the charset configuration also fails, the startup process is interrupted.
-
+
 In Linux system, locale information contains character encoding information, so it is unnecessary to set charset separately after setting locale of Linux system correctly. For example:
-
+
 ```
 locale zh_CN.UTF-8
 ```
+
 On Windows systems, the current system encoding cannot be obtained from locale. If string encoding information cannot be read from the configuration file, taos defaults to CP936. It is equivalent to adding the following to the configuration file:
+
 ```
 charset CP936
 ```
+
 If you need to adjust the character encoding, check the encoding used by the current operating system and set it correctly in the configuration file.
-
+
 In Linux systems, if user sets both locale and charset encoding charset, and the locale and charset are inconsistent, the value set later will override the value set earlier.
+
 ```
 locale zh_CN.UTF-8
 charset GBK
 ```
+
 The valid value for charset is GBK.
-
+
 And the valid value for charset is UTF-8.
-
+
 The configuration parameters of log are exactly the same as those of server.
 
 - timezone
@@ -217,31 +222,35 @@ Client configuration parameters:
 
 The time zone in which the client runs the system. In order to deal with the problem of data writing and query in multiple time zones, TDengine uses Unix Timestamp to record and store timestamps. The characteristics of UNIX timestamps determine that the generated timestamps are consistent at any time regardless of any time zone. It should be noted that UNIX timestamps are converted and recorded on the client side. In order to ensure that other forms of time on the client are converted into the correct Unix timestamp, the correct time zone needs to be set.
 
 In Linux system, the client will automatically read the time zone information set by the system. Users can also set time zones in profiles in a number of ways. For example:
+
 ```
 timezone UTC-8
 timezone GMT-8
 timezone Asia/Shanghai
 ```
-
+
 All above are legal to set the format of the East Eight Zone.
-
+
 The setting of time zone affects the content of non-Unix timestamp (timestamp string, parsing of keyword now) in query and writing SQL statements. For example:
 
 ```sql
 SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
 ```
-
+
 In East Eight Zone, the SQL statement is equivalent to
+
 ```sql
 SELECT count(*) FROM table_name WHERE TS<1554955268000;
 ```
-
+
 In the UTC time zone, the SQL statement is equivalent to
+
 ```sql
 SELECT count(*) FROM table_name WHERE TS<1554984068000;
 ```
+
 In order to avoid the uncertainty caused by using string time format, Unix timestamp can also be used directly. In addition, timestamp strings with time zones can also be used in SQL statements, such as: timestamp strings in RFC3339 format, 2013-04-12T15:52:01.123+08:00, or ISO-8601 format timestamp strings 2013-04-12T15:52:01.123+0800. The conversion of the above two strings into Unix timestamps is not affected by the time zone in which the system is located.
-
+
 When starting taos, you can also specify an end point for an instance of taosd from the command line, otherwise read from taos.cfg.
 
 - maxBinaryDisplayWidth
@@ -340,7 +349,7 @@ Query OK, 9 row(s) affected (0.004763s)
 
 **Import via taosdump tool**
 
-TDengine provides a convenient database import and export tool, taosdump. Users can import data exported by taosdump from one system into other systems.
Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html). +TDengine provides a convenient database import and export tool, taosdump. Users can import data exported by taosdump from one system into other systems. Please refer to [backup tool for TDengine - taosdump](/tools/taosdump). ## Export Data @@ -428,7 +437,7 @@ Some CLI options are needed to use the script: 2. Grafana alerting notifications. There's two ways to setup this: 1. To use existing Grafana notification channel with `uid`, option `-E`. The `uid` could be retrieved with `curl -u admin:admin localhost:3000/api/alert-notifications |'.[]| .uid + "," + .name' -r`, then use it like this: - + ```bash sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E ``` diff --git a/examples/rust b/examples/rust new file mode 160000 index 0000000000000000000000000000000000000000..1c8924dc668e6aa848214c2fc54e3ace3f5bf8df --- /dev/null +++ b/examples/rust @@ -0,0 +1 @@ +Subproject commit 1c8924dc668e6aa848214c2fc54e3ace3f5bf8df diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 9e15c06612b50a04a59a3226f3bf7e6789431333..42110657a6d9c32d37637c034a84ae34cbedba8e 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -59,6 +59,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin if [ -f "${compile_dir}/build/bin/taosadapter" ]; then cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||: diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index 4f847f949a25e157261cc42b20ece0c9072e328f..f6a0950e24cc1bd065e5f60f05998e000b74ecec 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -4,6 +4,7 @@ WORKDIR /root ARG pkgFile ARG dirName +ARG cpuType RUN echo ${pkgFile} && echo ${dirName} COPY ${pkgFile} /root/ @@ -21,6 +22,11 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ EXPOSE 6030-6049 EXPOSE 6030-6039/udp COPY ./bin/* /usr/bin/ -ENTRYPOINT ["/usr/bin/entrypoint.sh"] + +ENV TINI_VERSION v0.19.0 +RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."' +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini +RUN chmod +x /tini +ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"] CMD ["taosd"] VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ] diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh index c0e52f6dce5a95f3a073251fd8f221c648e4e7f9..3729131c0e20859488d0a7c0c100463c818aaf8c 100755 --- a/packaging/docker/dockerbuild.sh +++ b/packaging/docker/dockerbuild.sh @@ -89,7 +89,7 @@ cp -f ${comunityArchiveDir}/${pkgFile} . echo "dirName=${dirName}" -docker build --rm -f "Dockerfile" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} +docker build --rm -f "Dockerfile" --network=host -t tdengine/tdengine-${dockername}:${version} "." 
--build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuType} docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker push tdengine/tdengine-${dockername}:${version} @@ -98,4 +98,4 @@ docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${docker docker push tdengine/tdengine-${dockername}:latest -rm -f ${pkgFile} \ No newline at end of file +rm -f ${pkgFile} diff --git a/packaging/release.sh b/packaging/release.sh index 46f42736d75cfef0ce9c265d0c006166086cc031..207444377c1195762506ac2ada8338b3bd105885 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -312,9 +312,12 @@ if [ "$osType" != "Darwin" ]; then echo "====do tar.gz package for all systems====" cd ${script_dir}/tools - ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} - ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} - ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + if [ "$verMode" == "cluster" ]; then + ${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} +# ${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} +# ${csudo}./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} + fi + else # only make client for Darwin cd ${script_dir}/tools diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 3992f7b6a867b7aacc9b479820c20dd7745dd54f..dcc615132896e25c7f18398643576608b344f58e 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -68,6 +68,8 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin + if [ -f %{_compiledir}/build/bin/taosadapter ]; then cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||: fi diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index f8d4bf167d32302c89e7307b4a83fe5428f05913..b9a5ad35947a117e2c673701ce244e0d74cefcde 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -192,6 +192,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : + ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* @@ -201,6 +202,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -s ${install_main_dir}/bin/taosBenchmark ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x 
${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || : @@ -565,7 +567,7 @@ function install_data() { } function install_connector() { - ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ + [ -d "${script_dir}/connector/" ] && ${csudo}cp -rf ${script_dir}/connector/ ${install_main_dir}/ } function install_examples() { @@ -691,6 +693,10 @@ function install_service_on_systemd() { ${service_config_dir}/ || : ${csudo}systemctl daemon-reload + [ -f ${script_dir}/cfg/nginxd.service ] && + ${csudo}cp ${script_dir}/cfg/nginxd.service \ + ${service_config_dir}/ || : + if ! ${csudo}systemctl enable nginxd &>/dev/null; then ${csudo}systemctl daemon-reexec ${csudo}systemctl enable nginxd @@ -820,9 +826,9 @@ function update_TDengine() { install_log install_header install_lib - if [ "$pagMode" != "lite" ]; then - install_connector - fi +# if [ "$pagMode" != "lite" ]; then +# install_connector +# fi install_examples if [ -z $1 ]; then install_bin @@ -879,7 +885,7 @@ function update_TDengine() { echo -e "\033[44;32;1m${productName} client is updated successfully!${NC}" fi - rm -rf $(tar -tf ${tarName}) + rm -rf $(tar -tf ${tarName} |grep -v "^\./$") } function install_TDengine() { @@ -976,7 +982,7 @@ function install_TDengine() { fi touch ~/.${historyFile} - rm -rf $(tar -tf ${tarName}) + rm -rf $(tar -tf ${tarName} |grep -v "^\./$") } ## ==============================Main program starts from here============================ diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 7ba35a36ebd0aad231e7e5c0f3a383253ac2002a..f9f9d8b68bcf5061c3c3c76efbb706750c27ca33 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -3,7 +3,7 @@ # Generate tar.gz package for all os system set -e -#set -x +set -x curr_dir=$(pwd) compile_dir=$1 @@ -54,11 +54,21 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/${serverName} strip ${build_dir}/bin/${clientName} # lite version doesn't include taosadapter, which will lead to no restful interface - bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh" + bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark" taostools_bin_files="" else + + wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \ + && echo "TDinsight.sh downloaded!" 
\ + || echo "failed to download TDinsight.sh" + + taostools_bin_files=" ${build_dir}/bin/taosdump \ + ${build_dir}/bin/TDinsight.sh " + bin_files="${build_dir}/bin/${serverName} \ ${build_dir}/bin/${clientName} \ + ${build_dir}/bin/taosBenchmark \ + ${taostools_bin_files} \ ${build_dir}/bin/taosadapter \ ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh \ @@ -66,9 +76,6 @@ else ${script_dir}/run_taosd_and_taosadapter.sh \ ${script_dir}/startPre.sh \ ${script_dir}/taosd-dump-cfg.gdb" - - taostools_bin_files=" ${build_dir}/bin/taosdump \ - ${build_dir}/bin/taosBenchmark" fi lib_files="${build_dir}/lib/libtaos.so.${version}" @@ -119,36 +126,36 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${se mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : -if [ -n "${taostools_bin_files}" ]; then - mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}" - mkdir -p ${taostools_install_dir}/bin \ - && cp ${taostools_bin_files} ${taostools_install_dir}/bin \ - && chmod a+x ${taostools_install_dir}/bin/* || : - - if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then - cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \ - ${taostools_install_dir}/ > /dev/null \ - && chmod a+x ${taostools_install_dir}/install-taostools.sh \ - || echo -e "failed to copy install-taostools.sh" - else - echo -e "install-taostools.sh not found" - fi - - if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then - cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \ - ${taostools_install_dir}/ > /dev/null \ - && chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \ - || echo -e "failed to copy uninstall-taostools.sh" - else - echo -e "uninstall-taostools.sh not found" - fi - - if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then - mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro" - cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib - cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig - fi -fi +#if [ -n "${taostools_bin_files}" ]; then +# mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}" +# mkdir -p ${taostools_install_dir}/bin \ +# && cp ${taostools_bin_files} ${taostools_install_dir}/bin \ +# && chmod a+x ${taostools_install_dir}/bin/* || : + +# if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then +# cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \ +# ${taostools_install_dir}/ > /dev/null \ +# && chmod a+x ${taostools_install_dir}/install-taostools.sh \ +# || echo -e "failed to copy install-taostools.sh" +# else +# echo -e "install-taostools.sh not found" +# fi + +# if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh ]; then +# cp ${top_dir}/src/kit/taos-tools/packaging/tools/uninstall-taostools.sh \ +# ${taostools_install_dir}/ > /dev/null \ +# && chmod a+x ${taostools_install_dir}/uninstall-taostools.sh \ +# || echo -e "failed to copy uninstall-taostools.sh" +# else +# echo -e "uninstall-taostools.sh not found" +# fi + +# if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then +# mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create 
${taostools_install_dir}/avro" +# cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib +# cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig +# fi +#fi if [ -f ${build_dir}/bin/jemalloc-config ]; then mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} @@ -310,13 +317,14 @@ if [ "$exitcode" != "0" ]; then exit $exitcode fi -if [ -n "${taostools_bin_files}" ]; then - tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || : - exitcode=$? - if [ "$exitcode" != "0" ]; then - echo "tar ${taostools_pkg_name}.tar.gz error !!!" - exit $exitcode - fi -fi +#if [ -n "${taostools_bin_files}" ]; then +# wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!"|| echo "failed to download TDinsight.sh" +# tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || : +# exitcode=$? +# if [ "$exitcode" != "0" ]; then +# echo "tar ${taostools_pkg_name}.tar.gz error !!!" +# exit $exitcode +# fi +#fi cd ${curr_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index e0da436a2ec3e4217d531bdc3a4c85a4152bc071..93849dd4ebef00512854b4dfff8b57f4b44f7797 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -107,6 +107,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : ${csudo}rm -f ${bin_link_dir}/rmtaos || : @@ -118,7 +119,8 @@ function install_bin() { [ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || : - [ -x ${bin_dir}/taosdemo ] && ${csudo}ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || : + [ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : } diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index 27dc830a1f189d196a5b937f71dbafc776d035d4..2f35e41a48a438d86a7dc6ca71511ce967ba7ebf 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -121,6 +121,7 @@ clean_service ${csudo}rm -f ${bin_link_dir}/taos || : ${csudo}rm -f ${bin_link_dir}/taosd || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : +${csudo}rm -f ${bin_link_dir}/taosBenchmark || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${cfg_link_dir}/*.new || : diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 2eff41fe41df4c0dd01283f4e4d972148b0003ac..14b9688eb4b42bfecd2fbc78afba66f1118f5d45 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -83,12 +83,14 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : 
${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taosBenchmark || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/tarbitrator || : ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || : + ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : } function clean_lib() { diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 3dfaae820e0be57947569fd46c99e53c3effb214..1f84fa27d7ccfc32337365295b80da873c1053f9 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -242,7 +242,7 @@ SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, - int16_t size); + int32_t size); size_t tscNumOfExprs(SQueryInfo* pQueryInfo); int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo); diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c index 02c5604ab427efca2227c90947c9ae80a84892fe..d01e1fcae3b4824959dced85f31b3cc252cda6c5 100644 --- a/src/client/src/tscGlobalmerge.c +++ b/src/client/src/tscGlobalmerge.c @@ -440,6 +440,15 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu rlen += pExpr->base.resBytes; } + int32_t pg = DEFAULT_PAGE_SIZE; + int32_t overhead = sizeof(tFilePage); + while((pg - overhead) < rlen * 2) { + pg *= 2; + } + + if (*nBufferSizes < pg){ + *nBufferSizes = 2 * pg; + } int32_t capacity = 0; if (rlen != 0) { if ((*nBufferSizes) < rlen) { @@ -447,19 +456,13 @@ int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBu } capacity = (*nBufferSizes) / rlen; } - + pModel = createColumnModel(pSchema, (int32_t)size, capacity); tfree(pSchema); if (pModel == NULL){ return TSDB_CODE_TSC_OUT_OF_MEMORY; } - int32_t pg = DEFAULT_PAGE_SIZE; - int32_t overhead = sizeof(tFilePage); - while((pg - overhead) < pModel->rowSize * 2) { - pg *= 2; - } - assert(numOfSub <= pTableMetaInfo->vgroupList->numOfVgroups); for (int32_t i = 0; i < numOfSub; ++i) { (*pMemBuffer)[i] = createExtMemBuffer(*nBufferSizes, rlen, pg, pModel); @@ -593,7 +596,7 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput } } -static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr, int32_t rowIndex, char** pDataPtr) { +static void doMergeResultImpl(SOperatorInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr, int32_t rowIndex, char** pDataPtr) { for (int32_t j = 0; j < numOfExpr; ++j) { pCtx[j].pInput = pDataPtr[j] + pCtx[j].inputBytes * rowIndex; } @@ -605,12 +608,19 @@ static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, i } if (functionId < 0) { - SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); + SUdfInfo* pUdfInfo = taosArrayGet(((SMultiwayMergeInfo*)(pInfo->info))->udfInfo, -1 * functionId - 1); doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE); } else { assert(!TSDB_FUNC_IS_SCALAR(functionId)); aAggs[functionId].mergeFunc(&pCtx[j]); } + + if (functionId == TSDB_FUNC_UNIQUE && + (GET_RES_INFO(&(pCtx[j]))->numOfRes > MAX_UNIQUE_RESULT_ROWS || GET_RES_INFO(&(pCtx[j]))->numOfRes == -1)){ + tscError("Unique result num is too large. 
num: %d, limit: %d", + GET_RES_INFO(&(pCtx[j]))->numOfRes, MAX_UNIQUE_RESULT_ROWS); + longjmp(pInfo->pRuntimeEnv->env, TSDB_CODE_QRY_UNIQUE_RESULT_TOO_LARGE); + } } } @@ -644,7 +654,7 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD for(int32_t i = 0; i < pBlock->info.rows; ++i) { if (pInfo->hasPrev) { if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) { - doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr); + doMergeResultImpl(pOperator, pCtx, numOfExpr, i, addrPtr); } else { doFinalizeResultImpl(pInfo, pCtx, numOfExpr); @@ -656,7 +666,7 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD for(int32_t j = 0; j < numOfExpr; ++j) { pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows); if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM || - pCtx[j].functionId == TSDB_FUNC_SAMPLE) { + pCtx[j].functionId == TSDB_FUNC_SAMPLE || pCtx[j].functionId == TSDB_FUNC_UNIQUE) { if(j > 0) pCtx[j].ptsOutputBuf = pCtx[j - 1].pOutput; } } @@ -671,10 +681,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD } } - doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr); + doMergeResultImpl(pOperator, pCtx, numOfExpr, i, addrPtr); } } else { - doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr); + doMergeResultImpl(pOperator, pCtx, numOfExpr, i, addrPtr); } savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, i, &pInfo->hasPrev); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b18b76c9672e6bebf02a4d2fa064b16192f37630..5e61d8e0c1b8e23976933260f84efdb168099642 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1065,13 +1065,14 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { * if the top/bottom exists, only tags columns, tbname column, and primary timestamp column * are available. 
 */
-static bool isTopBottomQuery(SQueryInfo* pQueryInfo) {
+static bool isTopBottomUniqueQuery(SQueryInfo* pQueryInfo) {
   size_t size = tscNumOfExprs(pQueryInfo);
 
   for (int32_t i = 0; i < size; ++i) {
     int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
 
-    if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
+    if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM
+        || functionId == TSDB_FUNC_UNIQUE) {
       return true;
     }
   }
@@ -1112,20 +1113,7 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql
 static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
   const char* msg1 = "invalid query expression";
   const char* msg2 = "top/bottom query does not support order by value in time window query";
-
-  // for top/bottom + interval query, we do not add additional timestamp column in the front
-  if (isTopBottomQuery(pQueryInfo)) {
-
-    // invalid sql:
-    // top(col, k) from table_name [interval(1d)|session(ts, 1d)] order by k asc
-    // order by normal column is not supported
-    int32_t colId = pQueryInfo->order.orderColId;
-    if (isTimeWindowQuery(pQueryInfo) && colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
-      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
-    }
-
-    return TSDB_CODE_SUCCESS;
-  }
+  const char* msg3 = "unique function is not supported in time window query";
 
   /*
    * invalid sql:
@@ -1137,6 +1125,9 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
     if (pExpr->base.functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
     }
+    if (pExpr->base.functionId == TSDB_FUNC_UNIQUE) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+    }
   }
 
   /*
@@ -1147,6 +1138,20 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }
 
+  // for top/bottom + interval query, we do not add additional timestamp column in the front
+  if (isTopBottomUniqueQuery(pQueryInfo)) {
+
+    // invalid sql:
+    // top(col, k) from table_name [interval(1d)|session(ts, 1d)] order by k asc
+    // order by normal column is not supported
+    int32_t colId = pQueryInfo->order.orderColId;
+    if (isTimeWindowQuery(pQueryInfo) && colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+    }
+
+    return TSDB_CODE_SUCCESS;
+  }
+
   return addPrimaryTsColumnForTimeWindowQuery(pQueryInfo, pCmd);
 }
 
@@ -1225,7 +1230,7 @@ static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
   const char* msg1 = "invalid column name";
   const char* msg2 = "invalid column type";
   const char* msg3 = "not support state_window with group by ";
-  const char* msg4 = "function not support for super table query";
+  const char* msg4 = "state_window is not supported for super table query";
   const char* msg5 = "not support state_window on tag column";
   const char* msg6 = "function not support for state_window";
 
@@ -2658,7 +2663,7 @@ static UNUSED_FUNC void updateFunctionInterBuf(SQueryInfo* pQueryInfo, bool supe
 int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem,
                               bool finalResult, SUdfInfo* pUdfInfo) {
   STableMetaInfo* pTableMetaInfo = NULL;
-  int32_t functionId = pItem->pNode->functionId;
+  int32_t         functionId = pItem->pNode->functionId;
 
   const char* msg1 = "unsupported column types";
   const char* msg2 = "invalid parameters";
@@ -2688,7 +2693,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
   const char* msg26 = "start param cannot be 0 with 'log_bin'";
   const char* msg27 = "factor param cannot be negative or equal to 0/1";
   const char* msg28 = "the second paramter of diff should be 0 or 1";
-
+  const char* msg29 = "key timestamp column cannot be used in unique function";
 
   switch (functionId) {
     case TSDB_FUNC_COUNT: {
@@ -2697,13 +2702,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
       }
 
-      SExprInfo* pExpr = NULL;
+      SExprInfo*   pExpr = NULL;
       SColumnIndex index = COLUMN_INDEX_INITIALIZER;
 
       if (pItem->pNode->Expr.paramList != NULL) {
         tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
-        SStrToken* pToken = &pParamElem->pNode->columnName;
-        int16_t tokenId = pParamElem->pNode->tokenId;
+        SStrToken*    pToken = &pParamElem->pNode->columnName;
+        int16_t       tokenId = pParamElem->pNode->tokenId;
         if ((pToken->z == NULL || pToken->n == 0) && (TK_INTEGER != tokenId)) {
           return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
         }
@@ -2719,7 +2724,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
           index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
           int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
-          pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false);
+          pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
+                                false);
         } else {
           // count the number of table created according to the super table
           if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
@@ -2730,34 +2736,38 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
 
           // count tag is equalled to count(tbname)
           bool isTag = false;
-          if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+          if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta) ||
+              index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
             index.columnIndex = TSDB_TBNAME_COLUMN_INDEX;
             isTag = true;
           }
 
           int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
-          pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, isTag);
+          pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
+                                isTag);
         }
       } else {  // count(*) is equalled to count(primary_timestamp_key)
         index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
         int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
-        pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false);
+        pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size,
+                              false);
       }
 
       pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
       memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
-      getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token,sizeof(pExpr->base.aliasName) - 1);
+      getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
 
       SColumnList list = createColumnList(1, index.tableIndex, index.columnIndex);
       if (finalResult) {
         int32_t numOfOutput = tscNumOfFields(pQueryInfo);
-        insertResultField(pQueryInfo, numOfOutput, &list, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT,
pExpr->base.aliasName, pExpr); + insertResultField(pQueryInfo, numOfOutput, &list, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->base.aliasName, + pExpr); } else { for (int32_t i = 0; i < list.num; ++i) { SSchema* ps = tscGetTableSchema(pTableMetaInfo->pTableMeta); tscColumnListInsert(pQueryInfo->colList, list.ids[i].columnIndex, pTableMetaInfo->pTableMeta->id.uid, - &ps[list.ids[i].columnIndex]); + &ps[list.ids[i].columnIndex]); } } @@ -2783,12 +2793,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_LEASTSQR: case TSDB_FUNC_ELAPSED: { // 1. valid the number of parameters - int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->Expr.paramList); + int32_t numOfParams = + (pItem->pNode->Expr.paramList == NULL) ? 0 : (int32_t)taosArrayGetSize(pItem->pNode->Expr.paramList); // no parameters or more than one parameter for function if (pItem->pNode->Expr.paramList == NULL || - (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && functionId != TSDB_FUNC_DIFF - && numOfParams != 1) || + (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && + functionId != TSDB_FUNC_DIFF && numOfParams != 1) || ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) || (functionId == TSDB_FUNC_ELAPSED && numOfParams != 1 && numOfParams != 2) || (functionId == TSDB_FUNC_DIFF && numOfParams != 1 && numOfParams != 2)) { @@ -2796,12 +2807,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0); - if ((pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) || 0 == pParamElem->pNode->columnName.n) { + if ((pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) || + 0 == pParamElem->pNode->columnName.n) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) { + if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -2810,13 +2823,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // elapsed only can be applied to primary key if (functionId == TSDB_FUNC_ELAPSED) { - if ( index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX || pColumnSchema->colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX || + pColumnSchema->colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key"); } } - //for timeline related aggregation function like elapsed and twa, groupby in subquery is not allowed - //as calculation result is meaningless by mixing different childtables(timelines) results. + // for timeline related aggregation function like elapsed and twa, groupby in subquery is not allowed + // as calculation result is meaningless by mixing different childtables(timelines) results. 
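+      // illustrative example (not part of the original comment): a query like
+      //   SELECT elapsed(ts) FROM (SELECT * FROM stb GROUP BY tbname)
+      // would merge rows of different child tables into a single timeline,
+      // so the check below rejects elapsed/twa on top of such subqueries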
if ((functionId == TSDB_FUNC_ELAPSED || functionId == TSDB_FUNC_TWA) && pQueryInfo->pUpstream != NULL) { size_t numOfUpstreams = taosArrayGetSize(pQueryInfo->pUpstream); for (int32_t i = 0; i < numOfUpstreams; ++i) { @@ -2830,7 +2844,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); // functions can not be applied to tags - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) { + if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || + (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } @@ -2839,7 +2854,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (!IS_NUMERIC_TYPE(pSchema->type) && (functionId != TSDB_FUNC_ELAPSED)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); - } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { + } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && + (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); } @@ -2856,16 +2872,18 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM) { SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, - TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false); + TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName)); SColumnList ids = createColumnList(1, 0, 0); - insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); + insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, + aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); } - SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false); + SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + intermediateResSize, false); - if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters + if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters char val[8] = {0}; if (tVariantDump(&pParamElem[1].pNode->value, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -2886,14 +2904,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col char val[8] = {0}; int64_t tickPerSec = 0; - char *exprToken = tcalloc(pParamElem[1].pNode->exprToken.n + 1, sizeof(char)); + char* exprToken = tcalloc(pParamElem[1].pNode->exprToken.n + 1, sizeof(char)); memcpy(exprToken, pParamElem[1].pNode->exprToken.z, pParamElem[1].pNode->exprToken.n); if (pParamElem[1].pNode->exprToken.type == TK_NOW || strstr(exprToken, "now")) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } tfree(exprToken); - if ((TSDB_DATA_TYPE_NULL == pParamElem[1].pNode->value.nType) || tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) { + if ((TSDB_DATA_TYPE_NULL == 
pParamElem[1].pNode->value.nType) || + tVariantDump(&pParamElem[1].pNode->value, (char*)&tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -2909,7 +2928,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16); } - tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); + tscExprAddParams(&pExpr->base, (char*)&tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES); if (functionId == TSDB_FUNC_DERIVATIVE) { memset(val, 0, tListLen(val)); @@ -2917,7 +2936,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_TSC_INVALID_OPERATION; } - int64_t v = *(int64_t*) val; + int64_t v = *(int64_t*)val; if (v != 0 && v != 1) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11); } @@ -2944,7 +2963,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); - getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token,sizeof(pExpr->base.aliasName) - 1); + getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); if (finalResult) { int32_t numOfOutput = tscNumOfFields(pQueryInfo); @@ -2968,9 +2987,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // NOTE: has time range condition or normal column filter condition, the last_row query will be transferred to last query SConvertFunc cvtFunc = {.originFuncId = functionId, .execFuncId = functionId}; - if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || - (hasNormalColumnFilter(pQueryInfo)) || - taosArrayGetSize(pQueryInfo->pUpstream)>0)) { + if (functionId == TSDB_FUNC_LAST_ROW && + ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || (hasNormalColumnFilter(pQueryInfo)) || + taosArrayGetSize(pQueryInfo->pUpstream) > 0)) { cvtFunc.execFuncId = TSDB_FUNC_LAST; } @@ -2979,7 +2998,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - if (taosArrayGetSize(pItem->pNode->Expr.paramList) > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) { + if (taosArrayGetSize(pItem->pNode->Expr.paramList) > 1 && + (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); } @@ -2992,7 +3012,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (pParamElem->pNode->tokenId == TK_ALL) { // select table.* + if (pParamElem->pNode->tokenId == TK_ALL) { // select table.* SStrToken tmpToken = pParamElem->pNode->columnName; if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -3008,14 +3028,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SStrToken t = {.z = pSchema[j].name, .n = (uint32_t)strnlen(pSchema[j].name, TSDB_COL_NAME_LEN)}; setResultColName(name, pItem, cvtFunc.originFuncId, &t, true); - if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index, - finalResult, pUdfInfo) != 0) { + if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index, finalResult, + 
pUdfInfo) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } } } else { - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -3030,13 +3051,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - char name[TSDB_COL_NAME_LEN] = {0}; + char name[TSDB_COL_NAME_LEN] = {0}; SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); bool multiColOutput = taosArrayGetSize(pItem->pNode->Expr.paramList) > 1; setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->columnName, multiColOutput); - if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult, pUdfInfo) != 0) { + if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult, + pUdfInfo) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } } @@ -3057,12 +3079,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) { SColumnIndex index = {.tableIndex = j, .columnIndex = i}; - char name[TSDB_COL_NAME_LEN] = {0}; + char name[TSDB_COL_NAME_LEN] = {0}; SStrToken t = {.z = pSchema[i].name, .n = (uint32_t)strnlen(pSchema[i].name, TSDB_COL_NAME_LEN)}; setResultColName(name, pItem, cvtFunc.originFuncId, &t, true); if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[index.columnIndex], cvtFunc, name, colIndex, &index, - finalResult, pUdfInfo) != 0) { + finalResult, pUdfInfo) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } colIndex++; @@ -3079,18 +3101,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_MAVG: case TSDB_FUNC_SAMPLE: case TSDB_FUNC_PERCT: - case TSDB_FUNC_APERCT: { + case TSDB_FUNC_APERCT: + case TSDB_FUNC_UNIQUE: { // 1. 
valid the number of parameters bool valid = true; - if(pItem->pNode->Expr.paramList == NULL) { + if (pItem->pNode->Expr.paramList == NULL) { valid = false; - } else if(functionId == TSDB_FUNC_APERCT) { + } else if (functionId == TSDB_FUNC_APERCT) { size_t cnt = taosArrayGetSize(pItem->pNode->Expr.paramList); - if(cnt != 2 && cnt !=3) valid = false; - } else { + if (cnt != 2 && cnt != 3) valid = false; + } else if (functionId == TSDB_FUNC_UNIQUE) { + if (taosArrayGetSize(pItem->pNode->Expr.paramList) != 1) valid = false; + }else { if (taosArrayGetSize(pItem->pNode->Expr.paramList) != 2) valid = false; } - if(!valid) { + if (!valid) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -3098,16 +3123,20 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (pParamElem->pNode->tokenId != TK_ID) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - + SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - + if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX && functionId == TSDB_FUNC_UNIQUE) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg29); + } + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); @@ -3117,27 +3146,30 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } // 2. valid the column type - if (functionId != TSDB_FUNC_SAMPLE && !IS_NUMERIC_TYPE(pSchema->type)) { + if (functionId != TSDB_FUNC_SAMPLE && functionId != TSDB_FUNC_UNIQUE && !IS_NUMERIC_TYPE(pSchema->type)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - // 3. valid the parameters - if (pParamElem[1].pNode->tokenId == TK_ID) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - } + tVariant* pVariant = NULL; + if (functionId != TSDB_FUNC_UNIQUE) { + // 3. 
valid the parameters + if (pParamElem[1].pNode->tokenId == TK_ID) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } - tVariant* pVariant = &pParamElem[1].pNode->value; + pVariant = &pParamElem[1].pNode->value; + } - int16_t resultType = pSchema->type; - int32_t resultSize = pSchema->bytes; - int32_t interResult = 0; + int16_t resultType = pSchema->type; + int32_t resultSize = pSchema->bytes; + int32_t interResult = 0; char val[8] = {0}; SExprInfo* pExpr = NULL; if (functionId == TSDB_FUNC_PERCT || functionId == TSDB_FUNC_APERCT) { - // param1 double - if(pVariant->nType != TSDB_DATA_TYPE_DOUBLE && pVariant->nType != TSDB_DATA_TYPE_BIGINT){ + // param1 double + if (pVariant->nType != TSDB_DATA_TYPE_DOUBLE && pVariant->nType != TSDB_DATA_TYPE_BIGINT) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE, true); @@ -3147,18 +3179,19 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize, &interResult, 0, false, - pUdfInfo); + getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize, &interResult, 0, + false, pUdfInfo); /* - * sql function transformation - * for dp = 0, it is actually min, - * for dp = 100, it is max, + * sql function transformation + * for dp = 0, it is actually min, + * for dp = 100, it is max, */ tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); colIndex += 1; // the first column is ts - - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false); + + pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + interResult, false); tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); // param2 int32 @@ -3166,15 +3199,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (pParamElem[2].pNode != NULL) { pVariant = &pParamElem[2].pNode->value; // check type must string - if(pVariant->nType != TSDB_DATA_TYPE_BINARY || pVariant->pz == NULL){ + if (pVariant->nType != TSDB_DATA_TYPE_BINARY || pVariant->pz == NULL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg13); } - char* pzAlgo = pVariant->pz; + char* pzAlgo = pVariant->pz; int32_t algo = 0; - if(strcasecmp(pzAlgo, "t-digest") == 0) { + if (strcasecmp(pzAlgo, "t-digest") == 0) { algo = 1; - } else if(strcasecmp(pzAlgo, "default") == 0){ + } else if (strcasecmp(pzAlgo, "default") == 0) { algo = 0; } else { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg14); @@ -3190,7 +3223,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col char* endptr = NULL; strtoll(pParamElem[1].pNode->exprToken.z, &endptr, 10); - if ((endptr-pParamElem[1].pNode->exprToken.z != pParamElem[1].pNode->exprToken.n) || errno == ERANGE) { + if ((endptr - pParamElem[1].pNode->exprToken.z != pParamElem[1].pNode->exprToken.n) || errno == ERANGE) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg18); } tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); @@ -3202,36 +3235,38 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // todo REFACTOR // set the first column ts for top/bottom query - int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? 
TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS; + int32_t tsFuncId = (functionId == TSDB_FUNC_MAVG) ? TSDB_FUNC_TS_DUMMY : TSDB_FUNC_TS; SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, - 0, false); + pExpr = tscExprAppend(pQueryInfo, tsFuncId, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false); tstrncpy(pExpr->base.aliasName, aAggs[tsFuncId].name, sizeof(pExpr->base.aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); - insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, - aAggs[tsFuncId].name, pExpr); + insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[tsFuncId].name, + pExpr); colIndex += 1; // the first column is ts - getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType, &resultSize, &interResult, 0, false, - pUdfInfo); - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false); + getResultDataInfo(pSchema->type, pSchema->bytes, functionId, (int32_t)numRowsSelected, &resultType, + &resultSize, &interResult, 0, false, pUdfInfo); + pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + interResult, false); tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } else { tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); int64_t numRowsSelected = GET_INT32_VAL(val); - if (numRowsSelected <= 0 || numRowsSelected > 100) { // todo use macro + if (functionId != TSDB_FUNC_UNIQUE && (numRowsSelected <= 0 || numRowsSelected > 100)) { // todo use macro return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12); } + if(functionId == TSDB_FUNC_UNIQUE){ + GET_INT32_VAL(val) = MAX_UNIQUE_RESULT_ROWS; + } // todo REFACTOR // set the first column ts for top/bottom query SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, - 0, false); + pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, 0, 0, false); tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -3241,12 +3276,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col colIndex += 1; // the first column is ts - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), resultSize, false); + pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), + resultSize, false); tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } - + memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); - getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token,sizeof(pExpr->base.aliasName) - 1); + getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); // todo refactor: tscColumnListInsert part SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); @@ -3260,39 +3296,40 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_SUCCESS; } - + case TSDB_FUNC_TID_TAG: { 
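      // tid_tag: judging from the checks below, this branch only applies to super
      // tables, accepts exactly one tag/tbname column as its parameter, and marks
      // the query as TSDB_QUERY_TYPE_TAG_FILTER_QUERY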
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } - + // no parameters or more than one parameter for function if (pItem->pNode->Expr.paramList == NULL || taosArrayGetSize(pItem->pNode->Expr.paramList) != 1) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - + tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->Expr.paramList, 0); - tSqlExpr* pParam = pParamItem->pNode; + tSqlExpr* pParam = pParamItem->pNode; SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } - + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - + // functions can not be applied to normal columns int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - + if (index.columnIndex > 0) { index.columnIndex -= numOfCols; } - + // 2. valid the column type int16_t colType = 0; if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { @@ -3300,7 +3337,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } else { colType = pSchema[index.columnIndex].type; } - + if (colType == TSDB_DATA_TYPE_BOOL) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -3315,20 +3352,20 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } else { s = pTagSchema[index.columnIndex]; } - + int32_t bytes = 0; - int16_t type = 0; + int16_t type = 0; int32_t inter = 0; int32_t ret = getResultDataInfo(s.type, s.bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0, NULL); assert(ret == TSDB_CODE_SUCCESS); - + s.type = (uint8_t)type; s.bytes = bytes; TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG, getNewResColId(pCmd)); - + return TSDB_CODE_SUCCESS; } @@ -3338,12 +3375,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - SColumnIndex index = {.tableIndex = 0, .columnIndex = 0,}; + SColumnIndex index = { + .tableIndex = 0, + .columnIndex = 0, + }; pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); - int32_t inter = 0; + int32_t inter = 0; int16_t resType = 0; - int32_t bytes = 0; + int32_t bytes = 0; getResultDataInfo(TSDB_DATA_TYPE_INT, 4, TSDB_FUNC_BLKINFO, 0, &resType, &bytes, &inter, 0, 0, NULL); @@ -3375,7 +3415,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -3390,20 +3431,19 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return 
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - //bin_type param + // bin_type param if (pParamElem[1].pNode->tokenId == TK_ID) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - tVariant *pVariant = &pParamElem[1].pNode->value; + tVariant* pVariant = &pParamElem[1].pNode->value; if (pVariant == NULL || pVariant->nType != TSDB_DATA_TYPE_BINARY) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } -#define USER_INPUT_BIN 0 -#define LINEAR_BIN 1 -#define LOG_BIN 2 - + #define USER_INPUT_BIN 0 + #define LINEAR_BIN 1 + #define LOG_BIN 2 int8_t binType; if (strcasecmp(pVariant->pz, "user_input") == 0) { binType = USER_INPUT_BIN; @@ -3414,7 +3454,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } else { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg20); } - //bin_description param in JSON format + // bin_description param in JSON format if (pParamElem[2].pNode->tokenId == TK_ID) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -3424,11 +3464,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } - cJSON *binDesc = cJSON_Parse(pVariant->pz); + cJSON* binDesc = cJSON_Parse(pVariant->pz); int32_t counter; int32_t numBins; int32_t numOutput; - double *intervals; + double* intervals; if (cJSON_IsObject(binDesc)) { /* linaer/log bins */ int32_t numOfParams = cJSON_GetArraySize(binDesc); int32_t startIndex; @@ -3436,11 +3476,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); } - cJSON *start = cJSON_GetObjectItem(binDesc, "start"); - cJSON *factor = cJSON_GetObjectItem(binDesc, "factor"); - cJSON *width = cJSON_GetObjectItem(binDesc, "width"); - cJSON *count = cJSON_GetObjectItem(binDesc, "count"); - cJSON *infinity = cJSON_GetObjectItem(binDesc, "infinity"); + cJSON* start = cJSON_GetObjectItem(binDesc, "start"); + cJSON* factor = cJSON_GetObjectItem(binDesc, "factor"); + cJSON* width = cJSON_GetObjectItem(binDesc, "width"); + cJSON* count = cJSON_GetObjectItem(binDesc, "count"); + cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity"); if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); @@ -3450,10 +3490,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg25); } - if (isinf(start->valuedouble) || - (width != NULL && isinf(width->valuedouble)) || - (factor != NULL && isinf(factor->valuedouble)) || - (count != NULL && isinf(count->valuedouble))) { + if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) || + (factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23); } @@ -3468,7 +3506,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col intervals = tcalloc(numBins, sizeof(double)); if (cJSON_IsNumber(width) && factor == NULL && binType == LINEAR_BIN) { - //linear bin process + // linear bin process if (width->valuedouble == 0) { tfree(intervals); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24); @@ -3482,7 +3520,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col startIndex++; } } else if 
(cJSON_IsNumber(factor) && width == NULL && binType == LOG_BIN) { - //log bin process + // log bin process if (start->valuedouble == 0) { tfree(intervals); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg26); @@ -3511,7 +3549,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tfree(intervals); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23); } - //in case of desc bin orders, -inf/inf should be swapped + // in case of desc bin orders, -inf/inf should be swapped assert(numBins >= 4); if (intervals[1] > intervals[numBins - 2]) { SWAP(intervals[0], intervals[numBins - 1], double); @@ -3524,7 +3562,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } counter = numBins = cJSON_GetArraySize(binDesc); intervals = tcalloc(numBins, sizeof(double)); - cJSON *bin = binDesc->child; + cJSON* bin = binDesc->child; if (bin == NULL) { tfree(intervals); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); @@ -3550,16 +3588,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col int16_t resultType = pSchema->type; int32_t resultSize = pSchema->bytes; int32_t interResult = 0; - getResultDataInfo(pSchema->type, pSchema->bytes, functionId, counter, &resultType, &resultSize, &interResult, 0, false, - pUdfInfo); + getResultDataInfo(pSchema->type, pSchema->bytes, functionId, counter, &resultType, &resultSize, &interResult, 0, + false, pUdfInfo); SExprInfo* pExpr = NULL; - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false); + pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, + false); numOutput = numBins - 1; tscExprAddParams(&pExpr->base, (char*)&numOutput, TSDB_DATA_TYPE_INT, sizeof(int32_t)); tscExprAddParams(&pExpr->base, (char*)intervals, TSDB_DATA_TYPE_BINARY, sizeof(double) * numBins); tfree(intervals); - //normalized param + // normalized param char val[8] = {0}; if (pParamElem[3].pNode->tokenId == TK_ID) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); @@ -3604,13 +3643,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg13); } - tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);; + tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0); + ; if (pParamElem->pNode->tokenId != TK_ID) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } SColumnIndex index = COLUMN_INDEX_INITIALIZER; - if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { + if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != + TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -3625,22 +3666,24 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - int32_t inter = 0; + int32_t inter = 0; int16_t resType = 0; - int32_t bytes = 0; + int32_t bytes = 0; getResultDataInfo(TSDB_DATA_TYPE_INT, 4, functionId, 0, &resType, &bytes, &inter, 0, false, pUdfInfo); - SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resType, bytes, getNewResColId(pCmd), inter, false); + SExprInfo* pExpr = + tscExprAppend(pQueryInfo, functionId, &index, resType, bytes, 
getNewResColId(pCmd), inter, false); memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName)); getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1); SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); - uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; + uint64_t uid = pTableMetaInfo->pTableMeta->id.uid; SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); if (finalResult) { - insertResultField(pQueryInfo, colIndex, &ids, pUdfInfo->resBytes, pUdfInfo->resType, pExpr->base.aliasName, pExpr); + insertResultField(pQueryInfo, colIndex, &ids, pUdfInfo->resBytes, pUdfInfo->resType, pExpr->base.aliasName, + pExpr); } else { for (int32_t i = 0; i < ids.num; ++i) { tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, pSchema); @@ -3653,7 +3696,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_TSC_INVALID_OPERATION; } - // todo refactor static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex) { assert(num == 1 && tableIndex >= 0); @@ -3966,7 +4008,9 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) { (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) || (functionId == TSDB_FUNC_SAMPLE) || - (functionId == TSDB_FUNC_ELAPSED) || (functionId == TSDB_FUNC_HISTOGRAM)) { + (functionId == TSDB_FUNC_ELAPSED) || + (functionId == TSDB_FUNC_HISTOGRAM) || + (functionId == TSDB_FUNC_UNIQUE)) { if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->base.param[0].i64, &type, &bytes, &interBytes, 0, true, NULL) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; @@ -6614,7 +6658,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); pQueryInfo->order.order = TSDB_ORDER_ASC; - if (isTopBottomQuery(pQueryInfo)) { + if (isTopBottomUniqueQuery(pQueryInfo)) { pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } else { // in case of select tbname from super_table, the default order column can not be the primary ts column pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro @@ -6770,7 +6814,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } - if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { + if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomUniqueQuery(pQueryInfo)) { return invalidOperationMsg(pMsgBuf, msg3); } @@ -6780,7 +6824,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (tscIsDiffDerivLikeQuery(pQueryInfo)) { return invalidOperationMsg(pMsgBuf, msg12); } - pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + //pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderType = p1->sortOrder; @@ -6792,7 +6836,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (udf) { return invalidOperationMsg(pMsgBuf, msg11); } - } else if (isTopBottomQuery(pQueryInfo)) { + } else if (isTopBottomUniqueQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ int32_t pos = tscExprTopBottomIndex(pQueryInfo); @@ 
-6839,7 +6883,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
     } else {
       pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
       if (orderByTags) {
-        pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+        //pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
         pQueryInfo->groupbyExpr.orderType = pItem->sortOrder;
       } else if (orderByGroupbyCol){
         pQueryInfo->order.order = pItem->sortOrder;
@@ -6884,7 +6928,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
       return invalidOperationMsg(pMsgBuf, msg1);
     }
 
-    if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
+    if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomUniqueQuery(pQueryInfo)) {
       bool validOrder = false;
       SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
       if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
@@ -6905,11 +6949,11 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
       }
 
       CommonItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
-      pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
+      //pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
       pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
     }
 
-    if (isTopBottomQuery(pQueryInfo)) {
+    if (isTopBottomUniqueQuery(pQueryInfo)) {
       SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
       if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
         SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
@@ -7983,7 +8027,7 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
       pExpr->base.functionId = TSDB_FUNC_TAG_DUMMY;
       tagLength += pExpr->base.resBytes;
     } else if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
-      pExpr->base.functionId = TSDB_FUNC_TS_DUMMY;
+      pExpr->base.functionId = TSDB_FUNC_TS_DUMMY;  // e.g. the ts column in "select ts, top(col, 2)"
       tagLength += pExpr->base.resBytes;
     }
   }
@@ -8340,6 +8384,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
   const char* msg4 = "retrieve tags not compatible with group by or interval query";
   const char* msg5 = "functions can not be mixed up";
   const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed only support group by tbname";
+  const char* msg7 = "unique function is not supported in state window query";
 
   // only retrieve tags, group by is not supportted
   if (tscQueryTags(pQueryInfo)) {
@@ -8419,9 +8464,14 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
         }
       }
 
+      if (pQueryInfo->stateWindow && f == TSDB_FUNC_UNIQUE) {
+        return invalidOperationMsg(msg, msg7);
+      }
+
       if (IS_MULTIOUTPUT(aAggs[f].status) && f != TSDB_FUNC_TOP && f != TSDB_FUNC_BOTTOM &&
           f != TSDB_FUNC_DIFF && f != TSDB_FUNC_MAVG && f != TSDB_FUNC_CSUM && f != TSDB_FUNC_SAMPLE &&
-          f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ) {
+          f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ &&
+          f != TSDB_FUNC_UNIQUE) {
         return invalidOperationMsg(msg, msg1);
       }
 
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index a3edca6ee229d459d4b9bee279c03750ef08132d..b59f7cc4db0c4e0cf6c5d2d04d5aeb1d5aa44154 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -1045,8 +1045,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
 
   SGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr;
   if (pGroupbyExpr
!= NULL && pGroupbyExpr->numOfGroupCols > 0) {
-    pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex);
-    pQueryMsg->orderType = htons(pGroupbyExpr->orderType);
+    //pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex);
+    pQueryMsg->groupOrderType = htons(pGroupbyExpr->orderType);
 
     for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
       SColIndex* pCol = taosArrayGet(pGroupbyExpr->columnInfo, j);
@@ -1947,7 +1947,6 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
   SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
   if (pQueryInfo->pQInfo == NULL) {
     STableGroupInfo tableGroupInfo = {.numOfTables = 1, .pGroupList = taosArrayInit(1, POINTER_BYTES),};
-    tableGroupInfo.map = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
 
     STableKeyInfo tableKeyInfo = {.pTable = NULL, .lastKey = INT64_MIN};
@@ -1958,8 +1957,6 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
     tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute query processing", pSql->self, pSql->self);
     pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE, pSql->self);
     if (pQueryInfo->pQInfo == NULL) {
-      taosHashCleanup(tableGroupInfo.map);
-      taosArrayDestroy(&group);
       tscAsyncResultOnError(pSql);
       pRes->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
       return pRes->code;
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 39289a55f482df04979ba79f500f9f19d04dac03..be9b3a2cf32e969594986066fdaba7af847607ac 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -3805,6 +3805,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
   assert(pQueryInfo != NULL);
   SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
   if (pQInfo == NULL) {
+    tsdbDestroyTableGroup(pTableGroupInfo);
    goto _cleanup;
   }
 
@@ -3913,6 +3914,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
   int32_t code = initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger);
   taosArrayDestroy(&pa);
   if (code != TSDB_CODE_SUCCESS) {
+    pQInfo = NULL;
     goto _cleanup;
   }
 
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 6d6e41aa848524fbdf9e11fdb7f7106a2380c14f..dfbe4441463a3a3e18c50955110bcc368549217d 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -74,11 +74,11 @@ int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *le
       break;
 
     case TSDB_DATA_TYPE_UINT:
-      n = sprintf(str, "%d", *(uint32_t*)buf);
+      n = sprintf(str, "%u", *(uint32_t*)buf);
       break;
 
     case TSDB_DATA_TYPE_UBIGINT:
-      n = sprintf(str, "%" PRId64, *(uint64_t*)buf);
+      n = sprintf(str, "%" PRIu64, *(uint64_t*)buf);
       break;
 
     case TSDB_DATA_TYPE_FLOAT:
@@ -304,7 +304,7 @@ bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableI
     return false;
   }
 
-  // order by columnIndex exists, not a non-ordered projection query
+  // order by columnIndex does not exist, i.e. this is a non-ordered projection query
   return pQueryInfo->order.orderColId < 0;
 }
 
@@ -313,7 +313,7 @@ bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableInde
     return false;
   }
 
-  // order by columnIndex exists, a non-ordered projection query
+  // order by columnIndex exists, i.e. this is an ordered projection query
   return pQueryInfo->order.orderColId >= 0;
 }
 
@@ -689,7 +689,8 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
         (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM ||
          functionId == TSDB_FUNC_TS_COMP ||
          functionId == TSDB_FUNC_SAMPLE ||
-         functionId ==
TSDB_FUNC_HISTOGRAM)) {
+         functionId == TSDB_FUNC_HISTOGRAM ||
+         functionId == TSDB_FUNC_UNIQUE)) {
       return true;
     }
   }
 
@@ -1404,8 +1405,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
       }
     }
 
-    tableGroupInfo.map = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
-
     STableKeyInfo tableKeyInfo = {.pTable = NULL, .lastKey = INT64_MIN};
 
     SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
@@ -2614,7 +2613,7 @@ SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde
 }
 
 SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex,
-                         int16_t type, int16_t size) {
+                         int16_t type, int32_t size) {
   STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
   SExprInfo* pExpr = tscExprGet(pQueryInfo, index);
   if (pExpr == NULL) {
@@ -2659,7 +2658,8 @@ int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){
     SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
     if (pExpr == NULL) continue;
 
-    if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+    if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM
+        || pExpr->base.functionId == TSDB_FUNC_UNIQUE) {
       return i;
     }
   }
@@ -4937,7 +4937,11 @@ static int32_t createGlobalAggregateExpr(SQueryAttr* pQueryAttr, SQueryInfo* pQu
     pse->colInfo.colIndex = i;
 
     pse->colType = pExpr->base.resType;
-    pse->colBytes = pExpr->base.resBytes;
+    if (pExpr->base.resBytes > INT16_MAX && pExpr->base.functionId == TSDB_FUNC_UNIQUE) {
+      pQueryAttr->interBytesForGlobal = pExpr->base.resBytes;
+    } else {
+      pse->colBytes = pExpr->base.resBytes;
+    }
   }
 
   {
@@ -5081,6 +5085,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
   pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;
   pQueryAttr->range = pQueryInfo->range;
 
+
   if (pQueryInfo->order.order == TSDB_ORDER_ASC) {   // TODO refactor
     pQueryAttr->window = pQueryInfo->window;
   } else {
@@ -5112,6 +5117,8 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
     }
   }
 
+  pQueryAttr->uniqueQuery = isUniqueQuery(numOfOutput, pQueryAttr->pExpr1);
+
   pQueryAttr->tableCols = calloc(numOfCols, sizeof(SColumnInfo));
   for(int32_t i = 0; i < numOfCols; ++i) {
     SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
@@ -5403,7 +5410,7 @@ int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, in
   // set json real data
   cJSON *root = cJSON_Parse(json);
   if (root == NULL){
-    tscError("json parse error");
+    tscError("json parse error:%s", json);
     return tscSQLSyntaxErrMsg(errMsg, "json parse error", NULL);
   }
 
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index 7a401d8a7f71c094654d06a2ed37ae3fd7fc9c94..e6b7dd1463a754bfaa78bb1081e3b6b0b753d752 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -54,7 +54,7 @@ typedef struct SSqlExpr {
   int32_t     resBytes;       // length of return value
   int32_t     interBytes;     // inter result buffer size
-  int16_t     colType;        // table column type
+  int16_t     colType;        // table column type; this should be int32_t, because int16_t is too small for the global merge stage (see pQueryAttr->interBytesForGlobal)
   int16_t     colBytes;       // table column bytes
   int16_t     numOfParams;    // argument value of each function
 
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index af92f264dfb613035944cf80d81baf4ab8abc2f4..922ae1dedc806c8939a142d737e4661f3fe4561d 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -1812,9 +1812,10 @@ static void
doInitGlobalConfig(void) { cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM); + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM); #else - assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM); + // if TD_TSZ macro define, have 5 count configs, so must add 5 + assert(tsGlobalConfigNum + 5 == TSDB_CFG_MAX_NUM); #endif } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index eb51da8aff9e789dead5930a3ee019907d68e3b7..888f58856a8f858f25f7ee5317f10864c00bac0e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -3,6 +3,7 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.*; +import com.taosdata.jdbc.enums.TimestampFormat; import com.taosdata.jdbc.utils.HttpClientPoolUtil; import com.taosdata.jdbc.ws.InFlightRequest; import com.taosdata.jdbc.ws.Transport; @@ -77,18 +78,20 @@ public class RestfulDriver extends AbstractDriver { int maxRequest = props.containsKey(TSDBDriver.PROPERTY_KEY_MAX_CONCURRENT_REQUEST) ? Integer.parseInt(props.getProperty(TSDBDriver.PROPERTY_KEY_MAX_CONCURRENT_REQUEST)) : Transport.DEFAULT_MAX_REQUEST; + InFlightRequest inFlightRequest = new InFlightRequest(timeout, maxRequest); CountDownLatch latch = new CountDownLatch(1); Map httpHeaders = new HashMap<>(); - client = new WSClient(new URI(loginUrl), user, password, database, inFlightRequest, httpHeaders, latch, maxRequest); + client = new WSClient(new URI(loginUrl), user, password, database, + inFlightRequest, httpHeaders, latch, maxRequest); transport = new Transport(client, inFlightRequest); - if (!client.connectBlocking()) { + if (!client.connectBlocking(timeout, TimeUnit.MILLISECONDS)) { throw new SQLException("can't create connection with server"); } if (!latch.await(timeout, TimeUnit.MILLISECONDS)) { throw new SQLException("auth timeout"); } - if (client.isAuth()) { + if (!client.isAuth()) { throw new SQLException("auth failure"); } } catch (URISyntaxException e) { @@ -96,7 +99,9 @@ public class RestfulDriver extends AbstractDriver { } catch (InterruptedException e) { throw new SQLException("creat websocket connection has been Interrupted ", e); } - return new WSConnection(url, props, transport, database, true); + // TODO fetch Type from config + props.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, String.valueOf(TimestampFormat.TIMESTAMP)); + return new WSConnection(url, props, transport, database); } loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + ""; int poolSize = Integer.parseInt(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 47c5232d11a26068b644d9c4bebc6df41746650f..599ee85a6bbdb2a612faf1434f4921f071ea348e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -302,6 +302,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { this.taos_type = taos_type; } + public int 
getTaosType() { + return taos_type; + } } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/AbstractWSResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/AbstractWSResultSet.java new file mode 100644 index 0000000000000000000000000000000000000000..2325161d689d6acdf91bd7469b3c820f7716229d --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/AbstractWSResultSet.java @@ -0,0 +1,146 @@ +package com.taosdata.jdbc.ws; + +import com.taosdata.jdbc.*; +import com.taosdata.jdbc.rs.RestfulResultSet; +import com.taosdata.jdbc.rs.RestfulResultSetMetaData; +import com.taosdata.jdbc.ws.entity.*; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.time.chrono.IsoChronology; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.format.ResolverStyle; +import java.time.temporal.ChronoField; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +public abstract class AbstractWSResultSet extends AbstractResultSet { + public static DateTimeFormatter rfc3339Parser = new DateTimeFormatterBuilder() + .parseCaseInsensitive() + .appendValue(ChronoField.YEAR, 4) + .appendLiteral('-') + .appendValue(ChronoField.MONTH_OF_YEAR, 2) + .appendLiteral('-') + .appendValue(ChronoField.DAY_OF_MONTH, 2) + .appendLiteral('T') + .appendValue(ChronoField.HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(ChronoField.MINUTE_OF_HOUR, 2) + .appendLiteral(':') + .appendValue(ChronoField.SECOND_OF_MINUTE, 2) + .optionalStart() + .appendFraction(ChronoField.NANO_OF_SECOND, 2, 9, true) + .optionalEnd() + .appendOffset("+HH:MM", "Z").toFormatter() + .withResolverStyle(ResolverStyle.STRICT) + .withChronology(IsoChronology.INSTANCE); + + protected final Statement statement; + protected final Transport transport; + protected final RequestFactory factory; + protected final long queryId; + + protected boolean isClosed; + // meta + protected final ResultSetMetaData metaData; + protected final List fields = new ArrayList<>(); + protected final List columnNames; + protected List fieldLength; + // data + protected List> result = new ArrayList<>(); + + protected int numOfRows = 0; + protected int rowIndex = 0; + private boolean isCompleted; + + public AbstractWSResultSet(Statement statement, Transport transport, RequestFactory factory, + QueryResp response, String database) throws SQLException { + this.statement = statement; + this.transport = transport; + this.factory = factory; + this.queryId = response.getId(); + columnNames = Arrays.asList(response.getFieldsNames()); + for (int i = 0; i < response.getFieldsCount(); i++) { + String colName = response.getFieldsNames()[i]; + int taosType = response.getFieldsTypes()[i]; + int jdbcType = TSDBConstants.taosType2JdbcType(taosType); + int length = response.getFieldsLengths()[i]; + fields.add(new RestfulResultSet.Field(colName, jdbcType, length, "", taosType)); + } + this.metaData = new RestfulResultSetMetaData(database, fields, null); + this.timestampPrecision = response.getPrecision(); + } + + private boolean forward() { + if (this.rowIndex > this.numOfRows) { + return false; + } + + return ((++this.rowIndex) < this.numOfRows); + } + + public void reset() { + this.rowIndex = 0; + } + + @Override + public boolean next() throws SQLException { + if (isClosed()) { + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + } + + if (this.forward()) { + return true; + } + + Request request = factory.generateFetch(queryId); + CompletableFuture send = transport.send(request); + try { + Response response = send.get(); + FetchResp fetchResp = (FetchResp) response; + if (Code.SUCCESS.getCode() != fetchResp.getCode()) { +// TODO reWrite error type + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, fetchResp.getMessage()); + } + this.reset(); + if (fetchResp.isCompleted()) { + this.isCompleted = true; + return false; + } + fieldLength = Arrays.asList(fetchResp.getLengths()); + this.numOfRows = fetchResp.getRows(); + this.result = fetchJsonData(); + return true; + } catch (InterruptedException | ExecutionException e) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, e.getMessage()); + } + } + + public abstract List> fetchJsonData() throws SQLException, ExecutionException, InterruptedException; + + @Override + public void close() throws SQLException { + this.isClosed = true; + if (result != null && !result.isEmpty() && !isCompleted) { + FetchReq fetchReq = new FetchReq(queryId, queryId); + transport.sendWithoutRep(new Request(Action.FREE_RESULT.getAction(), fetchReq)); + } + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return this.metaData; + } + + @Override + public boolean isClosed() throws SQLException { + return isClosed; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/BlockResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/BlockResultSet.java new file mode 100644 index 0000000000000000000000000000000000000000..8371b9e7c4727c5a014c43faead2f4864df6afa8 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/BlockResultSet.java @@ -0,0 +1,626 @@ +package com.taosdata.jdbc.ws; + +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import com.google.common.primitives.Shorts; +import com.taosdata.jdbc.TSDBConstants; +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.enums.TimestampFormat; +import com.taosdata.jdbc.enums.TimestampPrecision; +import com.taosdata.jdbc.utils.Utils; +import com.taosdata.jdbc.ws.entity.*; + +import java.math.BigDecimal; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.sql.*; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +import static com.taosdata.jdbc.TSDBConstants.*; + +public class BlockResultSet extends AbstractWSResultSet { + + public BlockResultSet(Statement statement, Transport transport, RequestFactory factory, + QueryResp response, String database) throws SQLException { + super(statement, transport, factory, response, database); + } + + @Override + public List> fetchJsonData() throws SQLException, ExecutionException, InterruptedException { + Request blockRequest = factory.generateFetchBlock(queryId); + CompletableFuture fetchFuture = 
transport.send(blockRequest); + FetchBlockResp resp = (FetchBlockResp) fetchFuture.get(); + ByteBuffer buffer = resp.getBuffer(); + List> list = new ArrayList<>(); + if (resp.getBuffer() != null) { + for (int i = 0; i < fields.size(); i++) { + List col = new ArrayList<>(numOfRows); + int type = fields.get(i).getTaosType(); + switch (type) { + case TSDB_DATA_TYPE_BOOL: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.get() == 1); + } + break; + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.get()); + } + break; + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_SMALLINT: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.getShort()); + } + break; + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_INT: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.getInt()); + } + break; + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_BIGINT: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.getLong()); + } + break; + case TSDB_DATA_TYPE_FLOAT: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.getFloat()); + } + break; + case TSDB_DATA_TYPE_DOUBLE: + for (int j = 0; j < numOfRows; j++) { + col.add(buffer.getDouble()); + } + break; + case TSDB_DATA_TYPE_BINARY: { + byte[] bytes = new byte[fieldLength.get(i) - 2]; + for (int j = 0; j < numOfRows; j++) { + short s = buffer.getShort(); + buffer.get(bytes); + col.add(Arrays.copyOf(bytes, s)); + } + break; + } + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: { + byte[] bytes = new byte[fieldLength.get(i) - 2]; + for (int j = 0; j < numOfRows; j++) { + short s = buffer.getShort(); + buffer.get(bytes); + col.add(new String(Arrays.copyOf(bytes, s), StandardCharsets.UTF_8)); + } + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: { + byte[] bytes = new byte[fieldLength.get(i)]; + for (int j = 0; j < numOfRows; j++) { + buffer.get(bytes); + col.add(parseTimestampColumnData(bytes)); + } + break; + } + default: + break; + } + list.add(col); + } + } + return list; + } + + public static long bytesToLong(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(8); + buffer.put(bytes, 0, bytes.length); + buffer.flip();//need flip + buffer.order(ByteOrder.LITTLE_ENDIAN); + return buffer.getLong(); + } + + private Timestamp parseTimestampColumnData(byte[] bytes) throws SQLException { + if (bytes == null || bytes.length < 1) + return null; + String tsFormatUpperCase = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).toUpperCase(); + TimestampFormat timestampFormat = TimestampFormat.valueOf(tsFormatUpperCase); + switch (timestampFormat) { + case TIMESTAMP: { + long value = bytesToLong(bytes); + if (TimestampPrecision.MS == this.timestampPrecision) + return new Timestamp(value); + + if (TimestampPrecision.US == this.timestampPrecision) { + long epochSec = value / 1000_000L; + long nanoAdjustment = value % 1000_000L * 1000L; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if (TimestampPrecision.NS == this.timestampPrecision) { + long epochSec = value / 1000_000_000L; + long nanoAdjustment = value % 1000_000_000L; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + } + case UTC: { + String value = new String(bytes); + if (value.lastIndexOf(":") > 19) { + ZonedDateTime parse = ZonedDateTime.parse(value, rfc3339Parser); + return Timestamp.from(parse.toInstant()); + } else { + long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000; 
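+                    // Descriptive note on the parsing below: the value looks like
+                    // "yyyy-MM-ddTHH:mm:ss.<fraction><offset>". Chars 0-18 hold the wall-clock
+                    // time (parsed above into epochSec), the last five chars hold the zone
+                    // offset (e.g. "+0800"), and substring(20, length - 5) is the fractional
+                    // part, whose scale to nanoseconds depends on the timestamp precision.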
+                    int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5));
+                    long nanoAdjustment;
+                    if (TimestampPrecision.NS == this.timestampPrecision) {
+                        // ns timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSSSSS+0800
+                        nanoAdjustment = fractionalSec;
+                    } else if (TimestampPrecision.US == this.timestampPrecision) {
+                        // us timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0800
+                        nanoAdjustment = fractionalSec * 1000L;
+                    } else {
+                        // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0800
+                        nanoAdjustment = fractionalSec * 1000_000L;
+                    }
+                    ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5));
+                    Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant();
+                    return Timestamp.from(instant);
+                }
+            }
+            case STRING:
+            default: {
+                String value = new String(bytes, StandardCharsets.UTF_8);
+                if (TimestampPrecision.MS == this.timestampPrecision) {
+                    // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS
+                    return Timestamp.valueOf(value);
+                }
+                if (TimestampPrecision.US == this.timestampPrecision) {
+                    // us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS
+                    long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
+                    long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000L;
+                    return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
+                }
+                if (TimestampPrecision.NS == this.timestampPrecision) {
+                    // ns timestamp: yyyy-MM-dd HH:mm:ss.SSSSSSSSS
+                    long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
+                    long nanoAdjustment = Integer.parseInt(value.substring(20));
+                    return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
+                }
+                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION);
+            }
+        }
+    }
+
+    @Override
+    public String getString(int columnIndex) throws SQLException {
+        checkAvailability(columnIndex, fields.size());
+
+        Object value = result.get(columnIndex - 1).get(rowIndex);
+        wasNull = value == null;
+        if (value == null)
+            return null;
+        if (value instanceof String)
+            return (String) value;
+        if (value instanceof byte[])
+            return new String((byte[]) value);
+        return value.toString();
+    }
+
+    @Override
+    public boolean getBoolean(int columnIndex) throws SQLException {
+        checkAvailability(columnIndex, fields.size());
+
+        Object value = result.get(columnIndex - 1).get(rowIndex);
+        wasNull = value == null;
+        if (value == null)
+            return false;
+        if (value instanceof Boolean)
+            return (boolean) value;
+        return Boolean.parseBoolean(value.toString());
+    }
+
+    @Override
+    public byte getByte(int columnIndex) throws SQLException {
+        checkAvailability(columnIndex, fields.size());
+
+        Object value = result.get(columnIndex - 1).get(rowIndex);
+        wasNull = value == null;
+        if (value == null)
+            return 0;
+        if (value instanceof Byte)
+            return (byte) value;
+        long valueAsLong = Long.parseLong(value.toString());
+        if (valueAsLong == Byte.MIN_VALUE)
+            return 0;
+        if (valueAsLong < Byte.MIN_VALUE || valueAsLong > Byte.MAX_VALUE)
+            throwRangeException(value.toString(), columnIndex, Types.TINYINT);
+
+        return (byte) valueAsLong;
+    }
+
+    private void throwRangeException(String valueAsString, int columnIndex, int jdbcType) throws SQLException {
+        throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE,
+                "'" + valueAsString + "' in column '" + columnIndex + "' is outside valid range for the jdbcType " + TSDBConstants.jdbcType2TaosTypeName(jdbcType));
+    }
+
+    @Override
+    public short getShort(int columnIndex) throws SQLException {
+        checkAvailability(columnIndex, 
fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return 0; + if (value instanceof Short) + return (short) value; + long valueAsLong = Long.parseLong(value.toString()); + if (valueAsLong == Short.MIN_VALUE) + return 0; + if (valueAsLong < Short.MIN_VALUE || valueAsLong > Short.MAX_VALUE) + throwRangeException(value.toString(), columnIndex, Types.SMALLINT); + return (short) valueAsLong; + } + + @Override + public int getInt(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return 0; + if (value instanceof Integer) + return (int) value; + long valueAsLong = Long.parseLong(value.toString()); + if (valueAsLong == Integer.MIN_VALUE) + return 0; + if (valueAsLong < Integer.MIN_VALUE || valueAsLong > Integer.MAX_VALUE) + throwRangeException(value.toString(), columnIndex, Types.INTEGER); + return (int) valueAsLong; + } + + @Override + public long getLong(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return 0; + if (value instanceof Long) + return (long) value; + if (value instanceof Timestamp) { + Timestamp ts = (Timestamp) value; + switch (this.timestampPrecision) { + case TimestampPrecision.MS: + default: + return ts.getTime(); + case TimestampPrecision.US: + return ts.getTime() * 1000 + ts.getNanos() / 1000 % 1000; + case TimestampPrecision.NS: + return ts.getTime() * 1000_000 + ts.getNanos() % 1000_000; + } + } + long valueAsLong = 0; + try { + valueAsLong = Long.parseLong(value.toString()); + if (valueAsLong == Long.MIN_VALUE) + return 0; + } catch (NumberFormatException e) { + throwRangeException(value.toString(), columnIndex, Types.BIGINT); + } + return valueAsLong; + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return 0; + if (value instanceof Float) + return (float) value; + if (value instanceof Double) + return new Float((Double) value); + return Float.parseFloat(value.toString()); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) { + return 0; + } + if (value instanceof Double || value instanceof Float) + return (double) value; + return Double.parseDouble(value.toString()); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return null; + if (value instanceof byte[]) + return (byte[]) value; + if (value instanceof String) + return ((String) value).getBytes(); + if (value instanceof Long) + return Longs.toByteArray((long) value); + if (value instanceof Integer) + return Ints.toByteArray((int) value); + if (value instanceof Short) + return Shorts.toByteArray((short) value); + if (value instanceof Byte) + return new byte[]{(byte) value}; + if (value instanceof Timestamp) { + return Utils.formatTimestamp((Timestamp) value).getBytes(); + } + + 
return value.toString().getBytes(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return null; + if (value instanceof Timestamp) + return new Date(((Timestamp) value).getTime()); + return Utils.parseDate(value.toString()); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return null; + if (value instanceof Timestamp) + return new Time(((Timestamp) value).getTime()); + Time time = null; + try { + time = Utils.parseTime(value.toString()); + } catch (DateTimeParseException ignored) { + } + return time; + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return null; + if (value instanceof Timestamp) + return (Timestamp) value; + if (value instanceof Long) { + if (1_0000_0000_0000_0L > (long) value) + return Timestamp.from(Instant.ofEpochMilli((long) value)); + long epochSec = (long) value / 1000_000L; + long nanoAdjustment = (long) value % 1000_000L * 1000; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + Timestamp ret; + try { + ret = Utils.parseTimestamp(value.toString()); + } catch (Exception e) { + ret = null; + wasNull = true; + } + return ret; + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return this.metaData; + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + return value; + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + int columnIndex = columnNames.indexOf(columnLabel); + if (columnIndex == -1) + throw new SQLException("cannot find Column in result"); + return columnIndex + 1; + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + checkAvailability(columnIndex, fields.size()); + + Object value = result.get(columnIndex - 1).get(rowIndex); + wasNull = value == null; + if (value == null) + return null; + if (value instanceof Long || value instanceof Integer || value instanceof Short || value instanceof Byte) + return new BigDecimal(Long.parseLong(value.toString())); + if (value instanceof Double || value instanceof Float) + return BigDecimal.valueOf(Double.parseDouble(value.toString())); + if (value instanceof Timestamp) + return new BigDecimal(((Timestamp) value).getTime()); + BigDecimal ret; + try { + ret = new BigDecimal(value.toString()); + } catch (Exception e) { + ret = null; + } + return ret; + } + + @Override + public boolean isBeforeFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return this.rowIndex == -1 && this.numOfRows != 0; + } + + @Override + public boolean isAfterLast() throws SQLException { + if (isClosed()) + throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + return this.rowIndex >= numOfRows && this.numOfRows != 0; + } + + @Override + public boolean isFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return this.rowIndex == 0; + } + + @Override + public boolean isLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + if (this.numOfRows == 0) + return false; + return this.rowIndex == (this.numOfRows - 1); + } + + @Override + public void beforeFirst() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + synchronized (this) { + if (this.numOfRows > 0) { + this.rowIndex = -1; + } + } + } + + @Override + public void afterLast() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + synchronized (this) { + if (this.numOfRows > 0) { + this.rowIndex = this.numOfRows; + } + } + } + + @Override + public boolean first() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + if (this.numOfRows == 0) + return false; + + synchronized (this) { + this.rowIndex = 0; + } + return true; + } + + @Override + public boolean last() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + if (this.numOfRows == 0) + return false; + synchronized (this) { + this.rowIndex = this.numOfRows - 1; + } + return true; + } + + @Override + public int getRow() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + int row; + synchronized (this) { + if (this.rowIndex < 0 || this.rowIndex >= this.numOfRows) + return 0; + row = this.rowIndex + 1; + } + return row; + } + + @Override + public boolean absolute(int row) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean relative(int rows) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public boolean previous() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + return getString(columnIndex); + } + + @Override + public Statement getStatement() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + + return this.statement; + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + //TODO:did not use the specified timezone in cal + return getTimestamp(columnIndex); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java index 773bb38a8ea60216a2d6046fc8c88453fd4ff27c..37349c43320af8acc8c101a42a3ac8407aadb121 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/InFlightRequest.java @@ -1,58 +1,73 @@ package com.taosdata.jdbc.ws; +import com.taosdata.jdbc.ws.entity.Action; + +import java.util.HashMap; import java.util.Map; import java.util.concurrent.*; /** * Unfinished execution */ -public class InFlightRequest implements AutoCloseable { +public class InFlightRequest { private final int timeoutSec; private final Semaphore semaphore; - private final Map futureMap = new ConcurrentHashMap<>(); - private final ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(); - private final ScheduledFuture scheduledFuture; + private final Map> futureMap = new HashMap<>(); + private final Map> expireMap = new HashMap<>(); + private final ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r); + t.setName("timer-" + t.getId()); + return t; + }); public InFlightRequest(int timeoutSec, int concurrentNum) { this.timeoutSec = timeoutSec; this.semaphore = new Semaphore(concurrentNum); - this.scheduledFuture = scheduledExecutorService.scheduleAtFixedRate(this::removeTimeoutFuture, timeoutSec, timeoutSec, TimeUnit.MILLISECONDS); + scheduledExecutorService.scheduleWithFixedDelay(this::removeTimeoutFuture, + timeoutSec, timeoutSec, TimeUnit.MILLISECONDS); + Runtime.getRuntime().addShutdownHook(new Thread(scheduledExecutorService::shutdown)); + for (Action value : Action.values()) { + String action = value.getAction(); + if (Action.CONN.getAction().equals(action)) + continue; + futureMap.put(action, new ConcurrentHashMap<>()); + expireMap.put(action, new PriorityBlockingQueue<>()); + } } - public void put(ResponseFuture responseFuture) throws InterruptedException, TimeoutException { + public void put(ResponseFuture rf) throws InterruptedException, TimeoutException { if (semaphore.tryAcquire(timeoutSec, TimeUnit.MILLISECONDS)) { - futureMap.put(responseFuture.getId(), responseFuture); + futureMap.get(rf.getAction()).put(rf.getId(), rf); + expireMap.get(rf.getAction()).put(rf); } else { throw new TimeoutException(); } } - public ResponseFuture remove(String id) { - ResponseFuture future = futureMap.remove(id); + public ResponseFuture remove(String action, Long id) { + ResponseFuture future = futureMap.get(action).remove(id); if (null != future) { + expireMap.get(action).remove(future); semaphore.release(); } return future; } private void removeTimeoutFuture() { - futureMap.entrySet().removeIf(entry -> { - if (System.nanoTime() - entry.getValue().getTimestamp() > timeoutSec * 1_000_000L) { + expireMap.forEach((k, v) -> { + while (true) { + ResponseFuture response = v.peek(); + if (null == response || (System.nanoTime() - response.getTimestamp()) < timeoutSec * 1_000_000L) + break; + try { - entry.getValue().getFuture().completeExceptionally(new TimeoutException()); - }finally { + v.poll(); + futureMap.get(k).remove(response.getId()); + response.getFuture().completeExceptionally(new TimeoutException()); + } finally { semaphore.release(); } - return true; - } else { - return false; } }); } - - @Override - public void close() { - scheduledFuture.cancel(true); - scheduledExecutorService.shutdown(); - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java index 
f2525c30bfe686739310454aa13a562065551190..5ce7e86572765c7dbcb7ca98b8b1cb288676ec4d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/ResponseFuture.java @@ -4,18 +4,24 @@ import com.taosdata.jdbc.ws.entity.Response; import java.util.concurrent.CompletableFuture; -public class ResponseFuture { - private final String id; +public class ResponseFuture implements Comparable { + private final String action; + private final Long id; private final CompletableFuture future; private final long timestamp; - public ResponseFuture(String id, CompletableFuture future) { + public ResponseFuture(String action, Long id, CompletableFuture future) { + this.action = action; this.id = id; this.future = future; timestamp = System.nanoTime(); } - public String getId() { + public String getAction() { + return action; + } + + public Long getId() { return id; } @@ -26,4 +32,12 @@ public class ResponseFuture { long getTimestamp() { return timestamp; } + + @Override + public int compareTo(ResponseFuture rf) { + long r = this.timestamp - rf.timestamp; + if (r > 0) return 1; + if (r < 0) return -1; + return 0; + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java index 9431e26585023d90db2bc79494d6f1603d4cecd3..94b5d9b6c839f0cd5d8c809e3086674a4e5af739 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/Transport.java @@ -25,15 +25,19 @@ public class Transport implements AutoCloseable { public CompletableFuture send(Request request) { CompletableFuture completableFuture = new CompletableFuture<>(); try { - inFlightRequest.put(new ResponseFuture(request.id(), completableFuture)); + inFlightRequest.put(new ResponseFuture(request.getAction(), request.id(), completableFuture)); client.send(request.toString()); } catch (Throwable t) { - inFlightRequest.remove(request.id()); + inFlightRequest.remove(request.getAction(), request.id()); completableFuture.completeExceptionally(t); } return completableFuture; } + public void sendWithoutRep(Request request) { + client.send(request.toString()); + } + public boolean isClosed() throws SQLException { return client.isClosed(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java index d04ef1aba388c4588a7f85be5a19ac0c2776ccf1..f66bbbe6b3a8391aca849a8236f29ca6083f172b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSClient.java @@ -7,6 +7,8 @@ import org.java_websocket.handshake.ServerHandshake; import java.net.URI; import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.concurrent.*; @@ -20,6 +22,7 @@ public class WSClient extends WebSocketClient implements AutoCloseable { ThreadPoolExecutor executor; private boolean auth; + private int reqId; public boolean isAuth() { return auth; @@ -54,8 +57,8 @@ public class WSClient extends WebSocketClient implements AutoCloseable { @Override public void onOpen(ServerHandshake serverHandshake) { // certification - Request request = Request.generateConnect(user, password, database); - this.send(request.toString()); + ConnectReq connectReq = new ConnectReq(++reqId, user, password, database); + 
this.send(new Request(Action.CONN.getAction(), connectReq).toString()); } @Override @@ -64,14 +67,15 @@ public class WSClient extends WebSocketClient implements AutoCloseable { executor.submit(() -> { JSONObject jsonObject = JSONObject.parseObject(message); if (Action.CONN.getAction().equals(jsonObject.getString("action"))) { - latch.countDown(); if (Code.SUCCESS.getCode() != jsonObject.getInteger("code")) { - auth = false; this.close(); + } else { + auth = true; } + latch.countDown(); } else { Response response = parseMessage(jsonObject); - ResponseFuture remove = inFlightRequest.remove(response.id()); + ResponseFuture remove = inFlightRequest.remove(response.getAction(), response.getReqId()); if (null != remove) { remove.getFuture().complete(response); } @@ -87,7 +91,14 @@ public class WSClient extends WebSocketClient implements AutoCloseable { @Override public void onMessage(ByteBuffer bytes) { - super.onMessage(bytes); + bytes.order(ByteOrder.LITTLE_ENDIAN); + long id = bytes.getLong(); + ResponseFuture remove = inFlightRequest.remove(Action.FETCH_BLOCK.getAction(), id); + if (null != remove) { +// FetchBlockResp fetchBlockResp = new FetchBlockResp(id, bytes.slice()); + FetchBlockResp fetchBlockResp = new FetchBlockResp(id, bytes); + remove.getFuture().complete(fetchBlockResp); + } } @Override @@ -97,7 +108,6 @@ public class WSClient extends WebSocketClient implements AutoCloseable { } else { throw new RuntimeException("close connection: " + reason); } - } @Override @@ -109,6 +119,42 @@ public class WSClient extends WebSocketClient implements AutoCloseable { public void close() { super.close(); executor.shutdown(); - inFlightRequest.close(); + } + + static class ConnectReq extends Payload { + private String user; + private String password; + private String db; + + public ConnectReq(long reqId, String user, String password, String db) { + super(reqId); + this.user = user; + this.password = password; + this.db = db; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getDb() { + return db; + } + + public void setDb(String db) { + this.db = db; + } } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java index 5e2195093df47e97643805012dfcebc271c7fe73..bdd56c03ce6cb0a736ce5fe1e6e98be787c2a62f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSConnection.java @@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; import com.taosdata.jdbc.rs.RestfulDatabaseMetaData; +import com.taosdata.jdbc.ws.entity.RequestFactory; import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; @@ -16,14 +17,14 @@ public class WSConnection extends AbstractConnection { private final Transport transport; private final DatabaseMetaData metaData; private final String database; - private boolean fetchType; + private final RequestFactory factory; - public WSConnection(String url, Properties properties, Transport transport, String database, boolean fetchType) { + public WSConnection(String url, Properties properties, Transport transport, String database) { super(properties); this.transport = transport; this.database = 
database; - this.fetchType = fetchType; this.metaData = new RestfulDatabaseMetaData(url, properties.getProperty(TSDBDriver.PROPERTY_KEY_USER), this); + this.factory = new RequestFactory(); } @Override @@ -31,8 +32,7 @@ public class WSConnection extends AbstractConnection { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); -// return new WSStatement(transport, database , fetchType); - return null; + return new WSStatement(transport, database, this, factory); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSStatement.java new file mode 100644 index 0000000000000000000000000000000000000000..58e6ad31930e985acf903d34d993ddc5bbfc1002 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/WSStatement.java @@ -0,0 +1,112 @@ +package com.taosdata.jdbc.ws; + +import com.taosdata.jdbc.AbstractStatement; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.utils.SqlSyntaxValidator; +import com.taosdata.jdbc.ws.entity.*; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +public class WSStatement extends AbstractStatement { + private final Transport transport; + private final String database; + private final Connection connection; + private final RequestFactory factory; + + private boolean closed; + private ResultSet resultSet; + + public WSStatement(Transport transport, String database, Connection connection, RequestFactory factory) { + this.transport = transport; + this.database = database; + this.connection = connection; + this.factory = factory; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); + + this.execute(sql); + return this.resultSet; + } + + @Override + public int executeUpdate(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql)) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); + + this.execute(sql); + return affectedRows; + } + + @Override + public void close() throws SQLException { + if (!isClosed()) + this.closed = true; + } + + @Override + public boolean execute(String sql) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + Request request = factory.generateQuery(sql); + CompletableFuture send = transport.send(request); + + Response response; + try { + response = send.get(); + QueryResp queryResp = (QueryResp) response; + if (Code.SUCCESS.getCode() != queryResp.getCode()) { + throw TSDBError.createSQLException(queryResp.getCode(), queryResp.getMessage()); + } + if (queryResp.isUpdate()) { + this.resultSet = null; + this.affectedRows = queryResp.getAffectedRows(); + return false; + } else { + this.resultSet = new BlockResultSet(this, this.transport, this.factory, queryResp, this.database); + this.affectedRows = -1; + return true; + } + } 
catch (InterruptedException | ExecutionException e) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_WITH_EXECUTEQUERY, e.getMessage()); + } + } + + @Override + public ResultSet getResultSet() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + return this.resultSet; + } + + @Override + public int getUpdateCount() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + + return affectedRows; + } + + @Override + public Connection getConnection() throws SQLException { + return this.connection; + } + + @Override + public boolean isClosed() throws SQLException { + return closed; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java index 8d5d8272d73596d0049c3be3aa8d475f501c802f..9d44282df746ae0edd0c5bf286fa5cd32663a136 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Action.java @@ -11,8 +11,9 @@ public enum Action { QUERY("query", QueryResp.class), FETCH("fetch", FetchResp.class), FETCH_JSON("fetch_json", FetchJsonResp.class), - // fetch_block's class is meaningless - FETCH_BLOCK("fetch_block", Response.class), + FETCH_BLOCK("fetch_block", FetchBlockResp.class), + // free_result's class is meaningless + FREE_RESULT("free_result", Response.class), ; private final String action; private final Class clazz; @@ -35,7 +36,6 @@ public enum Action { static { for (Action value : Action.values()) { actions.put(value.action, value); - IdUtil.init(value.action); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java index 6b6d60858d447165a5c922f5e08a1db783f60e01..13a2b852e088fe771c586379558a33424d9a9f17 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Code.java @@ -5,7 +5,6 @@ package com.taosdata.jdbc.ws.entity; */ public enum Code { SUCCESS(0, "success"), - ; private final int code; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java index 40052f68e9209525501dba2478bec97ff96b3c04..2dbcffb40f81d5ec31d0e08d5afe00dfea558a17 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchBlockResp.java @@ -1,4 +1,21 @@ package com.taosdata.jdbc.ws.entity; -public class FetchBlockResp { +import java.nio.ByteBuffer; + +public class FetchBlockResp extends Response { + private ByteBuffer buffer; + + public FetchBlockResp(long id, ByteBuffer buffer) { + this.setAction(Action.FETCH_BLOCK.getAction()); + this.setReqId(id); + this.buffer = buffer; + } + + public ByteBuffer getBuffer() { + return buffer; + } + + public void setBuffer(ByteBuffer buffer) { + this.buffer = buffer; + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java index bdf6d51232b8492fc5d4aaa5fb9e68ffa133a8f5..74c2f46d547c916ff09c286fd90aa7eb66d6c5bc 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchJsonResp.java @@ -1,14 +1,16 @@ package com.taosdata.jdbc.ws.entity; +import com.alibaba.fastjson.JSONArray; + public class FetchJsonResp extends Response{ private long id; - private Object[][] data; + private JSONArray data; - public Object[][] getData() { + public JSONArray getData() { return data; } - public void setData(Object[][] data) { + public void setData(JSONArray data) { this.data = data; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchReq.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchReq.java new file mode 100644 index 0000000000000000000000000000000000000000..25cd9dc47241e859c9e6501117a7b19f4b3098b9 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchReq.java @@ -0,0 +1,18 @@ +package com.taosdata.jdbc.ws.entity; + +public class FetchReq extends Payload { + private long id; + + public FetchReq(long reqId, long id) { + super(reqId); + this.id = id; + } + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java index 45f5452007e4fc1122f6eb4f03e196bdbb8303ed..08229c00b167f0ad77af8e41a2df55aa645fcf9b 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/FetchResp.java @@ -8,7 +8,7 @@ public class FetchResp extends Response{ private String message; private long id; private boolean completed; - private int[] lengths; + private Integer[] lengths; private int rows; public int getCode() { @@ -43,11 +43,11 @@ public class FetchResp extends Response{ this.completed = completed; } - public int[] getLengths() { + public Integer[] getLengths() { return lengths; } - public void setLengths(int[] lengths) { + public void setLengths(Integer[] lengths) { this.lengths = lengths; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/IdUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/IdUtil.java deleted file mode 100644 index fb2aab51c61f91790b8c79a7e0898de5ab6fca8b..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/IdUtil.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.taosdata.jdbc.ws.entity; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -/** - * generate id for request - */ -public class IdUtil { - private static final Map ids = new HashMap<>(); - - public static long getId(String action) { - return ids.get(action).incrementAndGet(); - } - - public static void init(String action) { - ids.put(action, new AtomicLong(0)); - } -} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Payload.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Payload.java new file mode 100644 index 0000000000000000000000000000000000000000..1821a5fc1fd6af4268a7af207f1ecdf6bf96ff00 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Payload.java @@ -0,0 +1,16 @@ +package com.taosdata.jdbc.ws.entity; + +import com.alibaba.fastjson.annotation.JSONField; + +public class Payload { + @JSONField(name = "req_id") + private final long reqId; + + public 
Payload(long reqId) { + this.reqId = reqId; + } + + public long getReqId() { + return reqId; + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/QueryReq.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/QueryReq.java new file mode 100644 index 0000000000000000000000000000000000000000..8e6d197bc6684585e169e356bba30e1b979b51d0 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/QueryReq.java @@ -0,0 +1,18 @@ +package com.taosdata.jdbc.ws.entity; + +public class QueryReq extends Payload { + private String sql; + + public QueryReq(long reqId, String sql) { + super(reqId); + this.sql = sql; + } + + public String getSql() { + return sql; + } + + public void setSql(String sql) { + this.sql = sql; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java index ca0fdf427d55901bea85537a083f70a3159a01f5..6462664309cf92e87bffe71a0a0456afcecfe526 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Request.java @@ -1,7 +1,6 @@ package com.taosdata.jdbc.ws.entity; import com.alibaba.fastjson.JSON; -import com.alibaba.fastjson.annotation.JSONField; /** * send to taosadapter @@ -15,14 +14,14 @@ public class Request { this.args = args; } - public String id() { - return action + "_" + args.getReqId(); - } - public String getAction() { return action; } + public Long id(){ + return args.getReqId(); + } + public void setAction(String action) { this.action = action; } @@ -39,118 +38,4 @@ public class Request { public String toString() { return JSON.toJSONString(this); } - - public static Request generateConnect(String user, String password, String db) { - long reqId = IdUtil.getId(Action.CONN.getAction()); - ConnectReq connectReq = new ConnectReq(reqId, user, password, db); - return new Request(Action.CONN.getAction(), connectReq); - } - - public static Request generateQuery(String sql) { - long reqId = IdUtil.getId(Action.QUERY.getAction()); - QueryReq queryReq = new QueryReq(reqId, sql); - return new Request(Action.QUERY.getAction(), queryReq); - } - - public static Request generateFetch(long id) { - long reqId = IdUtil.getId(Action.FETCH.getAction()); - FetchReq fetchReq = new FetchReq(reqId, id); - return new Request(Action.FETCH.getAction(), fetchReq); - } - - public static Request generateFetchJson(long id) { - long reqId = IdUtil.getId(Action.FETCH_JSON.getAction()); - FetchReq fetchReq = new FetchReq(reqId, id); - return new Request(Action.FETCH_JSON.getAction(), fetchReq); - } - - public static Request generateFetchBlock(long id) { - long reqId = IdUtil.getId(Action.FETCH_BLOCK.getAction()); - FetchReq fetchReq = new FetchReq(reqId, id); - return new Request(Action.FETCH_BLOCK.getAction(), fetchReq); - } -} - -class Payload { - @JSONField(name = "req_id") - private final long reqId; - - public Payload(long reqId) { - this.reqId = reqId; - } - - public long getReqId() { - return reqId; - } -} - -class ConnectReq extends Payload { - private String user; - private String password; - private String db; - - public ConnectReq(long reqId, String user, String password, String db) { - super(reqId); - this.user = user; - this.password = password; - this.db = db; - } - - public String getUser() { - return user; - } - - public void setUser(String user) { - this.user = user; - } - - public String 
getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getDb() { - return db; - } - - public void setDb(String db) { - this.db = db; - } -} - -class QueryReq extends Payload { - private String sql; - - public QueryReq(long reqId, String sql) { - super(reqId); - this.sql = sql; - } - - public String getSql() { - return sql; - } - - public void setSql(String sql) { - this.sql = sql; - } -} - -class FetchReq extends Payload { - private long id; - - public FetchReq(long reqId, long id) { - super(reqId); - this.id = id; - } - - public long getId() { - return id; - } - - public void setId(long id) { - this.id = id; - } } \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/RequestFactory.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/RequestFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..f033d0d8ba9c4edd172fbe36283add80d2254574 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/RequestFactory.java @@ -0,0 +1,48 @@ +package com.taosdata.jdbc.ws.entity; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * generate id for request + */ +public class RequestFactory { + private final Map ids = new HashMap<>(); + + public long getId(String action) { + return ids.get(action).incrementAndGet(); + } + + public RequestFactory() { + for (Action value : Action.values()) { + String action = value.getAction(); + if (Action.CONN.getAction().equals(action) || Action.FETCH_BLOCK.getAction().equals(action)) + continue; + ids.put(action, new AtomicLong(0)); + } + } + + public Request generateQuery(String sql) { + long reqId = this.getId(Action.QUERY.getAction()); + QueryReq queryReq = new QueryReq(reqId, sql); + return new Request(Action.QUERY.getAction(), queryReq); + } + + public Request generateFetch(long id) { + long reqId = this.getId(Action.FETCH.getAction()); + FetchReq fetchReq = new FetchReq(reqId, id); + return new Request(Action.FETCH.getAction(), fetchReq); + } + + public Request generateFetchJson(long id) { + long reqId = this.getId(Action.FETCH_JSON.getAction()); + FetchReq fetchReq = new FetchReq(reqId, id); + return new Request(Action.FETCH_JSON.getAction(), fetchReq); + } + + public Request generateFetchBlock(long id) { + FetchReq fetchReq = new FetchReq(id, id); + return new Request(Action.FETCH_BLOCK.getAction(), fetchReq); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java index 780e30067fdb14eeca465cc1d50842219a58774e..604317acafd41337b95ce059f1e8a8c59269edb0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ws/entity/Response.java @@ -11,10 +11,6 @@ public class Response { @JSONField(name = "req_id") private long reqId; - public String id() { - return action + "_" + reqId; - } - public String getAction() { return action; } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java index 0719a5094ce6d9dbd96d0abb6f313a126f542621..916f8287de42a411a44025749f2b77130bc52198 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java +++ 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSConnectionTest.java
@@ -10,15 +10,17 @@ import org.junit.runner.RunWith;
 
 import java.sql.*;
 import java.util.Properties;
+import java.util.concurrent.TimeUnit;
 
 /**
  * You need to start taosadapter before testing this method
  */
 @Ignore
 @RunWith(CatalogRunner.class)
-@TestTarget(alias = "test connection with server", author = "huolibo",version = "2.0.37")
+@TestTarget(alias = "test connection with server", author = "huolibo", version = "2.0.37")
 public class WSConnectionTest {
-    private static final String host = "192.168.1.98";
+//    private static final String host = "192.168.1.98";
+    private static final String host = "127.0.0.1";
     private static final int port = 6041;
     private Connection connection;
 
@@ -46,13 +48,12 @@ public class WSConnectionTest {
         String url = "jdbc:TAOS-RS://" + host + ":" + port + "/";
         Properties properties = new Properties();
         properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
-        properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD,"taosdata");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
         properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
         connection = DriverManager.getConnection(url, properties);
     }
 
-    @Test
-//    @Test(expected = SQLException.class)
+    @Test(expected = SQLException.class)
     @Description("wrong password or user")
     public void wrongUserOrPasswordConection() throws SQLException {
         String url = "jdbc:TAOS-RS://" + host + ":" + port + "/test?user=abc&password=taosdata";
@@ -60,4 +61,21 @@ public class WSConnectionTest {
         properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
         connection = DriverManager.getConnection(url, properties);
     }
+
+    @Test
+    @Description("sleep to verify the connection is kept alive")
+    public void keepConnection() throws SQLException, InterruptedException {
+        String url = "jdbc:TAOS-RS://" + host + ":" + port + "/?user=root&password=taosdata";
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
+        connection = DriverManager.getConnection(url, properties);
+        TimeUnit.MINUTES.sleep(1);
+        Statement statement = connection.createStatement();
+        ResultSet resultSet = statement.executeQuery("show databases");
+        TimeUnit.MINUTES.sleep(1);
+        resultSet.next();
+        System.out.println(resultSet.getTimestamp(1));
+        resultSet.close();
+        statement.close();
+    }
 }
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSJsonTagTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSJsonTagTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a106e57fbfbcb71c52e75b8872319dc870472368
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSJsonTagTest.java
@@ -0,0 +1,1283 @@
+package com.taosdata.jdbc.ws;
+
+import com.taosdata.jdbc.TSDBDriver;
+import com.taosdata.jdbc.annotation.CatalogRunner;
+import com.taosdata.jdbc.annotation.Description;
+import com.taosdata.jdbc.annotation.TestTarget;
+import org.junit.*;
+import org.junit.runner.RunWith;
+import org.junit.runners.MethodSorters;
+
+import java.sql.*;
+import java.util.Properties;
+
+/**
+ * Most of the functionality is consistent with {@link com.taosdata.jdbc.JsonTagTest},
+ * except for batchInsert, which is not supported by the RESTful API.
+ * The RESTful interface cannot distinguish between an empty and a nonexistent JSON value, so the result is always null.
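+ * Cases that must fail (invalid JSON, illegal JSON keys, unsupported tag DDL) assert that a SQLException is thrown; see the case02_* and case03_* tests below.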
+ * The order of json results may change due to serialization and deserialization + */ +@Ignore +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@RunWith(CatalogRunner.class) +@TestTarget(alias = "JsonTag", author = "huolibo", version = "2.0.38") +public class WSJsonTagTest { + private static final String dbName = "json_tag_test"; + private static Connection connection; + private static Statement statement; + private static final String superSql = "create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)"; + private static final String[] sql = { + "insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(now, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')", + "insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')", + "insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')", + "insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')", + "insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')", + "insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')", + "insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')", + // test duplicate key using the first one. + "CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90}')", + + }; + + private static final String[] invalidJsonInsertSql = { + // test empty json string, save as tag is NULL + "insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')", + }; + + private static final String[] invalidJsonCreateSql = { + "CREATE TABLE if not exists jsons1_10 using jsons1 tags('')", + "CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')", + "CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')", + "CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')", + }; + + // test invalidate json + private static final String[] errorJsonInsertSql = { + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')", + "CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')", + }; + + private static final String[] errorSelectSql = { + "select * from jsons1 where jtag->tag1='beijing'", + "select * from jsons1 where jtag->'location'", + "select * from jsons1 where jtag->''", + "select * from jsons1 where jtag->''=9", + "select -> from jsons1", + "select ? 
from jsons1", + "select * from jsons1 where contains", + "select * from jsons1 where jtag->", + "select jtag->location from jsons1", + "select jtag contains location from jsons1", + "select * from jsons1 where jtag contains location", + "select * from jsons1 where jtag contains ''", + "select * from jsons1 where jtag contains 'location'='beijing'", + // test where with json tag + "select * from jsons1_1 where jtag is not null", + "select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'", + "select * from jsons1 where jtag->'tag1'={}" + }; + + @Test + @Description("insert json tag") + public void case01_InsertTest() throws SQLException { + for (String sql : sql) { + statement.execute(sql); + } + for (String sql : invalidJsonInsertSql) { + statement.execute(sql); + } + for (String sql : invalidJsonCreateSql) { + statement.execute(sql); + } + } + + @Test + @Description("error json tag insert") + public void case02_ErrorJsonInsertTest() { + int count = 0; + for (String sql : errorJsonInsertSql) { + try { + statement.execute(sql); + } catch (SQLException e) { + count++; + } + } + Assert.assertEquals(errorJsonInsertSql.length, count); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json value is array") + public void case02_ArrayErrorTest() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json value is empty") + public void case02_EmptyValueErrorTest() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json key is not ASCII") + public void case02_AbnormalKeyErrorTest1() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json key is '\\t'") + public void case02_AbnormalKeyErrorTest2() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when json key is chinese") + public void case02_AbnormalKeyErrorTest3() throws SQLException { + statement.execute("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')"); + } + + @Test + @Description("alter json tag") + public void case03_AlterTag() throws SQLException { + statement.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when add json tag") + public void case03_AddTagErrorTest() throws SQLException { + statement.execute("ALTER STABLE jsons1 add tag tag2 nchar(20)"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when delete json tag") + public void case03_dropTagErrorTest() throws SQLException { + statement.execute("ALTER STABLE jsons1 drop tag jtag"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when set some json tag value") + public void case03_AlterTagErrorTest() throws SQLException { + statement.execute("ALTER TABLE jsons1_1 SET TAG jtag=4"); + } + + @Test + @Description("exception will throw when select syntax error") + public void case04_SelectErrorTest() { + int count = 
0; + for (String sql : errorSelectSql) { + try { + statement.execute(sql); + } catch (SQLException e) { + count++; + } + } + Assert.assertEquals(errorSelectSql.length, count); + } + + @Test + @Description("normal select stable") + public void case04_select01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select dataint from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("select all column from stable") + public void case04_select02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("select json tag from stable") + public void case04_select03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1"); + ResultSetMetaData metaData = resultSet.getMetaData(); + metaData.getColumnTypeName(1); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length + invalidJsonCreateSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition tag is null") + public void case04_select04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1 where jtag is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(invalidJsonInsertSql.length + invalidJsonCreateSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition tag is not null") + public void case04_select05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1 where jtag is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length, count); + close(resultSet); + } + + @Test + @Description("select json tag") + public void case04_select06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1_8"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("{\" \":90,\"tag1\":null,\"1tag$\":2}", result); + close(resultSet); + } + + @Test + @Description("select json tag") + public void case04_select07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1_1"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}", result); + close(resultSet); + } + + @Test + @Description("select not exist json tag") + public void case04_select08() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1_9"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertNull(result); + close(resultSet); + } + + @Test + @Description("select a json tag") + public void case04_select09() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1_1"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("\"femail\"", result); + close(resultSet); + } + + @Test + @Description(value = "select a normal value", version = "2.0.37") + public void case04_selectNormal() throws SQLException { + ResultSet resultSet = statement.executeQuery("select datastr from jsons1_1"); + 
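+        // the JDBC cursor starts positioned before the first row, so next() must be called before reading a column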
resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("等等", result); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is empty") + public void case04_select10() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag2' from jsons1_6"); + resultSet.next(); + String result = resultSet.getString(1); + Assert.assertEquals("\"\"", result); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is int") + public void case04_select11() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag2' from jsons1_1"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("35", string); + close(resultSet); + } + + @Test + @Description("select a json tag, the value is boolean") + public void case04_select12() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag3' from jsons1_1"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("true", string); + close(resultSet); + } + +// @Test +// @Description("select a json tag, the value is null") +// public void case04_select13() throws SQLException { +// ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1_4"); +// resultSet.next(); +// String string = resultSet.getString(1); +// Assert.assertEquals("null", string); +// close(resultSet); +// } + + @Test + @Description("select a json tag, the value is double") + public void case04_select14() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1_5"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("1.232000000", string); + close(resultSet); + } + + @Test + @Description("select a json tag, the key is not exist") + public void case04_select15() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag10' from jsons1_4"); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertNull(string); + close(resultSet); + } + + @Test + @Description("select a json tag, the result number equals tables number") + public void case04_select16() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonCreateSql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition '=' for string") + public void case04_select19() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("select and where conditon '=' for string") + public void case04_select20() throws SQLException { + ResultSet resultSet = statement.executeQuery("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition result is null") + public void case04_select21() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + 
close(resultSet); + } + + @Test + @Description("where condition equation has chinese") + public void case04_select23() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'='收到货'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '>' for character") + public void case05_symbolOperation01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'>'beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '>=' for character") + public void case05_symbolOperation02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'>='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '<' for character") + public void case05_symbolOperation03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'<'beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition support '<=' in character") + public void case05_symbolOperation04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'<='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' in character") + public void case05_symbolOperation05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'!='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '=' empty") + public void case05_symbolOperation06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2'=''"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + // where json value is int + @Test + @Description("where condition support '=' for int") + public void case06_selectValue01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=5"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where conditional support '<' for int") + public void case06_selectValue02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<54"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '<=' for int") + public void case06_selectValue03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<=11"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where conditional support '>' for 
int") + public void case06_selectValue04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>4"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition support '>=' for int") + public void case06_selectValue05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>=5"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where conditional support '!=' for int") + public void case06_selectValue06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=5"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where conditional support '!=' for int") + public void case06_selectValue07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=55"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where conditional support '!=' for int and result is nothing") + public void case06_selectValue08() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=10"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support '=' for double") + public void case07_selectValue01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '<' for double") + public void case07_doubleOperation01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support '<=' for double") + public void case07_doubleOperation02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'<=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '>' for double") + public void case07_doubleOperation03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>1.23"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '>=' for double") + public void case07_doubleOperation04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'>=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' for double") + public void case07_doubleOperation05() throws SQLException { + ResultSet resultSet = 
statement.executeQuery("select * from jsons1 where jtag->'tag1'!=1.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' for double") + public void case07_doubleOperation06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=3.232"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(3, count); + close(resultSet); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when denominator is zero") + public void case07_doubleOperation07() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1'/0=3"); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when invalid operation") + public void case07_doubleOperation08() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1'/5=1"); + } + + @Test + @Description("where condition support '=' for boolean") + public void case08_boolOperation01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=true"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support '=' for boolean") + public void case08_boolOperation02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support '!=' for boolean") + public void case08_boolOperation03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'!=false"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test(expected = SQLException.class) + @Description("exception will throw when '>' operation for boolean") + public void case08_boolOperation04() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1'>false"); + } + + @Test + @Description("where conditional support '=null'") + public void case09_select01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where conditional support 'is null'") + public void case09_select02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition support 'is not null'") + public void case09_select03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag '='") + public void case09_select04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag_no_exist'=3"); + int count = 0; + while 
(resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag 'is null'") + public void case09_select05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag 'is null'") + public void case09_select06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag4' is null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(sql.length + invalidJsonInsertSql.length, count); + close(resultSet); + } + + @Test + @Description("where condition support one tag 'is not null'") + public void case09_select07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag3' is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("contains") + public void case09_select10() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag contains 'tag1'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("contains") + public void case09_select11() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("contains with no exist tag") + public void case09_select12() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag contains 'tag_no_exist'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition with and") + public void case10_selectAndOr01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition with 'or'") + public void case10_selectAndOr02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition with 'and'") + public void case10_selectAndOr03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition with 'or'") + public void case10_selectAndOr04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + 
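+    // Editorial sketch, not part of the original patch: nearly every case above repeats the
+    // same execute-query, count-rows, assert pattern. A hypothetical helper such as countRows()
+    // below could collapse each test body to a single assertion, e.g.
+    //     Assert.assertEquals(2, countRows("select * from jsons1 where jtag->'tag2'='beijing'"));
+    // It assumes only the java.sql imports and the shared static statement already used here.
+    private int countRows(String query) throws SQLException {
+        // try-with-resources closes the ResultSet even if an assertion or SQLException escapes
+        try (ResultSet resultSet = statement.executeQuery(query)) {
+            int count = 0;
+            while (resultSet.next()) {
+                count++;
+            }
+            return count;
+        }
+    }
+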
@Test + @Description("where condition with 'or' and contains") + public void case10_selectAndOr05() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(4, count); + close(resultSet); + } + + @Test + @Description("where condition with 'and' and contains") + public void case10_selectAndOr06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("test with tbname/normal column") + public void case11_selectTbName04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("where condition like") + public void case12_selectWhere01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,tbname from jsons1 where jtag->'tag2' like 'bei%'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition like") + public void case12_selectWhere02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test(expected = SQLException.class) + @Description("where condition in no support in") + public void case12_selectWhere03() throws SQLException { + statement.executeQuery("select * from jsons1 where jtag->'tag1' in ('beijing')"); + } + + @Test + @Description("where condition match") + public void case12_selectWhere04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' match 'ma'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition match") + public void case12_selectWhere05() throws 
SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' match 'ma$'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("where condition match") + public void case12_selectWhere06() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag2' match 'jing$'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(2, count); + close(resultSet); + } + + @Test + @Description("where condition match") + public void case12_selectWhere07() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from jsons1 where jtag->'tag1' match '收到'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("insert distinct") + public void case13_selectDistinct01() throws SQLException { + statement.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')"); + } + + @Test + @Description("distinct json tag") + public void case13_selectDistinct02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select distinct jtag->'tag1' from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("distinct json tag") + public void case13_selectDistinct03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select distinct jtag from jsons1"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(9, count); + close(resultSet); + } + + @Test + @Description("insert json tag") + public void case14_selectDump01() throws SQLException { + statement.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")"); + } + + @Test + @Description("test duplicate key with normal column") + public void case14_selectDump02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(1, count); + close(resultSet); + } + + @Test + @Description("test duplicate key with normal column") + public void case14_selectDump03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(0, count); + close(resultSet); + } + + @Test + @Description("insert json tag for join test") + public void case15_selectJoin01() throws SQLException { + statement.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)"); + statement.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')"); + statement.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')"); + + statement.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) 
tags(jtag json)"); + statement.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')"); + statement.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')"); + } + + @Test + @Description("select json tag from join") + public void case15_selectJoin02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'"); + resultSet.next(); + Assert.assertEquals("sss", resultSet.getString(1)); + close(resultSet); + } + + @Test + @Description("group by and order by json tag desc") + public void case16_selectGroupOrder01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("group by and order by json tag asc") + public void case16_selectGroupOrder02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(8, count); + close(resultSet); + } + + @Test + @Description("stddev with group by json tag") + public void case17_selectStddev01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select stddev(dataint) from jsons1 group by jtag->'tag1'"); + String s = ""; + int count = 0; + while (resultSet.next()) { + count++; + s = resultSet.getString(2); + + } + Assert.assertEquals(8, count); + Assert.assertEquals("\"femail\"", s); + close(resultSet); + } + + @Test + @Description("subquery json tag") + public void case18_selectSubquery01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select * from (select jtag, dataint from jsons1)"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(11, count); + close(resultSet); + } + + @Test + @Description("subquery some json tags") + public void case18_selectSubquery02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)"); + + ResultSetMetaData metaData = resultSet.getMetaData(); + String columnName = metaData.getColumnName(1); + Assert.assertEquals("jtag->'tag1'", columnName); + + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(11, count); + close(resultSet); + } + + @Test + @Description("query some json tags from subquery") + public void case18_selectSubquery04() throws SQLException { + ResultSet resultSet = statement.executeQuery("select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)"); + int count = 0; + while (resultSet.next()) { + count++; + } + Assert.assertEquals(11, count); + close(resultSet); + } + + @Test + @Description(value = "query metadata for json", version = "2.0.37") + public void case19_selectMetadata01() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag from jsons1"); + ResultSetMetaData metaData = resultSet.getMetaData(); + int columnType = metaData.getColumnType(1); + String columnTypeName = metaData.getColumnTypeName(1); + Assert.assertEquals(Types.OTHER, columnType); + Assert.assertEquals("JSON", 
columnTypeName); + close(resultSet); + } + + @Test + @Description(value = "query metadata for json", version = "2.0.37") + public void case19_selectMetadata02() throws SQLException { + ResultSet resultSet = statement.executeQuery("select *,jtag from jsons1"); + ResultSetMetaData metaData = resultSet.getMetaData(); + int columnType = metaData.getColumnType(6); + String columnTypeName = metaData.getColumnTypeName(6); + Assert.assertEquals(Types.OTHER, columnType); + Assert.assertEquals("JSON", columnTypeName); + close(resultSet); + } + + @Test + @Description(value = "query metadata for one json result", version = "2.0.37") + public void case19_selectMetadata03() throws SQLException { + ResultSet resultSet = statement.executeQuery("select jtag->'tag1' from jsons1_6"); + ResultSetMetaData metaData = resultSet.getMetaData(); + int columnType = metaData.getColumnType(1); + String columnTypeName = metaData.getColumnTypeName(1); + Assert.assertEquals(Types.OTHER, columnType); + Assert.assertEquals("JSON", columnTypeName); + resultSet.next(); + String string = resultSet.getString(1); + Assert.assertEquals("11", string); + close(resultSet); + } + + private void close(ResultSet resultSet) { + try { + if (null != resultSet) { + resultSet.close(); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { +// String host = "192.168.1.98"; + String host = "127.0.0.1"; + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + try { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); + connection = DriverManager.getConnection(url, properties); + statement = connection.createStatement(); + statement.execute("drop database if exists " + dbName); + statement.execute("create database if not exists " + dbName); + statement.execute("use " + dbName); + statement.execute(superSql); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (null != statement) { +// statement.execute("drop database " + dbName); + statement.close(); + } + if (null != connection) { + connection.close(); + } + } catch (SQLException e) { + e.printStackTrace(); + } + + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSQueryTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSQueryTest.java new file mode 100644 index 0000000000000000000000000000000000000000..70ea3c4d88446a31273ce9f334f4d8c0a8a72285 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSQueryTest.java @@ -0,0 +1,62 @@ +package com.taosdata.jdbc.ws; + +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.annotation.CatalogRunner; +import com.taosdata.jdbc.annotation.Description; +import com.taosdata.jdbc.annotation.TestTarget; +import org.junit.*; +import org.junit.runner.RunWith; + +import java.sql.*; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +@Ignore +@RunWith(CatalogRunner.class) +@TestTarget(alias = "query test", author = "huolibo", version = "2.0.38") +@FixMethodOrder +public class WSQueryTest { + private static final String host = "192.168.1.98"; + private static final int port = 6041; + private static final String databaseName = "ws_query"; + private static final String tableName = "wq"; + private Connection connection; + private long now; + + @Description("query") + @Test + public void queryBlock() throws SQLException, 
InterruptedException { + IntStream.range(1, 100).parallel().forEach(x -> { + try { + Statement statement = connection.createStatement(); + + statement.execute("insert into " + databaseName + "." + tableName + " values(now+100s, 100)"); + + ResultSet resultSet = statement.executeQuery("select * from " + databaseName + "." + tableName); + resultSet.next(); + Assert.assertEquals(100, resultSet.getInt(2)); + statement.close(); + TimeUnit.SECONDS.sleep(10); + } catch (SQLException e) { + e.printStackTrace(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + }); + } + + @Before + public void before() throws SQLException { + String url = "jdbc:TAOS-RS://" + host + ":" + port + "/test?user=root&password=taosdata"; + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); + connection = DriverManager.getConnection(url, properties); + Statement statement = connection.createStatement(); + statement.execute("drop database if exists " + databaseName); + statement.execute("create database " + databaseName); + statement.execute("use " + databaseName); + statement.execute("create table if not exists " + databaseName + "." + tableName + "(ts timestamp, f int)"); + statement.close(); + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSSelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSSelectTest.java new file mode 100644 index 0000000000000000000000000000000000000000..fa48480592199e2efc1f4455786a590417f8fafc --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ws/WSSelectTest.java @@ -0,0 +1,83 @@ +package com.taosdata.jdbc.ws; + +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.enums.TimestampFormat; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +@Ignore +public class WSSelectTest { + // private static final String host = "192.168.1.98"; + private static final String host = "127.0.0.1"; + private static final int port = 6041; + private static Connection connection; + private static final String databaseName = "driver"; + + private static void testInsert() throws SQLException { + Statement statement = connection.createStatement(); + long cur = System.currentTimeMillis(); + List<String> timeList = new ArrayList<>(); + for (long i = 0L; i < 3000; i++) { + long t = cur + i; + timeList.add("insert into " + databaseName + ".alltype_query values(" + t + ",1,1,1,1,1,1,1,1,1,1,1,'test_binary','test_nchar')"); + } + for (int i = 0; i < 3000; i++) { + statement.execute(timeList.get(i)); + } + statement.close(); + } + + @Test + public void testWSSelect() throws SQLException { + Statement statement = connection.createStatement(); + int count = 0; + long start = System.nanoTime(); + for (int i = 0; i < 1000; i++) { + ResultSet resultSet = statement.executeQuery("select ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from " + databaseName + ".alltype_query limit 3000"); + while (resultSet.next()) { + count++; + resultSet.getTimestamp(1); + resultSet.getBoolean(2); + resultSet.getInt(3); + resultSet.getInt(4); + resultSet.getInt(5); + resultSet.getLong(6); + resultSet.getInt(7); + resultSet.getInt(8); + resultSet.getLong(9); + resultSet.getLong(10); + resultSet.getFloat(11); + resultSet.getDouble(12); + resultSet.getString(13); + resultSet.getString(14); + } + } + long d = System.nanoTime() - 
start; + System.out.println(d / 1000); + System.out.println(count); + statement.close(); + } + + @BeforeClass + public static void beforeClass() throws SQLException { + String url = "jdbc:TAOS-RS://" + host + ":" + port + "/?user=root&password=taosdata"; + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, String.valueOf(TimestampFormat.UTC)); + properties.setProperty(TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT, "100000"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true"); + connection = DriverManager.getConnection(url, properties); + Statement statement = connection.createStatement(); + statement.execute("drop database if exists " + databaseName); + statement.execute("create database " + databaseName); + statement.execute("create table " + databaseName + ".alltype_query(ts timestamp, c1 bool,c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned, c10 float, c11 double, c12 binary(20), c13 nchar(30) )"); + statement.close(); + testInsert(); + } +} diff --git a/src/connector/node-red-contrib-tdengine/package-lock.json b/src/connector/node-red-contrib-tdengine/package-lock.json index 37d4784caaa8e225290991cf8f598df2d7d122e8..3d2f9977419c62649b3eb332014f4baaa7b0810f 100644 --- a/src/connector/node-red-contrib-tdengine/package-lock.json +++ b/src/connector/node-red-contrib-tdengine/package-lock.json @@ -1192,9 +1192,9 @@ } }, "follow-redirects": { - "version": "1.14.7", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz", - "integrity": "sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==" + "version": "1.14.8", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.8.tgz", + "integrity": "sha512-1x0S9UVJHsQprFcEC/qnNzBLcIxsjAV905f/UkQxbclCsoTWlacCNOpQa/anodLl2uaEKFhfWOvM2Qg77+15zA==" }, "form-data": { "version": "4.0.0", diff --git a/src/connector/nodejs/examples/stmtBindParamBatchSample.js b/src/connector/nodejs/examples/stmtBindParamBatchSample.js new file mode 100755 index 0000000000000000000000000000000000000000..030958bfd16faf88f79c6d4476defb76c0a3e990 --- /dev/null +++ b/src/connector/nodejs/examples/stmtBindParamBatchSample.js @@ -0,0 +1,111 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var cursor = conn.cursor(); + +function executeUpdate(updateSql) { + console.log(updateSql); + cursor.execute(updateSql); +} +function executeQuery(querySql) { + + let query = cursor.query(querySql); + query.execute().then((result => { + console.log(querySql); + result.pretty(); + })); +} + +function stmtBindParamBatchSample() { + let db = 'node_test_db'; + let table = 'stmt_taos_bind_param_batch'; + + let createDB = `create database if not exists ${db} keep 3650;`; + let dropDB = `drop database if exists ${db};`; + let useDB = `use ${db}`; + let createTable = `create table if not exists ${table} ` + + `(ts timestamp,` + + `bl bool,` + + `i8 tinyint,` + + `i16 smallint,` + + `i32 int,` + + `i64 bigint,` + + `f32 float,` + + `d64 double,` + + `bnr binary(20),` + + `blob nchar(20),` + + `u8 tinyint unsigned,` + + `u16 smallint unsigned,` + + `u32 int unsigned,` + + `u64 bigint unsigned` + + `)tags(` + + `t_bl bool,` + + `t_i8 tinyint,` + + `t_i16 smallint,` + + `t_i32 int,` + + `t_i64 bigint,` + + `t_f32 float,` + + `t_d64 double,` + + `t_bnr binary(20),` + + `t_blob nchar(20),` + + `t_u8 tinyint unsigned,` + 
+ `t_u16 smallint unsigned,` + + `t_u32 int unsigned,` + + `t_u64 bigint unsigned` + + `);`; + let querySql = `select * from ${table};`; + let insertSql = `insert into ? using ${table} tags(?,?,?,?,?,?,?,?,?,?,?,?,?) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?);`; + + executeUpdate(dropDB); + executeUpdate(createDB); + executeUpdate(useDB); + executeUpdate(createTable); + + let mBinds = new taos.TaosMultiBindArr(14); + mBinds.multiBindTimestamp([1642435200000, 1642435300000, 1642435400000, 1642435500000, 1642435600000]); + mBinds.multiBindBool([true, false, true, undefined, null]); + mBinds.multiBindTinyInt([-127, 3, 127, null, undefined]); + mBinds.multiBindSmallInt([-256, 0, 256, null, undefined]); + mBinds.multiBindInt([-1299, 0, 1233, null, undefined]); + mBinds.multiBindBigInt([16424352000002222n, -16424354000001111n, 0, null, undefined]); + mBinds.multiBindFloat([12.33, 0, -3.1415, null, undefined]); + mBinds.multiBindDouble([3.141592653, 0, -3.141592653, null, undefined]); + mBinds.multiBindBinary(['TDengine_Binary', '', 'taosdata涛思数据', null, undefined]); + mBinds.multiBindNchar(['taos_data_nchar', 'taosdata涛思数据', '', null, undefined]); + mBinds.multiBindUTinyInt([0, 127, 254, null, undefined]); + mBinds.multiBindUSmallInt([0, 256, 512, null, undefined]); + mBinds.multiBindUInt([0, 1233, 4294967294, null, undefined]); + mBinds.multiBindUBigInt([16424352000002222n, 36424354000001111n, 0, null, undefined]); + + let tags = new taos.TaosBind(13); + + tags.bindBool(true); + tags.bindTinyInt(127); + tags.bindSmallInt(32767); + tags.bindInt(1234555); + tags.bindBigInt(-164243520000011111n); + tags.bindFloat(214.02); + tags.bindDouble(2.01); + tags.bindBinary('taosdata涛思数据'); + tags.bindNchar('TDengine数据'); + tags.bindUTinyInt(254); + tags.bindUSmallInt(65534); + tags.bindUInt(4294967290 / 2); + tags.bindUBigInt(164243520000011111n); + + cursor.stmtInit(); + cursor.stmtPrepare(insertSql); + cursor.stmtSetTbnameTags('s_01', tags.getBind()); + cursor.stmtBindParamBatch(mBinds.getMultiBindArr()); + cursor.stmtAddBatch(); + cursor.stmtExecute(); + cursor.stmtClose(); + + executeQuery(querySql); + executeUpdate(dropDB); + +} + +stmtBindParamBatchSample(); +setTimeout(() => { + conn.close(); +}, 2000); diff --git a/src/connector/nodejs/examples/stmtBindParamSample.js b/src/connector/nodejs/examples/stmtBindParamSample.js new file mode 100644 index 0000000000000000000000000000000000000000..ee1354aff0a1052a67d961de39c147c6cbe616dd --- /dev/null +++ b/src/connector/nodejs/examples/stmtBindParamSample.js @@ -0,0 +1,82 @@ +// const TaosBind = require('../nodetaos/taosBind'); +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var cursor = conn.cursor(); + +function executeUpdate(updateSql) { + console.log(updateSql); + cursor.execute(updateSql); +} +function executeQuery(querySql) { + + let query = cursor.query(querySql); + query.execute().then((result => { + console.log(querySql); + result.pretty(); + })); +} + +function stmtBindParamSample() { + let db = 'node_test_db'; + let table = 'stmt_taos_bind_sample'; + + let createDB = `create database if not exists ${db} keep 3650;`; + let dropDB = `drop database if exists ${db};`; + let useDB = `use ${db}`; + let createTable = `create table if not exists ${table} ` + + `(ts timestamp,` + + `nil int,` + + `bl bool,` + + `i8 tinyint,` + + `i16 smallint,` + + `i32 int,` + + `i64 bigint,` + + `f32 float,` + + `d64 double,` + + `bnr binary(20),` + + `blob nchar(20),` + + `u8 tinyint unsigned,` + + `u16 smallint unsigned,` + + `u32 
int unsigned,` + + `u64 bigint unsigned);`; + let querySql = `select * from ${table};`; + let insertSql = `insert into ? values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);` + + executeUpdate(dropDB); + executeUpdate(createDB); + executeUpdate(useDB); + executeUpdate(createTable); + + let binds = new taos.TaosBind(15); + binds.bindTimestamp(1642435200000); + binds.bindNil(); + binds.bindBool(true); + binds.bindTinyInt(127); + binds.bindSmallInt(32767); + binds.bindInt(1234555); + binds.bindBigInt(-164243520000011111n); + binds.bindFloat(214.02); + binds.bindDouble(2.01); + binds.bindBinary('taosdata涛思数据'); + binds.bindNchar('TDengine数据'); + binds.bindUTinyInt(254); + binds.bindUSmallInt(65534); + binds.bindUInt(4294967294); + binds.bindUBigInt(164243520000011111n); + + cursor.stmtInit(); + cursor.stmtPrepare(insertSql); + cursor.stmtSetTbname(table); + cursor.stmtBindParam(binds.getBind()); + cursor.stmtAddBatch(); + cursor.stmtExecute(); + cursor.stmtClose(); + + executeQuery(querySql); + executeUpdate(dropDB); +} + +stmtBindParamSample(); +setTimeout(() => { + conn.close(); +}, 2000); \ No newline at end of file diff --git a/src/connector/nodejs/examples/stmtBindSingleParamBatchSample.js b/src/connector/nodejs/examples/stmtBindSingleParamBatchSample.js new file mode 100755 index 0000000000000000000000000000000000000000..3b424b8d0cdc0d18997c2224fdac499e42c0c57d --- /dev/null +++ b/src/connector/nodejs/examples/stmtBindSingleParamBatchSample.js @@ -0,0 +1,101 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var cursor = conn.cursor(); + +function executeUpdate(updateSql) { + console.log(updateSql); + cursor.execute(updateSql); +} +function executeQuery(querySql) { + + let query = cursor.query(querySql); + query.execute().then((result => { + console.log(querySql); + result.pretty(); + + })); +} + +function stmtSingleParaBatchSample() { + let db = 'node_test_db'; + let table = 'stmt_taos_bind_single_bind_batch'; + + let createDB = `create database if not exists ${db} keep 3650;`; + let dropDB = `drop database if exists ${db};`; + let useDB = `use ${db}`; + let createTable = `create table if not exists ${table} ` + + `(ts timestamp,` + + `bl bool,` + + `i8 tinyint,` + + `i16 smallint,` + + `i32 int,` + + `i64 bigint,` + + `f32 float,` + + `d64 double,` + + `bnr binary(20),` + + `blob nchar(20),` + + `u8 tinyint unsigned,` + + `u16 smallint unsigned,` + + `u32 int unsigned,` + + `u64 bigint unsigned` + + `)tags(` + + `jsonTag json` + + `);`; + let querySql = `select * from ${table};`; + let insertSql = `insert into ? using ${table} tags(?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?);` + + executeUpdate(dropDB); + executeUpdate(createDB); + executeUpdate(useDB); + executeUpdate(createTable); + + // normal colum values. 
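+  // Note: unlike stmtBindParamBatch, which takes one TaosMultiBindArr covering every column at
+  // once, stmtBindSingleParamBatch binds one column per call. Each multiBindXxx call below builds
+  // the bind data for a single column, and each bind is then attached to the statement together
+  // with its zero-based column index further down.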
+ let mbind = new taos.TaosMultiBind(); + let tsMBind = mbind.multiBindTimestamp([1642435200000, 1642435300000, 1642435400000, 1642435500000, 1642435600000]) + let boolMbind = mbind.multiBindBool([true, false, true, undefined, null]); + let tinyIntMbind = mbind.multiBindTinyInt([-127, 3, 127, null, undefined]); + let smallIntMbind = mbind.multiBindSmallInt([-256, 0, 256, null, undefined]); + let intMbind = mbind.multiBindInt([-1299, 0, 1233, null, undefined]); + let bigIntMbind = mbind.multiBindBigInt([16424352000002222n, -16424354000001111n, 0, null, undefined]); + let floatMbind = mbind.multiBindFloat([12.33, 0, -3.1415, null, undefined]); + let doubleMbind = mbind.multiBindDouble([3.141592653, 0, -3.141592653, null, undefined]); + let binaryMbind = mbind.multiBindBinary(['TDengine_Binary', '', 'taosdata涛思数据', null, undefined]); + let ncharMbind = mbind.multiBindNchar(['taos_data_nchar', 'taosdata涛思数据', '', null, undefined]); + let uTinyIntMbind = mbind.multiBindUTinyInt([0, 127, 254, null, undefined]); + let uSmallIntMbind = mbind.multiBindUSmallInt([0, 256, 512, null, undefined]); + let uIntMbind = mbind.multiBindUInt([0, 1233, 4294967294, null, undefined]); + let uBigIntMbind = mbind.multiBindUBigInt([16424352000002222n, 36424354000001111n, 0, null, undefined]); + + // tags value. + let tags = new taos.TaosBind(1); + tags.bindJson('{\"key1\":\"taosdata\",\"key2\":null,\"key3\":\"TDengine涛思数据\",\"key4\":3.2}'); + + cursor.stmtInit(); + cursor.stmtPrepare(insertSql); + cursor.stmtSetTbnameTags('s_01', tags.getBind()); + cursor.stmtBindSingleParamBatch(tsMBind, 0); + cursor.stmtBindSingleParamBatch(boolMbind, 1); + cursor.stmtBindSingleParamBatch(tinyIntMbind, 2); + cursor.stmtBindSingleParamBatch(smallIntMbind, 3); + cursor.stmtBindSingleParamBatch(intMbind, 4); + cursor.stmtBindSingleParamBatch(bigIntMbind, 5); + cursor.stmtBindSingleParamBatch(floatMbind, 6); + cursor.stmtBindSingleParamBatch(doubleMbind, 7); + cursor.stmtBindSingleParamBatch(binaryMbind, 8); + cursor.stmtBindSingleParamBatch(ncharMbind, 9); + cursor.stmtBindSingleParamBatch(uTinyIntMbind, 10); + cursor.stmtBindSingleParamBatch(uSmallIntMbind, 11); + cursor.stmtBindSingleParamBatch(uIntMbind, 12); + cursor.stmtBindSingleParamBatch(uBigIntMbind, 13); + + cursor.stmtAddBatch(); + cursor.stmtExecute(); + cursor.stmtClose(); + + executeQuery(querySql); + executeUpdate(dropDB); +} +stmtSingleParaBatchSample(); +setTimeout(() => { + conn.close(); +}, 2000); diff --git a/src/connector/nodejs/examples/stmtUseResultSample.js b/src/connector/nodejs/examples/stmtUseResultSample.js new file mode 100755 index 0000000000000000000000000000000000000000..b9f55545b0892d575c952308febfa9055a4f570a --- /dev/null +++ b/src/connector/nodejs/examples/stmtUseResultSample.js @@ -0,0 +1,100 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var cursor = conn.cursor(); + +function executeUpdate(updateSql) { + console.log(updateSql); + cursor.execute(updateSql); +} +function executeQuery(querySql) { + let query = cursor.query(querySql); + query.execute().then((result => { + console.log(querySql); + result.pretty(); + })); +} + +function stmtUseResultSample() { + let db = 'node_test_db'; + let table = 'stmt_use_result'; + let subTable = 's1_0'; + + let createDB = `create database if not exists ${db} keep 3650;`; + let dropDB = `drop database if exists ${db};`; + let useDB = `use ${db}`; + let createTable = `create table if not exists ${table} ` + + `(ts timestamp,` + + `bl bool,` + + `i8 tinyint,` + + `i16 
smallint,` + + `i32 int,` + + `i64 bigint,` + + `f32 float,` + + `d64 double,` + + `bnr binary(20),` + + `blob nchar(20),` + + `u8 tinyint unsigned,` + + `u16 smallint unsigned,` + + `u32 int unsigned,` + + `u64 bigint unsigned` + + `)tags(` + + `jsonTag json` + + `);`; + let createSubTable = `create table if not exists ${subTable} using ${table} tags('{\"key1\":\"taosdata\",\"key2\":null,\"key3\":\"TDengine涛思数据\",\"key4\":3.2}')`; + let querySql = `select * from ${table} where i32>? and bnr = ? `; + let insertSql = `insert into ? values(?,?,?,?,?,?,?,?,?,?,?,?,?,?);`; + + let mBinds = new taos.TaosMultiBindArr(14); + mBinds.multiBindTimestamp([1642435200000,1642435300000,1642435400000,1642435500000,1642435600000]); + mBinds.multiBindBool([true,false,true,undefined,null]); + mBinds.multiBindTinyInt([-127,3,127,null,undefined]); + mBinds.multiBindSmallInt([-256,0,256,null,undefined]); + mBinds.multiBindInt([-1299,0,1233,null,undefined]); + mBinds.multiBindBigInt([16424352000002222n,-16424354000001111n,0,null,undefined]); + mBinds.multiBindFloat([12.33,0,-3.1415,null,undefined]); + mBinds.multiBindDouble([3.141592653,0,-3.141592653,null,undefined]); + mBinds.multiBindBinary(['TDengine_Binary','','taosdata涛思数据',null,undefined]); + mBinds.multiBindNchar(['taos_data_nchar','taosdata涛思数据','',null,undefined]); + mBinds.multiBindUTinyInt([0,127, 254,null,undefined]); + mBinds.multiBindUSmallInt([0,256,512,null,undefined]); + mBinds.multiBindUInt([0,1233,4294967294,null,undefined]); + mBinds.multiBindUBigInt([16424352000002222n,36424354000001111n,0,null,undefined]); + + // executeUpdate(dropDB); + executeUpdate(createDB); + executeUpdate(useDB); + executeUpdate(createTable); + executeUpdate(createSubTable); + + //stmt bind values + cursor.stmtInit(); + cursor.stmtPrepare(insertSql); + cursor.loadTableInfo([subTable]); + cursor.stmtSetTbname(subTable); + cursor.stmtBindParamBatch(mBinds.getMultiBindArr()); + cursor.stmtAddBatch(); + cursor.stmtExecute(); + cursor.stmtClose(); + + // stmt select with normal column. + let condition1 = new taos.TaosBind(2); + condition1.bindInt(0); + condition1.bindNchar('taosdata涛思数据'); + cursor.stmtInit(); + cursor.stmtPrepare(querySql); + cursor.stmtBindParam(condition1.getBind()); + cursor.stmtExecute(); + cursor.stmtUseResult(); + cursor.stmtClose(); + + cursor.fetchall(); + console.log(cursor.fields); + console.log(cursor.data); + + executeUpdate(dropDB); +} + +stmtUseResultSample(); +setTimeout(() => { + conn.close(); +}, 2000); \ No newline at end of file diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 380d96505de0d6af4cf0cbf5573ffe204453850d..0a81a0c79b21b2c2869e8e747df76e673c65b2eb 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -373,7 +373,7 @@ function CTaosInterface(config = null, pass = false) { , 'taos_stmt_execute': [ref.types.int, [ref.types.void_ptr]] // TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt) - , 'taos_stmt_use_result': [ref.types.int, [ref.types.void_ptr]] + , 'taos_stmt_use_result': [ref.types.void_ptr, [ref.types.void_ptr]] // int taos_stmt_close(TAOS_STMT *stmt) , 'taos_stmt_close': [ref.types.int, [ref.types.void_ptr]] @@ -934,7 +934,15 @@ CTaosInterface.prototype.stmtUseResult = function stmtUseResult(stmt) { * @returns 0 for success, non-zero for failure. 
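 * @example
 * // A hedged sketch of the two input shapes the new implementation accepts;
 * // 'd1001' and 'd1002' are placeholder table names and `ci`/`conn` stand in
 * // for a CTaosInterface instance and a native connection handle.
 * //   ci.loadTableInfo(conn, 'd1001,d1002');        // comma-separated string
 * //   ci.loadTableInfo(conn, ['d1001', 'd1002']);   // array, joined via toString()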
 */
CTaosInterface.prototype.loadTableInfo = function loadTableInfo(taos, tableList) {
-  return this.libtaos.taos_load_table_info(taos, tableList)
+  let _tableListBuf = Buffer.alloc(ref.sizeof.pointer);
+  let _listStr = tableList.toString();
+
+  if ((_.isString(tableList)) || (_.isArray(tableList))) {
+    ref.set(_tableListBuf, 0, ref.allocCString(_listStr), ref.types.char_ptr);
+    return this.libtaos.taos_load_table_info(taos, _tableListBuf);
+  } else {
+    throw new errors.InterfaceError("Unsupported tableList input");
+  }
 }
 
 /**
diff --git a/src/connector/nodejs/nodetaos/cursor.js b/src/connector/nodejs/nodetaos/cursor.js
index 4d2251b9799d1afb89a7ce90297d3918747144e9..5969d4f344affa49ebbf81329729bff4733e116b 100644
--- a/src/connector/nodejs/nodetaos/cursor.js
+++ b/src/connector/nodejs/nodetaos/cursor.js
@@ -30,6 +30,7 @@ function TDengineCursor(connection = null) {
   this._fields = null;
   this.data = [];
   this.fields = null;
+  this._stmt = null;
   if (connection != null) {
     this._connection = connection
     this._chandle = connection._chandle //pass through, just need library loaded.
@@ -488,7 +489,6 @@ TDengineCursor.prototype.schemalessInsert = function schemalessInsert(lines, pro
   let errorNo = this._chandle.errno(this._result);
   if (errorNo != 0) {
     throw new errors.InterfaceError(errorNo + ":" + this._chandle.errStr(this._result));
-    this._chandle.freeResult(this._result);
   }
   this._chandle.freeResult(this._result);
 }
@@ -499,7 +499,7 @@ TDengineCursor.prototype.schemalessInsert = function schemalessInsert(lines, pro
  * @returns Not NULL returned for success, and NULL for failure.
  *
  */
- TDengineCursor.prototype.stmtInit = function stmtInit() {
+TDengineCursor.prototype.stmtInit = function stmtInit() {
   let stmt = null
   stmt = this._chandle.stmtInit(this._connection._conn);
   if (stmt == null || stmt == undefined) {
@@ -532,7 +532,7 @@ TDengineCursor.prototype.stmtPrepare = function stmtPrepare(sql) {
  * @param {TaosBind} tableName target table name you want to bind
  * @returns 0 for success, non-zero for failure.
  */
-TDengineCursor.prototype.stmtSetTbname = function stmtSetTbname(tableName){
+TDengineCursor.prototype.stmtSetTbname = function stmtSetTbname(tableName) {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
@@ -552,11 +552,11 @@ TDengineCursor.prototype.stmtSetTbname = function stmtSetTbname(tableName){
  * @param {TaosMultiBind} tags used to set tag values for the target table.
  * @returns
  */
-TDengineCursor.prototype.stmtSetTbnameTags = function stmtSetTbnameTags(tableName,tags){
+TDengineCursor.prototype.stmtSetTbnameTags = function stmtSetTbnameTags(tableName, tags) {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
-    let stmtPrepare = this._chandle.stmtSetTbnameTags(this._stmt, tableName,tags);
+    let stmtPrepare = this._chandle.stmtSetTbnameTags(this._stmt, tableName, tags);
     if (stmtPrepare != 0) {
       throw new errors.DatabaseError(this._chandle.stmtErrStr(this._stmt));
     } else {
@@ -573,7 +573,7 @@ TDengineCursor.prototype.stmtSetTbnameTags = function stmtSetTbnameTags(tableNam
 * @param {*} subTableName table name which belongs to a stable
 * @returns 0 for success, non-zero for failure.
 */
-TDengineCursor.prototype.stmtSetSubTbname = function stmtSetSubTbname(subTableName){
+TDengineCursor.prototype.stmtSetSubTbname = function stmtSetSubTbname(subTableName) {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
@@ -594,7 +594,7 @@ TDengineCursor.prototype.stmtSetSubTbname = function stmtSetSubTbname(subTableNa
 * @param {*} binds points to an array that contains the whole line's data.
 * @returns 0 for success, non-zero for failure.
 */
-TDengineCursor.prototype.bindParam = function bindParam(binds) {
+TDengineCursor.prototype.stmtBindParam = function stmtBindParam(binds) {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
@@ -613,11 +613,11 @@ TDengineCursor.prototype.bindParam = function bindParam(binds) {
 * @param {*} colIndex the column's index in the prepared sql statement; it starts from 0.
 * @returns 0 for success, non-zero for failure.
 */
-TDengineCursor.prototype.stmtBindSingleParamBatch = function stmtBindSingleParamBatch(mbind,colIndex){
+TDengineCursor.prototype.stmtBindSingleParamBatch = function stmtBindSingleParamBatch(mbind, colIndex) {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
-    let stmtPrepare = this._chandle.stmtBindSingleParamBatch(this._stmt, mbind,colIndex);
+    let stmtPrepare = this._chandle.stmtBindSingleParamBatch(this._stmt, mbind, colIndex);
     if (stmtPrepare != 0) {
       throw new errors.DatabaseError(this._chandle.stmtErrStr(this._stmt));
     } else {
@@ -634,7 +634,7 @@ TDengineCursor.prototype.stmtBindSingleParamBatch = function stmtBindSingleParam
 * n sql statement.
 * @returns 0 for success, non-zero for failure.
 */
-TDengineCursor.prototype.stmtBindParamBatch = function stmtBindParamBatch(mbinds){
+TDengineCursor.prototype.stmtBindParamBatch = function stmtBindParamBatch(mbinds) {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
@@ -656,7 +656,7 @@ TDengineCursor.prototype.stmtBindParamBatch = function stmtBindParamBatch(mbinds
 * @param {*} stmt
 * @returns 0 for success, non-zero for failure.
 */
-TDengineCursor.prototype.addBatch = function addBatch() {
+TDengineCursor.prototype.stmtAddBatch = function stmtAddBatch() {
   if (this._stmt == null) {
     throw new errors.DatabaseError("stmt is null,init stmt first");
   } else {
@@ -694,13 +694,19 @@ TDengineCursor.prototype.stmtExecute = function stmtExecute() {
 * User application should free it with API 'FreeResult' at the end.
 * @returns Not NULL for success, NULL for failure.
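 * @example
 * // A hedged sketch mirroring examples/stmtUseResultSample.js; the SQL and the
 * // bound values are placeholders. stmtUseResult() keeps the result handle on
 * // the cursor, so fetchall() can read the rows after the statement is closed.
 * cursor.stmtInit();
 * cursor.stmtPrepare('select * from stmt_use_result where i32 > ? and bnr = ? ;');
 * let condition = new taos.TaosBind(2);
 * condition.bindInt(0);
 * condition.bindNchar('taosdata涛思数据');
 * cursor.stmtBindParam(condition.getBind());
 * cursor.stmtExecute();
 * cursor.stmtUseResult();
 * cursor.stmtClose();
 * cursor.fetchall();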
 */
-TDengineCursor.prototype.stmtUseResult = function stmtUseResult(){
+TDengineCursor.prototype.stmtUseResult = function stmtUseResult() {
   if (this._stmt != null) {
-    let stmtExecRes = this._chandle.stmtUseResult(this._stmt);
-    if (stmtExecRes != 0) {
-      throw new errors.DatabaseError(this._chandle.stmtErrStr(this._stmt));
+    this._result = this._chandle.stmtUseResult(this._stmt);
+    let res = this._chandle.errno(this._result);
+    if (res != 0) {
+      throw new errors.DatabaseError(this._chandle.errStr(this._result));
     } else {
-      console.log("stmtUseResult success.")
+      console.log("stmtUseResult success.");
+      let fieldCount = this._chandle.fieldsCount(this._result);
+      if (fieldCount != 0) {
+        this._fields = this._chandle.useResult(this._result);
+        this.fields = this._fields;
+      }
     }
   } else {
     throw new errors.DatabaseError("stmt is null,init stmt first");
@@ -713,11 +719,11 @@
 * @param {*} tableList tables that need to load meta info, given in an array
 * @returns 0 for success, non-zero for failure.
 */
-TDengineCursor.prototype.loadTableInfo = function loadTableInfo(tableList){
+TDengineCursor.prototype.loadTableInfo = function loadTableInfo(tableList) {
   if (this._connection._conn != null) {
-    let stmtExecRes = this._chandle.loadTableInfo(this._connection._conn,tableList);
+    let stmtExecRes = this._chandle.loadTableInfo(this._connection._conn, tableList);
     if (stmtExecRes != 0) {
-      throw new errors.DatabaseError(this._chandle.stmtErrStr(this._stmt));
+      throw new errors.DatabaseError(`loadTableInfo() failed, code ${stmtExecRes}`);
     } else {
       console.log("loadTableInfo success.")
     }
diff --git a/src/connector/nodejs/nodetaos/taosBind.js b/src/connector/nodejs/nodetaos/taosBind.js
index 161e56ee709020c439184bc7a0f7c76ab6b3c139..b70d035b9fcf2eff5d0d1edb7114b5549eda83d4 100644
--- a/src/connector/nodejs/nodetaos/taosBind.js
+++ b/src/connector/nodejs/nodetaos/taosBind.js
@@ -14,20 +14,19 @@ var u = ref.types.int64;
 var allocated = ref.types.uint32;
 
 var TAOS_BIND = StructType({
-  buffer_type : bufferType,
-  buffer : buffer,
-  buffer_length : bufferLength,
-  length : length,
-  is_null : isNull,
-  is_unsigned : is_unsigned,
-  error : error,
-  u : u,
+  buffer_type: bufferType,
+  buffer: buffer,
+  buffer_length: bufferLength,
+  length: length,
+  is_null: isNull,
+  is_unsigned: is_unsigned,
+  error: error,
+  u: u,
   allocated: allocated,
 });
 
 class TaosBind {
   constructor(num) {
-    console.log(TAOS_BIND.size);
     this.buf = Buffer.alloc(TAOS_BIND.size * num);
     this.num = num;
     this.index = 0;
@@ -36,15 +35,15 @@ class TaosBind {
    * Used to bind null value for all data types that tdengine supports.
    */
   bindNil() {
-    if(!this._isOutOfBound()){
+    if (!this._isOutOfBound()) {
       let nil = new TAOS_BIND({
-        buffer_type : taosConst.C_NULL,
-        is_null : ref.alloc(ref.types.int32, 1),
+        buffer_type: taosConst.C_NULL,
+        is_null: ref.alloc(ref.types.int32, 1),
       });
-      
+
       TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, nil);
       this.index++
-    }else{
+    } else {
       throw new TDError(`bindNil() failed,since index:${this.index} is out of Buffer bound ${this.num}.`);
     }
   }
@@ -54,18 +53,18 @@ class TaosBind {
 * @param {bool} val is not null bool value, true or false.
*/ bindBool(val) { - if(!this._isOutOfBound()){ + if (!this._isOutOfBound()) { let bl = new TAOS_BIND({ - buffer_type : taosConst.C_BOOL, - buffer : ref.alloc(ref.types.bool, val), - buffer_length : ref.types.bool.size, - length : ref.alloc(ref.types.uint64, ref.types.bool.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_BOOL, + buffer: ref.alloc(ref.types.bool, val), + buffer_length: ref.types.bool.size, + length: ref.alloc(ref.types.uint64, ref.types.bool.size), + is_null: ref.alloc(ref.types.int32, 0), }); - + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, bl); this.index++ - }else{ + } else { throw new TDError(`bindBool() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } @@ -75,19 +74,19 @@ class TaosBind { * * @param {int8} val is a not null tinyint value. */ - bindTinyInt(val){ - if(!this._isOutOfBound()){ + bindTinyInt(val) { + if (!this._isOutOfBound()) { let tinnyInt = new TAOS_BIND({ - buffer_type : taosConst.C_TINYINT, - buffer : ref.alloc(ref.types.int8, val), - buffer_length : ref.types.int8.size, - length : ref.alloc(ref.types.uint64, ref.types.int8.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_TINYINT, + buffer: ref.alloc(ref.types.int8, val), + buffer_length: ref.types.int8.size, + length: ref.alloc(ref.types.uint64, ref.types.int8.size), + is_null: ref.alloc(ref.types.int32, 0), }); - + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, tinnyInt); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindTinyInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } @@ -96,39 +95,39 @@ class TaosBind { * * @param {short} val is a not null small int value. */ - bindSmallInt(val){ - if(!this._isOutOfBound()){ + bindSmallInt(val) { + if (!this._isOutOfBound()) { let samllint = new TAOS_BIND({ - buffer_type : taosConst.C_SMALLINT, - buffer : ref.alloc(ref.types.int16, val), - buffer_length : ref.types.int16.size, - length : ref.alloc(ref.types.uint64, ref.types.int16.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_SMALLINT, + buffer: ref.alloc(ref.types.int16, val), + buffer_length: ref.types.int16.size, + length: ref.alloc(ref.types.uint64, ref.types.int16.size), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, samllint); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindSmallInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } - + /** * * @param {int} val is a not null int value. */ - bindInt(val){ - if(!this._isOutOfBound()){ + bindInt(val) { + if (!this._isOutOfBound()) { let int = new TAOS_BIND({ - buffer_type : taosConst.C_INT, - buffer : ref.alloc(ref.types.int32, val), - buffer_length : ref.types.int32.size, - length : ref.alloc(ref.types.uint64, ref.types.int32.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_INT, + buffer: ref.alloc(ref.types.int32, val), + buffer_length: ref.types.int32.size, + length: ref.alloc(ref.types.uint64, ref.types.int32.size), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, int); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } @@ -139,37 +138,37 @@ class TaosBind { * @param {long} val is not null big int value. 
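 * @example
 * // A hedged sketch; 64-bit values are passed as BigInt literals (trailing
 * // 'n'), matching examples/stmtBindParamSample.js. `binds` stands in for a
 * // TaosBind instance created elsewhere.
 * binds.bindBigInt(-164243520000011111n);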
*/ bindBigInt(val) { - if(!this._isOutOfBound()){ + if (!this._isOutOfBound()) { let bigint = new TAOS_BIND({ - buffer_type : taosConst.C_BIGINT, - buffer : ref.alloc(ref.types.int64, val.toString()), - buffer_length : ref.types.int64.size, - length : ref.alloc(ref.types.uint64, ref.types.int64.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_BIGINT, + buffer: ref.alloc(ref.types.int64, val.toString()), + buffer_length: ref.types.int64.size, + length: ref.alloc(ref.types.uint64, ref.types.int64.size), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, bigint); this.index++ - }else{ + } else { throw new TDError(`bindBigInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } - /** - * - * @param {float} val is a not null float value - */ + /** + * + * @param {float} val is a not null float value + */ bindFloat(val) { - if(!this._isOutOfBound()){ + if (!this._isOutOfBound()) { let float = new TAOS_BIND({ - buffer_type : taosConst.C_FLOAT, - buffer : ref.alloc(ref.types.float, val), - buffer_length : ref.types.float.size, - length : ref.alloc(ref.types.uint64, ref.types.float.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_FLOAT, + buffer: ref.alloc(ref.types.float, val), + buffer_length: ref.types.float.size, + length: ref.alloc(ref.types.uint64, ref.types.float.size), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, float); this.index++ - }else{ + } else { throw new TDError(`bindFloat() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } @@ -177,18 +176,18 @@ class TaosBind { * * @param {double} val is a not null double value */ - bindDouble(val){ - if(!this._isOutOfBound()){ + bindDouble(val) { + if (!this._isOutOfBound()) { let double = new TAOS_BIND({ - buffer_type : taosConst.C_DOUBLE, - buffer : ref.alloc(ref.types.double, val), - buffer_length : ref.types.double.size, - length : ref.alloc(ref.types.uint64, ref.types.double.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_DOUBLE, + buffer: ref.alloc(ref.types.double, val), + buffer_length: ref.types.double.size, + length: ref.alloc(ref.types.uint64, ref.types.double.size), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, double); this.index++ - }else{ + } else { throw new TDError(`bindDouble() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } @@ -197,19 +196,19 @@ class TaosBind { * * @param {string} val is a string. 
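 * @example
 * // A hedged sketch; multi-byte strings are measured in UTF-8 bytes, and the
 * // bound length now excludes the trailing '\0' that ref.allocCString() appends.
 * binds.bindBinary('taosdata涛思数据');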
*/ - bindBinary(val){ - let cstringBuf = ref.allocCString(val,'utf-8'); - if(!this._isOutOfBound()){ + bindBinary(val) { + let cstringBuf = ref.allocCString(val, 'utf-8'); + if (!this._isOutOfBound()) { let binary = new TAOS_BIND({ - buffer_type : taosConst.C_BINARY, - buffer : cstringBuf, - buffer_length : cstringBuf.length, - length : ref.alloc(ref.types.uint64, cstringBuf.length), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_BINARY, + buffer: cstringBuf, + buffer_length: cstringBuf.length, + length: ref.alloc(ref.types.uint64, cstringBuf.length - 1), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, binary); this.index++ - }else{ + } else { throw new TDError(`bindBinary() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } @@ -220,11 +219,11 @@ class TaosBind { */ bindTimestamp(val) { let ts = new TAOS_BIND({ - buffer_type : taosConst.C_TIMESTAMP, - buffer : ref.alloc(ref.types.int64, val), - buffer_length : ref.types.int64.size, - length : ref.alloc(ref.types.uint64, ref.types.int64.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_TIMESTAMP, + buffer: ref.alloc(ref.types.int64, val), + buffer_length: ref.types.int64.size, + length: ref.alloc(ref.types.uint64, ref.types.int64.size), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, ts); @@ -235,61 +234,61 @@ class TaosBind { * * @param {string} val is a string. */ - bindNchar(val){ - let cstringBuf = ref.allocCString(val,'utf-8'); - if(!this._isOutOfBound()){ + bindNchar(val) { + let cstringBuf = ref.allocCString(val, 'utf-8'); + if (!this._isOutOfBound()) { let nchar = new TAOS_BIND({ - buffer_type : taosConst.C_NCHAR, - buffer : cstringBuf, - buffer_length : cstringBuf.length, - length : ref.alloc(ref.types.uint64, cstringBuf.length), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_NCHAR, + buffer: cstringBuf, + buffer_length: cstringBuf.length, + length: ref.alloc(ref.types.uint64, cstringBuf.length - 1), + is_null: ref.alloc(ref.types.int32, 0), }); TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, nchar); this.index++ - }else{ + } else { throw new TDError(`bindNchar() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } - /** - * - * @param {uint8} val is a not null unsinged tinyint value. - */ - bindUTinyInt(val){ - if(!this._isOutOfBound()){ + /** + * + * @param {uint8} val is a not null unsinged tinyint value. + */ + bindUTinyInt(val) { + if (!this._isOutOfBound()) { let uTinyInt = new TAOS_BIND({ - buffer_type : taosConst.C_TINYINT_UNSIGNED, - buffer : ref.alloc(ref.types.uint8, val), - buffer_length : ref.types.uint8.size, - length : ref.alloc(ref.types.uint64, ref.types.uint8.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_TINYINT_UNSIGNED, + buffer: ref.alloc(ref.types.uint8, val), + buffer_length: ref.types.uint8.size, + length: ref.alloc(ref.types.uint64, ref.types.uint8.size), + is_null: ref.alloc(ref.types.int32, 0), }); - + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, uTinyInt); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindUTinyInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } - + /** * * @param {uint16} val is a not null unsinged smallint value. 
*/ - bindUSmallInt(val){ - if(!this._isOutOfBound()){ + bindUSmallInt(val) { + if (!this._isOutOfBound()) { let uSmallInt = new TAOS_BIND({ - buffer_type : taosConst.C_SMALLINT_UNSIGNED, - buffer : ref.alloc(ref.types.uint16, val), - buffer_length : ref.types.uint16.size, - length : ref.alloc(ref.types.uint64, ref.types.uint16.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_SMALLINT_UNSIGNED, + buffer: ref.alloc(ref.types.uint16, val), + buffer_length: ref.types.uint16.size, + length: ref.alloc(ref.types.uint64, ref.types.uint16.size), + is_null: ref.alloc(ref.types.int32, 0), }); - + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, uSmallInt); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindUSmallInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } @@ -298,19 +297,19 @@ class TaosBind { * * @param {uint32} val is a not null unsinged int value. */ - bindUInt(val){ - if(!this._isOutOfBound()){ + bindUInt(val) { + if (!this._isOutOfBound()) { let uInt = new TAOS_BIND({ - buffer_type : taosConst.C_INT_UNSIGNED, - buffer : ref.alloc(ref.types.uint32, val), - buffer_length : ref.types.uint32.size, - length : ref.alloc(ref.types.uint64, ref.types.uint32.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_INT_UNSIGNED, + buffer: ref.alloc(ref.types.uint32, val), + buffer_length: ref.types.uint32.size, + length: ref.alloc(ref.types.uint64, ref.types.uint32.size), + is_null: ref.alloc(ref.types.int32, 0), }); - + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, uInt); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindUInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } @@ -319,23 +318,46 @@ class TaosBind { * * @param {uint64} val is a not null unsinged bigint value. */ - bindUBigInt(val){ - if(!this._isOutOfBound()){ + bindUBigInt(val) { + if (!this._isOutOfBound()) { let uBigInt = new TAOS_BIND({ - buffer_type : taosConst.C_BIGINT_UNSIGNED, - buffer : ref.alloc(ref.types.uint64, val.toString()), - buffer_length : ref.types.uint64.size, - length : ref.alloc(ref.types.uint64, ref.types.uint64.size), - is_null : ref.alloc(ref.types.int32, 0), + buffer_type: taosConst.C_BIGINT_UNSIGNED, + buffer: ref.alloc(ref.types.uint64, val.toString()), + buffer_length: ref.types.uint64.size, + length: ref.alloc(ref.types.uint64, ref.types.uint64.size), + is_null: ref.alloc(ref.types.int32, 0), }); - + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, uBigInt); - this.index++ - }else{ + this.index++ + } else { throw new TDError(`bindUBigInt() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); } } + /** + * + * @param {jsonStr} val is a json string. Such as '{\"key1\":\"taosdata\"}' + */ + bindJson(val) { + let cstringBuf = ref.allocCString(val, 'utf-8'); + if (!this._isOutOfBound()) { + let jsonType = new TAOS_BIND({ + buffer_type: taosConst.C_JSON_TAG, + buffer: cstringBuf, + buffer_length: cstringBuf.length, + length: ref.alloc(ref.types.uint64, cstringBuf.length - 1), + is_null: ref.alloc(ref.types.int32, 0), + }); + + TAOS_BIND.set(this.buf, this.index * TAOS_BIND.size, jsonType); + this.index++ + } else { + throw new TDError(`bindJson() failed with ${val},since index:${this.index} is out of Buffer bound ${this.num}.`); + } + + } + /** * * @returns binded buffer. 
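 * @example
 * // A hedged sketch of the usual flow (see examples/stmtBindParamSample.js):
 * // fill every slot declared in the constructor, then hand the backing
 * // buffer to the prepared statement.
 * cursor.stmtBindParam(binds.getBind());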
@@ -344,10 +366,10 @@ class TaosBind { return this.buf; } - _isOutOfBound(){ - if(this.num>this.index){ + _isOutOfBound() { + if (this.num > this.index) { return false; - }else{ + } else { return true; } } diff --git a/src/connector/nodejs/nodetaos/taosMultiBind.js b/src/connector/nodejs/nodetaos/taosMultiBind.js new file mode 100755 index 0000000000000000000000000000000000000000..deead6d2f702ea2d40584a2c57574da5f75f4de7 --- /dev/null +++ b/src/connector/nodejs/nodetaos/taosMultiBind.js @@ -0,0 +1,530 @@ +const ref = require('ref-napi'); +const StructType = require('ref-struct-di')(ref); +const taosConst = require('./constants'); + +var TAOS_MULTI_BIND = StructType({ + 'buffer_type': ref.types.int, + 'buffer': ref.refType(ref.types.void), + 'buffer_length': ref.types.ulong, + 'length': ref.refType(ref.types.int), + 'is_null': ref.refType(ref.types.char), + 'num': ref.types.int, +}) + +class TaosMultiBind { + constructor() { + } + + /** + * To bind bool through an array. + * @param {*} boolArray is an boolean array that stores one column's value. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with bool type. + */ + multiBindBool(boolArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.bool.size * boolArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * boolArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * boolArray.length); + + boolArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.bool.size, ref.types.int) + if (element == null || element == undefined) { + // ref.set(mbindBufferBuf,index * ref.types.int64.size,taosConst.C_BIGINT_NULL,ref.types.int64); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.bool.size, element, ref.types.bool); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_BOOL, + buffer: mbindBufferBuf, + buffer_length: ref.types.bool.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: boolArray.length, + }) + return mbind; + } + + /** + * to bind tiny int through an array. + * @param {*} tinyIntArray is an array that stores tiny int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with tiny int. + */ + multiBindTinyInt(tinyIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.int8.size * tinyIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * tinyIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * tinyIntArray.length); + + tinyIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.int8.size, ref.types.int) + if (element == null || element == undefined) { + // ref.set(mbindBufferBuf,index * ref.types.int64.size,taosConst.C_BIGINT_NULL,ref.types.int64); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.int8.size, element, ref.types.int8); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_TINYINT, + buffer: mbindBufferBuf, + buffer_length: ref.types.int8.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: tinyIntArray.length, + }) + return mbind; + } + + /** + * To bind small int through an array. 
+ * @param {*} smallIntArray is an array that stores small int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with small int. + */ + multiBindSmallInt(smallIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.int16.size * smallIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * smallIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * smallIntArray.length); + + smallIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.int16.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.int16.size, element, ref.types.int16); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_SMALLINT, + buffer: mbindBufferBuf, + buffer_length: ref.types.int16.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: smallIntArray.length, + }) + return mbind; + } + + /** + * To bind int through an array. + * @param {*} intArray is an array that stores int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with int. + */ + multiBindInt(intArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.int.size * intArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * intArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * intArray.length); + + intArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.int.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.int.size, element, ref.types.int); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_INT, + buffer: mbindBufferBuf, + buffer_length: ref.types.int.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: intArray.length, + }) + return mbind; + } + + /** + * To bind big int through an array. + * @param {*} bigIntArray is an array that stores big int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with big int. + */ + multiBindBigInt(bigIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.int64.size * bigIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * bigIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * bigIntArray.length); + + bigIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.int64.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.writeInt64LE(mbindBufferBuf, index * ref.types.int64.size, element.toString()) + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_BIGINT, + buffer: mbindBufferBuf, + buffer_length: ref.types.int64.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: bigIntArray.length, + }) + return mbind; + } + + /** + * To bind float through an array. + * @param {*} floatArray is an array that stores float. 
+ * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with float. + */ + multiBindFloat(floatArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.float.size * floatArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * floatArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * floatArray.length); + + floatArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.float.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.float.size, element, ref.types.float); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_FLOAT, + buffer: mbindBufferBuf, + buffer_length: ref.types.float.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: floatArray.length, + }) + return mbind; + } + + /** + * To bind double through an array. + * @param {*} doubleArray is an array that stores double. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with double. + */ + multiBindDouble(doubleArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.double.size * doubleArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * doubleArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * doubleArray.length); + + doubleArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.double.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.double.size, element, ref.types.double); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_DOUBLE, + buffer: mbindBufferBuf, + buffer_length: ref.types.double.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: doubleArray.length, + }) + return mbind; + } + + /** + * To bind tdengine's binary through an array. + * @param {*} strArr is an array that stores string. + * (Null string can be defined as undefined or null,notice '' is not null.) + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with binary. 
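+   * @example
+   * // A hedged sketch: null/undefined entries become SQL NULL, while ''
+   * // stays an empty, non-null string (see examples/stmtUseResultSample.js).
+   * mbind.multiBindBinary(['TDengine_Binary', '', 'taosdata涛思数据', null, undefined]);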
+ */ + multiBindBinary(strArr) { + let maxStrUFT8Length = this._maxUTF8StrArrLength(strArr); + console.log(`maxStrUFT8Length * strArr.length=${maxStrUFT8Length * strArr.length}`); + let mbindBufferBuf = Buffer.alloc(maxStrUFT8Length * strArr.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * strArr.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * strArr.length); + + strArr.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, this._stringUTF8Length(element), ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.writeCString(mbindBufferBuf, index * maxStrUFT8Length, element, 'utf8'); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_BINARY, + buffer: mbindBufferBuf, + buffer_length: maxStrUFT8Length, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: strArr.length, + }) + return mbind; + } + + /** + * To bind timestamp through an array. + * @param {*} timestampArray is an array that stores timestamp. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with timestamp. + */ + multiBindTimestamp(timestampArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.int64.size * timestampArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * timestampArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * timestampArray.length); + + timestampArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.int64.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.writeInt64LE(mbindBufferBuf, index * ref.types.int64.size, element.toString()) + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_TIMESTAMP, + buffer: mbindBufferBuf, + buffer_length: ref.types.int64.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: timestampArray.length, + }) + return mbind; + } + + /** + * To bind tdengine's nchar through an array. + * @param {*} strArr is an array that stores string. + * (Null string can be defined as undefined or null,notice '' is not null.) + * @returns A instance of struct TAOS_MULTI_BIND that contains one nchar column's data with nchar. 
+ */ + multiBindNchar(strArr) { + let maxStrUFT8Length = this._maxUTF8StrArrLength(strArr); + // console.log(`maxStrUFT8Length * strArr.length=${maxStrUFT8Length * strArr.length}`); + let mbindBufferBuf = Buffer.alloc(maxStrUFT8Length * strArr.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * strArr.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * strArr.length); + + strArr.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, this._stringUTF8Length(element), ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.writeCString(mbindBufferBuf, index * maxStrUFT8Length, element, 'utf8'); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_NCHAR, + buffer: mbindBufferBuf, + buffer_length: maxStrUFT8Length, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: strArr.length, + }) + return mbind; + } + + /** + * to bind unsigned tiny int through an array. + * @param {*} uTinyIntArray is an array that stores unsigned tiny int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with unsigned tiny int. + */ + multiBindUTinyInt(uTinyIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.uint8.size * uTinyIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * uTinyIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * uTinyIntArray.length); + + uTinyIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.uint8.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.uint8.size, element, ref.types.uint8); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_TINYINT_UNSIGNED, + buffer: mbindBufferBuf, + buffer_length: ref.types.uint8.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: uTinyIntArray.length, + }) + return mbind; + } + + /** + * To bind unsigned small int through an array. + * @param {*} uSmallIntArray is an array that stores unsigned small int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with unsigned small int. 
+ */ + multiBindUSmallInt(uSmallIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.uint16.size * uSmallIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * uSmallIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * uSmallIntArray.length); + + uSmallIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.uint16.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.uint16.size, element, ref.types.uint16); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_SMALLINT_UNSIGNED, + buffer: mbindBufferBuf, + buffer_length: ref.types.uint16.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: uSmallIntArray.length, + }) + return mbind; + } + + /** + * To bind unsigned int through an array. + * @param {*} uIntArray is an array that stores unsigned int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with unsigned int. + */ + multiBindUInt(uIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.uint.size * uIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * uIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * uIntArray.length); + + uIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.uint.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.set(mbindBufferBuf, index * ref.types.uint.size, element, ref.types.uint); + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_INT_UNSIGNED, + buffer: mbindBufferBuf, + buffer_length: ref.types.uint.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: uIntArray.length, + }) + return mbind; + } + + /** + * To bind unsigned big int through an array. + * @param {*} uBigIntArray is an array that stores unsigned big int. + * @returns A instance of struct TAOS_MULTI_BIND that contains one column's data with unsigned big int. + */ + multiBindUBigInt(uBigIntArray) { + let mbindBufferBuf = Buffer.alloc(ref.types.uint64.size * uBigIntArray.length); + let mbindLengBuf = Buffer.alloc(ref.types.int.size * uBigIntArray.length); + let mbindIsNullBuf = Buffer.alloc(ref.types.char.size * uBigIntArray.length); + + uBigIntArray.forEach((element, index) => { + ref.set(mbindLengBuf, index * ref.types.int.size, ref.types.uint64.size, ref.types.int) + if (element == null || element == undefined) { + ref.set(mbindIsNullBuf, index * ref.types.char.size, 1, ref.types.char); + } else { + ref.writeInt64LE(mbindBufferBuf, index * ref.types.uint64.size, element.toString()) + ref.set(mbindIsNullBuf, index * ref.types.char.size, 0, ref.types.char); + } + + }); + + let mbind = new TAOS_MULTI_BIND({ + buffer_type: taosConst.C_BIGINT_UNSIGNED, + buffer: mbindBufferBuf, + buffer_length: ref.types.uint64.size, + length: mbindLengBuf, + is_null: mbindIsNullBuf, + num: uBigIntArray.length, + }) + return mbind; + } + + + // multiBJson(jsonArray) no need to support.Since till now TDengine only support json tag + // and there is no need to support bind json tag in TAOS_MULTI_BIND. 
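+
+  // JSON values can still be bound as a tag through TaosBind.bindJson() plus
+  // cursor.stmtSetTbnameTags(), as in examples/stmtBindSingleParamBatchSample.js.
+  // A hedged sketch ('s_01' is a placeholder sub-table name):
+  //   let tags = new taos.TaosBind(1);
+  //   tags.bindJson('{"key1":"taosdata","key4":3.2}');
+  //   cursor.stmtSetTbnameTags('s_01', tags.getBind());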
+ + /** + * + * @param {*} strArr an string array + * @returns return the max length of the element in strArr in "UFT-8" encoding. + */ + _maxUTF8StrArrLength(strArr) { + let max = 0; + strArr.forEach((item) => { + let realLeng = 0; + let itemLength = -1; + if (item == null || item == undefined) { + itemLength = 0; + } else { + itemLength = item.length; + } + + let charCode = -1; + for (let i = 0; i < itemLength; i++) { + charCode = item.charCodeAt(i); + if (charCode >= 0 && charCode <= 128) { + realLeng += 1; + } else { + realLeng += 3; + } + } + if (max < realLeng) { + max = realLeng + }; + }); + return max; + } + + /** + * + * @param {*} str a string. + * @returns return the length of the input string encoding with utf-8. + */ + _stringUTF8Length(str) { + let leng = 0; + if (str == null || str == undefined) { + leng = 0; + } else { + for (let i = 0; i < str.length; i++) { + if (str.charCodeAt(i) >= 0 && str.charCodeAt(i) <= 128) { + leng += 1; + } else { + leng += 3; + } + } + } + return leng; + } +} +// console.log(TAOS_MULTI_BIND.size) +module.exports = { TaosMultiBind, TAOS_MULTI_BIND }; \ No newline at end of file diff --git a/src/connector/nodejs/nodetaos/taosMultiBindArr.js b/src/connector/nodejs/nodetaos/taosMultiBindArr.js new file mode 100755 index 0000000000000000000000000000000000000000..68c9c95bddad725b6dc10fe7766c1ad46adc2919 --- /dev/null +++ b/src/connector/nodejs/nodetaos/taosMultiBindArr.js @@ -0,0 +1,250 @@ +const ref = require('ref-napi'); +const { TDError } = require('./error'); +const { TAOS_MULTI_BIND, TaosMultiBind } = require('./taosMultiBind'); + +const TAOS_MULTI_BIND_SIZE = TAOS_MULTI_BIND.size; + +class TaosMultiBindArr extends TaosMultiBind { + /** + * The constructor,initial basic parameters and alloc buffer. + * @param {*} numOfColumns the number of column that you want to bind parameters. + */ + constructor(numOfColumns) { + super(); + this.taosMBindArrBuf = Buffer.alloc(numOfColumns * TAOS_MULTI_BIND_SIZE); + this.index = 0; + this.bound = numOfColumns; + } + + /** + * Used to bind boolean column's values. + * @param {*} boolArray An array of bool value, + * represents the bool values you want to bind. + */ + multiBindBool(boolArray) { + if (this.bound > this.index) { + let mBindBool = super.multiBindBool(boolArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindBool); + this.index += 1; + } else { + throw new TDError(`multiBindArrBool() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind tiny int column's values. + * @param {*} tinyIntArray An array of tiny int value. + * represents the tiny int values you want to bind. + */ + multiBindTinyInt(tinyIntArray) { + if (this.bound > this.index) { + let mBindTinyInt = super.multiBindTinyInt(tinyIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindTinyInt); + this.index += 1; + } else { + throw new TDError(`multiBindArrTinyInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind small int column's value. + * @param {*} smallIntArray An array of small int values, + * represents the small int values you want to bind. 
+ */ + multiBindSmallInt(smallIntArray) { + if (this.bound > this.index) { + let mBindSmallInt = super.multiBindSmallInt(smallIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindSmallInt); + this.index += 1; + } else { + throw new TDError(`multiBindSmallInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + + } + + /** + * Used to bind int column's value. + * @param {*} intArray An array of int values, + * represents the int values you want to bind. + */ + multiBindInt(intArray) { + if (this.bound > this.index) { + let mBindInt = super.multiBindInt(intArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindInt); + this.index += 1; + } else { + throw new TDError(`multiBindInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + + } + + /** + * Used to bind big int column's value. + * @param {*} bigIntArray An array of big int values, + * represents the big int values you want to bind. + */ + multiBindBigInt(bigIntArray) { + if (this.bound > this.index) { + let mBindBigInt = super.multiBindBigInt(bigIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindBigInt); + this.index += 1; + } else { + throw new TDError(`multiBindBigInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + + } + + /** + * Used to bind float column's value. + * @param {*} floatArray An array of float values, + * represents the float values you want to bind. + */ + multiBindFloat(floatArray) { + if (this.bound > this.index) { + let mBindFloat = super.multiBindFloat(floatArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindFloat); + this.index += 1; + } else { + throw new TDError(`multiBindFloat() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + + } + + /** + * Used to bind double column's value. + * @param {*} doubleArray An array of double values, + * represents the double values you want to bind. + */ + multiBindDouble(doubleArray) { + if (this.bound > this.index) { + let mBindDouble = super.multiBindDouble(doubleArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindDouble); + this.index += 1; + } else { + throw new TDError(`multiBindDouble() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + + } + + /** + * Used to bind binary column's value. + * @param {*} strArr An array of binary(string) values, + * represents the binary values you want to bind. + * Notice '' is not equal to TDengine's "null" value. + */ + multiBindBinary(strArr) { + if (this.bound > this.index) { + let mBindBinary = super.multiBindBinary(strArr); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindBinary); + this.index += 1; + } else { + throw new TDError(`multiBindBinary() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind timestamp column's values. + * @param {*} timestampArray An array of timestamp values, + * represents the timestamp values you want to bind. 
+ */ + multiBindTimestamp(timestampArray) { + if (this.bound > this.index) { + let mBindTimestamp = super.multiBindTimestamp(timestampArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindTimestamp); + this.index += 1; + } else { + throw new TDError(`multiBindArrTimestamp() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind nchar column's value. + * @param {*} strArr An array of nchar(string) values, + * represents the nchar values you want to bind. + * Notice '' is not equal to TDengine's "null" value. + */ + multiBindNchar(strArr) { + if (this.bound > this.index) { + let mBindNchar = super.multiBindNchar(strArr); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindNchar); + this.index += 1; + } else { + throw new TDError(`multiBindNchar() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind unsigned tiny int column's value. + * @param {*} uTinyIntArray An array of unsigned tiny int values, + * represents the unsigned tiny int values you want to bind. + */ + multiBindUTinyInt(uTinyIntArray) { + if (this.bound > this.index) { + let mBindNchar = super.multiBindUTinyInt(uTinyIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindNchar); + this.index += 1; + } else { + throw new TDError(`multiBindUTinyInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind unsigned small int column's value. + * @param {*} uSmallIntArray An array of unsigned small int value, + * represents the unsigned small int values you want to bind. + */ + multiBindUSmallInt(uSmallIntArray) { + if (this.bound > this.index) { + let mBindUSmallInt = super.multiBindUSmallInt(uSmallIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindUSmallInt); + this.index += 1; + } else { + throw new TDError(`multiBindUSmallInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind unsigned int column's value. + * @param {*} uIntArray An array of unsigned int column's value, + * represents the unsigned int values you want to bind. + */ + multiBindUInt(uIntArray) { + if (this.bound > this.index) { + let mBindUInt = super.multiBindUInt(uIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindUInt); + this.index += 1; + } else { + throw new TDError(`multiBindUInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + /** + * Used to bind unsigned big int column's value. + * @param {*} uBigIntArray An array of unsigned big int column's value, + * represents the unsigned big int values you want to bind. + */ + multiBindUBigInt(uBigIntArray) { + if (this.bound > this.index) { + let mBindUBigInt = super.multiBindUBigInt(uBigIntArray); + TAOS_MULTI_BIND.set(this.taosMBindArrBuf, this.index * TAOS_MULTI_BIND_SIZE, mBindUBigInt); + this.index += 1; + } else { + throw new TDError(`multiBindUBigInt() failed,since index:${this.index} is out of Buffer bound ${this.bound}.`) + } + } + + // multiBJson(jsonArray) no need to support.Since till now TDengine only support json tag + // and there is no need to support bind json tag in TAOS_MULTI_BIND. + + + /** + * After all the parameters have been prepared and stored + * in the buffer, Call this method to get the buffer. 
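+   * @example
+   * // A hedged sketch of the whole-row batch flow from
+   * // examples/stmtUseResultSample.js; columns must be bound in the order
+   * // declared by the prepared insert statement.
+   * let mBinds = new taos.TaosMultiBindArr(2);
+   * mBinds.multiBindTimestamp([1642435200000, 1642435300000]);
+   * mBinds.multiBindInt([1, 2]);
+   * cursor.stmtBindParamBatch(mBinds.getMultiBindArr());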
+ * @returns return the buffer which stores all the parameters. + */ + getMultiBindArr() { + return this.taosMBindArrBuf; + } + +} +module.exports = TaosMultiBindArr; \ No newline at end of file diff --git a/src/connector/nodejs/tdengine.js b/src/connector/nodejs/tdengine.js index b33069bd9dd46a5f6c050d85ea1f73fab6f51a64..275834bd4f2b5ba5a903049a5973a34287132175 100644 --- a/src/connector/nodejs/tdengine.js +++ b/src/connector/nodejs/tdengine.js @@ -1,6 +1,9 @@ var TDengineConnection = require('./nodetaos/connection.js') const TDengineConstant = require('./nodetaos/constants.js') const TaosBind = require('./nodetaos/taosBind') +const { TaosMultiBind } = require('./nodetaos/taosMultiBind') +const TaosMultiBindArr = require('./nodetaos/taosMultiBindArr') + module.exports = { connect: function (connection = {}) { return new TDengineConnection(connection); @@ -8,4 +11,6 @@ module.exports = { SCHEMALESS_PROTOCOL: TDengineConstant.SCHEMALESS_PROTOCOL, SCHEMALESS_PRECISION: TDengineConstant.SCHEMALESS_PRECISION, TaosBind, + TaosMultiBind, + TaosMultiBindArr, } \ No newline at end of file diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 44192403972cd9dc54b3f2a965e1468595e17487..64065d0b4672a36c0510242cf9d52830aeccc67b 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -293,6 +293,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_QRY_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x070D) //"System error") #define TSDB_CODE_QRY_INVALID_TIME_CONDITION TAOS_DEF_ERROR_CODE(0, 0x070E) //"invalid time condition") #define TSDB_CODE_QRY_INVALID_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0710) //"invalid schema version") +#define TSDB_CODE_QRY_UNIQUE_RESULT_TOO_LARGE TAOS_DEF_ERROR_CODE(0, 0x0711) //"unique result num is too large") // grant #define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) //"License expired" diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 2c4d21037c9697e832bccf082595408c712d0670..e5c390f9191f1aad622a9b8787d4643791c2a870 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -503,8 +503,8 @@ typedef struct { uint32_t tagCondLen; // tag length in current query int32_t colCondLen; // column length in current query int16_t numOfGroupCols; // num of group by columns - int16_t orderByIdx; - int16_t orderType; // used in group by xx order by xxx + int16_t orderByIdx; // useless + int16_t groupOrderType; // used for group order int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query. int16_t prjOrder; // global order in super table projection query. 
int64_t limit; diff --git a/src/kit/taos-tools b/src/kit/taos-tools index 28ff2899fd0238f81c14cb76ea6dbdefa83570b3..ca4a90027ddfd5faa858a676e695ddcdd56ef2b5 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit 28ff2899fd0238f81c14cb76ea6dbdefa83570b3 +Subproject commit ca4a90027ddfd5faa858a676e695ddcdd56ef2b5 diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 9e7355b42af8fe296975f78960639de0a43a4d18..13360606df240966ec7cde494bbd07772f64be26 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -704,7 +704,7 @@ void leakFloat() { void leakTest(){ - for(int i=0; i< 90000000000000; i++){ + for(int i=0; i< 90000000; i++){ if(i%10000==0) printf(" ---------- %d ---------------- \n", i); leakFloat(); diff --git a/src/mnode/src/mnodeUser.c b/src/mnode/src/mnodeUser.c index e2fda182d7287f61311f658b83134132a4e8d9a1..b3e3ba6cd9698b08aceb86841bd858a7c6f05220 100644 --- a/src/mnode/src/mnodeUser.c +++ b/src/mnode/src/mnodeUser.c @@ -355,13 +355,41 @@ static int32_t mnodeGetUserMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pCo static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, void *pConn) { int32_t numOfRows = 0; SUserObj *pUser = NULL; + SUserObj *userObj = NULL; + char *user = NULL; int32_t cols = 0; + size_t len1 = 0; + size_t len2; + int32_t acctId = -1; char *pWrite; + if (pConn) { + userObj = mnodeGetUserFromConn(pConn); + if (userObj && userObj->pAcct) { + user = userObj->pAcct->user; + if (user) { + len1 = strlen(user); + if (len1 == 0) { + user = NULL; + } + } + + acctId = userObj->pAcct->acctId; + } + } + while (numOfRows < rows) { pShow->pIter = mnodeGetNextUser(pShow->pIter, &pUser); if (pUser == NULL) break; - + + if (user && pUser->pAcct) { + len2 = strlen(pUser->pAcct->user); + + if ((len1 != len2 || strncmp(user, pUser->pAcct->user, len1)) && acctId != pUser->pAcct->acctId) { + continue; + } + } + cols = 0; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; @@ -643,4 +671,4 @@ int32_t mnodeCompactUsers() { mInfo("end to compact users table..."); return 0; -} \ No newline at end of file +} diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 444612f15771212757f20234c51d4b6c29a44180..aa5e2abd803d611be005115ba387a45e1138ed56 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -78,8 +78,9 @@ extern "C" { #define TSDB_FUNC_ELAPSED 37 #define TSDB_FUNC_HISTOGRAM 38 +#define TSDB_FUNC_UNIQUE 39 -#define TSDB_FUNC_MAX_NUM 39 +#define TSDB_FUNC_MAX_NUM 40 #define TSDB_FUNCSTATE_SO 0x1u // single output #define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM @@ -174,7 +175,7 @@ typedef struct SQLFunctionCtx { void * pInput; // input data buffer uint32_t order; // asc|desc int16_t inputType; - int16_t inputBytes; + int32_t inputBytes; int16_t outputType; int32_t outputBytes; // size of results, determined by function and input column data type @@ -200,6 +201,8 @@ typedef struct SQLFunctionCtx { SExtTagsInfo tagInfo; SPoint1 start; SPoint1 end; + + SHashObj **pUniqueSet; // for unique function } SQLFunctionCtx; typedef struct SAggFunctionInfo { @@ -249,7 +252,7 @@ void blockDistInfoToBinary(STableBlockDist* pDist, struct SBufferWriter* bw); void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDist); /* global sql function array */ -extern struct SAggFunctionInfo aAggs[40]; +extern struct SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM]; extern int32_t 
functionCompatList[]; // compatible check array list diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index c654047932f5c99a5e30d46e44efc9d7631e2136..c4aebc07b15749da343b4d0175812ca6e4211021 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -90,6 +90,7 @@ typedef struct SResultRow { SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo STimeWindow win; char *key; // start key of current result row + SHashObj *uniqueHash; // for unique function } SResultRow; typedef struct SResultRowCell { @@ -221,6 +222,7 @@ typedef struct SQueryAttr { bool stableQuery; // super table query or not bool topBotQuery; // TODO used bitwise flag + bool uniqueQuery; bool groupbyColumn; // denote if this is a groupby normal column query bool hasTagResults; // if there are tag values in final result or not bool timeWindowInterpo;// if the time window start/end required interpolation @@ -281,6 +283,7 @@ typedef struct SQueryAttr { STableGroupInfo tableGroupInfo; // table list SArray int32_t vgId; SArray *pUdfInfo; // no need to free + int32_t interBytesForGlobal; } SQueryAttr; typedef SSDataBlock* (*__operator_fn_t)(void* param, bool* newgroup); @@ -730,4 +733,5 @@ void addTableReadRows(SQueryRuntimeEnv* pEnv, int32_t tid, int32_t rows); // tsdb scan table callback table or query is over. param is SQueryRuntimeEnv* bool qReadOverCB(void* param, int8_t type, int32_t tid); +bool isUniqueQuery(int32_t numOfOutput, SExprInfo* pExprs); #endif // TDENGINE_QEXECUTOR_H diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index ccdfd5c05994b71bd911c3a66d02dc1ffa58a474..abcf11bfa54d1950edc7e42e8b76b0121fcc4c2c 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -53,14 +53,14 @@ typedef struct tFlushoutInfo { } tFlushoutInfo; typedef struct tFlushoutData { - uint32_t nAllocSize; - uint32_t nLength; - tFlushoutInfo *pFlushoutInfo; + uint32_t nAllocSize; // capacity + uint32_t nLength; // size + tFlushoutInfo *pFlushoutInfo; // dynamic allocate } tFlushoutData; typedef struct SExtFileInfo { - uint32_t nFileSize; // in pages - uint32_t pageSize; + uint32_t nFileSize; // how many pages in file + //uint32_t pageSize; // useless uint32_t numOfElemsInFile; tFlushoutData flushoutData; } SExtFileInfo; diff --git a/src/query/inc/qResultbuf.h b/src/query/inc/qResultbuf.h index f0c4aa3702cc083f7cc2ceaf1afabde21a3de73b..d4194168e565fd8e1202985d3597ace56326e92e 100644 --- a/src/query/inc/qResultbuf.h +++ b/src/query/inc/qResultbuf.h @@ -78,7 +78,8 @@ typedef struct SDiskbasedResultBuf { #define DEFAULT_INTERN_BUF_PAGE_SIZE (1024L) // in bytes #define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1} - +#define MAX_UNIQUE_RESULT_ROWS (1000) +#define MAX_UNIQUE_RESULT_SIZE (1024*1024*1) /** * create disk-based result buffer * @param pResultBuf diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h index d47189691ebbe2c4ec3ad55dd72306686586a56e..e9b98cfe44f7bea24d4e680472d253ce4c9ce626 100644 --- a/src/query/inc/qTableMeta.h +++ b/src/query/inc/qTableMeta.h @@ -50,7 +50,7 @@ typedef struct SGroupbyExpr { int16_t tableIndex; SArray* columnInfo; // SArray, group by columns information int16_t numOfGroupCols; // todo remove it - int16_t orderIndex; // order by column index + //int16_t orderIndex; // order by column index, rm useless orderIndex int16_t orderType; // order by type: asc/desc } SGroupbyExpr; diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 
0882df77c2a8bc38560269ce093568fd96467dae..ce0a0648f554e007e46d441d9607d1d8edb971e3 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -70,8 +70,12 @@ static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage* int32_t offset) { assert(rowOffset >= 0 && pQueryAttr != NULL); - int32_t numOfRows = (int32_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); - return ((char *)page->data) + rowOffset + offset * numOfRows; + int64_t numOfRows = (int64_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); + numOfRows *= offset; + if(numOfRows >= INT32_MAX){ + assert(0); + } + return ((char *)page->data) + rowOffset + numOfRows; } bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 2069fe7578cb68aff8ff98e20eaa50d323d564ff..b294c0482f0d2002cca7255f572d527ec21b543b 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -223,6 +223,16 @@ typedef struct{ SHistogramFuncBin* orderedBins; } SHistogramFuncInfo; +typedef struct { + int64_t timestamp; + char data[]; +} UniqueUnit; + +typedef struct { + int32_t num; + char res[]; +} SUniqueFuncInfo; + int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int32_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo) { if (!isValidDataType(dataType)) { @@ -353,6 +363,18 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *bytes = (sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param); *interBytes = *bytes; + return TSDB_CODE_SUCCESS; + } else if (functionId == TSDB_FUNC_UNIQUE) { + *type = TSDB_DATA_TYPE_BINARY; + int64_t size = sizeof(UniqueUnit) + dataBytes + extLength; + size *= param; + size += sizeof(SUniqueFuncInfo); + if (size > MAX_UNIQUE_RESULT_SIZE){ + size = MAX_UNIQUE_RESULT_SIZE; + } + *bytes = size; + *interBytes = *bytes; + return TSDB_CODE_SUCCESS; } else if (functionId == TSDB_FUNC_SAMPLE) { *type = TSDB_DATA_TYPE_BINARY; @@ -477,10 +499,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *type = (int16_t)dataType; *bytes = dataBytes; - size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param; + size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + extLength) * param; // the output column may be larger than sizeof(STopBotInfo) *interBytes = (int32_t)size; + } else if (functionId == TSDB_FUNC_UNIQUE) { + *type = (int16_t)dataType; + *bytes = dataBytes; + int64_t size = sizeof(UniqueUnit) + dataBytes + extLength; + size *= param; + size += sizeof(SUniqueFuncInfo); + if (size > MAX_UNIQUE_RESULT_SIZE){ + size = MAX_UNIQUE_RESULT_SIZE; + } + *interBytes = (int32_t)size; } else if (functionId == TSDB_FUNC_SAMPLE) { *type = (int16_t)dataType; *bytes = dataBytes; @@ -2130,7 +2162,7 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { int32_t step = QUERY_ASC_FORWARD_STEP; int32_t len = (int32_t)(GET_RES_INFO(pCtx)->numOfRes); - + switch (type) { case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_INT: { @@ -5097,6 +5129,194 @@ static void histogram_func_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } +// unique use the intermediate result buffer to keep the intermediate result +static SUniqueFuncInfo *getUniqueOutputInfo(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo 
= GET_RES_INFO(pCtx); + + // during the first stage of a super table query, data is written directly into the final output buffer + if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) { + return (SUniqueFuncInfo*) pCtx->pOutput; + } else { // for normal table queries, and for super table queries at the merge stage, the result is written to the intermediate buffer + return GET_ROWCELL_INTERBUF(pResInfo); + } +} + +// unique +static void copyUniqueRes(SQLFunctionCtx *pCtx, int32_t bytes) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SUniqueFuncInfo *pRes = GET_ROWCELL_INTERBUF(pResInfo); + + size_t size = sizeof(UniqueUnit) + bytes + pCtx->tagInfo.tagsLen; + int32_t len = (int32_t)(GET_RES_INFO(pCtx)->numOfRes); + + char *tsOutput = pCtx->ptsOutputBuf; + char *output = pCtx->pOutput; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->param[2].i64); + char *tvp = pRes->res + (size * ((pCtx->param[2].i64 == TSDB_ORDER_ASC) ? 0 : len -1)); + for (int32_t i = 0; i < len; ++i) { + memcpy(tsOutput, tvp, sizeof(int64_t)); + memcpy(output, tvp + sizeof(UniqueUnit), bytes); + tvp += (step * size); + tsOutput += sizeof(int64_t); + output += bytes; + } + + // set the corresponding tag data for each record + // todo check malloc failure + if (pCtx->tagInfo.numOfTagCols == 0) { + return ; + } + + char **pData = calloc(pCtx->tagInfo.numOfTagCols, POINTER_BYTES); + for (int32_t i = 0; i < pCtx->tagInfo.numOfTagCols; ++i) { + pData[i] = pCtx->tagInfo.pTagCtxList[i]->pOutput; + } + + tvp = pRes->res + (size * ((pCtx->param[2].i64 == TSDB_ORDER_ASC) ? 0 : len -1)); + for (int32_t i = 0; i < len; ++i) { + int16_t offset = sizeof(UniqueUnit) + bytes; + for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) { + memcpy(pData[j], tvp + offset, (size_t)pCtx->tagInfo.pTagCtxList[j]->outputBytes); + offset += pCtx->tagInfo.pTagCtxList[j]->outputBytes; + pData[j] += pCtx->tagInfo.pTagCtxList[j]->outputBytes; + } + tvp += (step * size); + } + + tfree(pData); +} + +static bool unique_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { + return false; + } + if(*pCtx->pUniqueSet != NULL){ + taosHashClear(*pCtx->pUniqueSet); + }else{ + *pCtx->pUniqueSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + } + + return true; +} + +static void do_unique_function(SQLFunctionCtx *pCtx, SUniqueFuncInfo *pInfo, TSKEY timestamp, char *pData, char *tag, int32_t bytes, int16_t type){ + int32_t hashKeyBytes = bytes; + if(IS_VAR_DATA_TYPE(type)){ // for var data we cannot use the declared bytes, because the tail of a var-data buffer may hold dirty data + hashKeyBytes = varDataTLen(pData); + } + UniqueUnit **unique = taosHashGet(*pCtx->pUniqueSet, pData, hashKeyBytes); + if (unique == NULL) { + size_t size = sizeof(UniqueUnit) + bytes + pCtx->tagInfo.tagsLen; + char *tmp = pInfo->res + pInfo->num * size; + ((UniqueUnit*)tmp)->timestamp = timestamp; + char *data = tmp + sizeof(UniqueUnit); + char *tags = tmp + sizeof(UniqueUnit) + bytes; + memcpy(data, pData, bytes); + + if (pCtx->currentStage == MERGE_STAGE && tag != NULL) { + memcpy(tags, tag, (size_t)pCtx->tagInfo.tagsLen); + }else{ + int32_t offset = 0; + for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) { + SQLFunctionCtx *tagCtx = pCtx->tagInfo.pTagCtxList[j]; + if (tagCtx->functionId == TSDB_FUNC_TS_DUMMY) { + tagCtx->tag.nType = TSDB_DATA_TYPE_BIGINT; + tagCtx->tag.i64 = timestamp; + } + + tVariantDump(&tagCtx->tag, tagCtx->pOutput, tagCtx->tag.nType, true); + memcpy(tags + offset, tagCtx->pOutput,
tagCtx->outputBytes); + offset += tagCtx->outputBytes; + } + } + + taosHashPut(*pCtx->pUniqueSet, pData, hashKeyBytes, &tmp, sizeof(UniqueUnit*)); + pInfo->num++; + }else if((*unique)->timestamp > timestamp){ + (*unique)->timestamp = timestamp; + } +} + +static void unique_function(SQLFunctionCtx *pCtx) { + SUniqueFuncInfo *pInfo = getUniqueOutputInfo(pCtx); + + for (int32_t i = 0; i < pCtx->size; i++) { + char *pData = GET_INPUT_DATA(pCtx, i); + TSKEY k = 0; + if (pCtx->ptsList != NULL) { + k = GET_TS_DATA(pCtx, i); + } + do_unique_function(pCtx, pInfo, k, pData, NULL, pCtx->inputBytes, pCtx->inputType); + + if (sizeof(SUniqueFuncInfo) + pInfo->num * (sizeof(UniqueUnit) + pCtx->inputBytes + pCtx->tagInfo.tagsLen) >= MAX_UNIQUE_RESULT_SIZE){ + GET_RES_INFO(pCtx)->numOfRes = -1; // mark out of memory + return; + } + } + + GET_RES_INFO(pCtx)->numOfRes = 1; +} + +static void unique_function_merge(SQLFunctionCtx *pCtx) { + SUniqueFuncInfo *pInput = (SUniqueFuncInfo *)GET_INPUT_DATA_LIST(pCtx); + SUniqueFuncInfo *pOutput = getUniqueOutputInfo(pCtx); + size_t size = sizeof(UniqueUnit) + pCtx->outputBytes + pCtx->tagInfo.tagsLen; + for (int32_t i = 0; i < pInput->num; ++i) { + char *tmp = pInput->res + i* size; + TSKEY timestamp = ((UniqueUnit*)tmp)->timestamp; + char *data = tmp + sizeof(UniqueUnit); + char *tags = tmp + sizeof(UniqueUnit) + pCtx->outputBytes; + do_unique_function(pCtx, pOutput, timestamp, data, tags, pCtx->outputBytes, pCtx->outputType); + + if (sizeof(SUniqueFuncInfo) + pOutput->num * (sizeof(UniqueUnit) + pCtx->outputBytes + pCtx->tagInfo.tagsLen) >= MAX_UNIQUE_RESULT_SIZE){ + GET_RES_INFO(pCtx)->numOfRes = -1; // mark out of memory + return; + } + } + + GET_RES_INFO(pCtx)->numOfRes = pOutput->num; +} + +typedef struct{ + int32_t dataOffset; + __compar_fn_t comparFn; +} UniqueSupporter; + +static int32_t uniqueCompareFn(const void *p1, const void *p2, const void *param) { + UniqueSupporter *support = (UniqueSupporter *)param; + return support->comparFn(p1 + support->dataOffset, p2 + support->dataOffset); +} + +static void unique_func_finalizer(SQLFunctionCtx *pCtx) { + SUniqueFuncInfo *pInfo = getUniqueOutputInfo(pCtx); + + GET_RES_INFO(pCtx)->numOfRes = pInfo->num; + int32_t bytes = 0; + int32_t type = 0; + if (pCtx->currentStage == MERGE_STAGE) { + bytes = pCtx->outputBytes; + type = pCtx->outputType; + assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); + } else { + bytes = pCtx->inputBytes; + type = pCtx->inputType; + } + UniqueSupporter support = {0}; + // the user-specified output order: sort by timestamp when ordering on the primary key column, otherwise by the data value + if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + support.dataOffset = 0; + support.comparFn = compareInt64Val; + } else{ + support.dataOffset = sizeof(UniqueUnit); + support.comparFn = getComparFunc(type, 0); + } + + size_t size = sizeof(UniqueUnit) + bytes + pCtx->tagInfo.tagsLen; + taosqsort(pInfo->res, (size_t)GET_RES_INFO(pCtx)->numOfRes, size, &support, uniqueCompareFn); + copyUniqueRes(pCtx, bytes); + doFinalizer(pCtx); +} + ///////////////////////////////////////////////////////////////////////////////////////////// /* * function compatible list.
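The heart of the new UNIQUE implementation above is a first-seen deduplication rule: each incoming value is looked up in a per-result-row hash set, a new value appends a UniqueUnit recording its timestamp, and a duplicate only lowers the stored timestamp when an earlier row arrives. The standalone sketch below shows that rule in isolation; all names are illustrative, a linear scan over a small fixed array stands in for the taosHash lookup, and the MAX_UNIQUE_RESULT_SIZE cap is omitted.

```
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int64_t timestamp;  /* earliest timestamp observed for this value */
    int32_t value;      /* the distinct value itself */
} DemoUniqueUnit;

/* Insert (ts, value); on a duplicate value, keep the smaller timestamp.
 * Returns the new number of distinct entries. */
static int demoUniqueInsert(DemoUniqueUnit *units, int num, int64_t ts, int32_t value) {
    for (int i = 0; i < num; ++i) {
        if (units[i].value == value) {
            if (ts < units[i].timestamp) units[i].timestamp = ts;
            return num;
        }
    }
    units[num].timestamp = ts;  /* first occurrence: append a new unit */
    units[num].value = value;
    return num + 1;
}

int main(void) {
    DemoUniqueUnit units[16];
    int num = 0;
    int64_t ts[]   = {100, 200, 150, 300, 120};
    int32_t vals[] = {7,   7,   3,   3,   9};
    for (int i = 0; i < 5; ++i) {
        num = demoUniqueInsert(units, num, ts[i], vals[i]);
    }
    for (int i = 0; i < num; ++i) {  /* prints 7@100, 3@150, 9@120 */
        printf("value=%d first_ts=%lld\n", units[i].value, (long long)units[i].timestamp);
    }
    return 0;
}
```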
@@ -5117,11 +5337,11 @@ int32_t functionCompatList[] = { 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, // tid_tag, deriv, csum, mavg, sample, 6, 8, -1, -1, -1, - // block_info,elapsed,histogram - 7, 1, -1 + // block_info,elapsed,histogram,unique + 7, 1, -1, -1 }; -SAggFunctionInfo aAggs[40] = {{ +SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{ // 0, count function does not invoke the finalize function "count", TSDB_FUNC_COUNT, @@ -5591,5 +5811,17 @@ SAggFunctionInfo aAggs[40] = {{ histogram_func_finalizer, histogram_func_merge, dataBlockRequired, + }, + { + // 39 + "unique", + TSDB_FUNC_UNIQUE, + TSDB_FUNC_UNIQUE, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_SELECTIVITY, + unique_function_setup, + unique_function, + unique_func_finalizer, + unique_function_merge, + dataBlockRequired, } }; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 33701984fe83c9afe46bad8b3d7cc66f9cee7e71..1dd9a8e75b32fdac04c660657f6941870ac3632b 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -281,7 +281,7 @@ static int compareRowData(const void *a, const void *b, const void *userData) { tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pRow1->pageId); tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pRow2->pageId); - int16_t offset = supporter->dataOffset; + int32_t offset = supporter->dataOffset; char *in1 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page1, pRow1->offset, offset); char *in2 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page2, pRow2->offset, offset); @@ -289,9 +289,8 @@ static int compareRowData(const void *a, const void *b, const void *userData) { } static void sortGroupResByOrderList(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, SSDataBlock* pDataBlock, SQLFunctionCtx *pCtx) { - SArray *columnOrderList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr); - size_t size = taosArrayGetSize(columnOrderList); - taosArrayDestroy(&columnOrderList); + int32_t size = pRuntimeEnv->pQueryAttr->pGroupbyExpr == NULL? 0: pRuntimeEnv->pQueryAttr->pGroupbyExpr->numOfGroupCols; + if (pRuntimeEnv->pQueryAttr->interval.interval > 0) size++; if (size <= 0) { return; @@ -357,7 +356,13 @@ SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numO idata.info.bytes = pExpr[i].base.resBytes; idata.info.colId = pExpr[i].base.resColId; - int32_t size = MAX(idata.info.bytes * numOfRows, minSize); + int64_t tmp = idata.info.bytes; + tmp *= numOfRows; + if (tmp >= 1024*1024*1024) { // 1G + qError("size is too large, failed to allocate column buffer for output buffer"); + tmp = 128*1024*1024; + } + int32_t size = MAX(tmp, minSize); idata.pData = calloc(1, size); // at least to hold a pointer on x64 platform if (idata.pData == NULL) { qError("failed to allocate column buffer for output buffer"); @@ -1004,6 +1009,13 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx } } + if (functionId == TSDB_FUNC_UNIQUE && + (GET_RES_INFO(&(pCtx[k]))->numOfRes > MAX_UNIQUE_RESULT_ROWS || GET_RES_INFO(&(pCtx[k]))->numOfRes == -1)){ + qError("Unique result num is too large. 
num: %d, limit: %d", + GET_RES_INFO(&(pCtx[k]))->numOfRes, MAX_UNIQUE_RESULT_ROWS); + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_UNIQUE_RESULT_TOO_LARGE); + } + // restore it pCtx[k].preAggVals.isSet = hasAggregates; pCtx[k].pInput = start; @@ -1263,6 +1275,13 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction } else { assert(0); } + + if (functionId == TSDB_FUNC_UNIQUE && + (GET_RES_INFO(&(pCtx[k]))->numOfRes > MAX_UNIQUE_RESULT_ROWS || GET_RES_INFO(&(pCtx[k]))->numOfRes == -1)){ + qError("Unique result num is too large. num: %d, limit: %d", + GET_RES_INFO(&(pCtx[k]))->numOfRes, MAX_UNIQUE_RESULT_ROWS); + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_UNIQUE_RESULT_TOO_LARGE); + } } } } @@ -1893,7 +1912,7 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) { continue; } - if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { + if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { //ts_select ts,top(col,2) tagLen += pCtx[i].outputBytes; pTagCtx[num++] = &pCtx[i]; } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { @@ -1945,8 +1964,12 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr pCtx->requireNull = false; } - pCtx->inputBytes = pSqlExpr->colBytes; pCtx->inputType = pSqlExpr->colType; + if (pRuntimeEnv->pQueryAttr->interBytesForGlobal > INT16_MAX && pSqlExpr->functionId == TSDB_FUNC_UNIQUE){ + pCtx->inputBytes = pRuntimeEnv->pQueryAttr->interBytesForGlobal; + }else{ + pCtx->inputBytes = pSqlExpr->colBytes; + } pCtx->ptsOutputBuf = NULL; @@ -1980,7 +2003,8 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr // set the order information for top/bottom query int32_t functionId = pCtx->functionId; - if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM + || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_UNIQUE) { int32_t f = pExpr[i-1].base.functionId; assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY); @@ -3166,7 +3190,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa if ((*status) != BLK_DATA_ALL_NEEDED) { // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet, // the filter result may be incorrect. 
So in case of interval query, we need to set the correct time output buffer - if (QUERY_IS_INTERVAL_QUERY(pQueryAttr) && (!pQueryAttr->pointInterpQuery)) { + if (QUERY_IS_INTERVAL_QUERY(pQueryAttr) && (!pQueryAttr->pointInterpQuery) && (!pQueryAttr->uniqueQuery)) { SResultRow* pResult = NULL; bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -3178,7 +3202,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery) && (!pQueryAttr->pointInterpQuery)) { // stable aggregate, not interval aggregate or normal column aggregate + } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery) && (!pQueryAttr->pointInterpQuery) && (!pQueryAttr->uniqueQuery)) { // stable aggregate, not interval aggregate or normal column aggregate doSetTableGroupOutputBuf(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pTableScanInfo->pCtx, pTableScanInfo->rowCellInfoOffset, pTableScanInfo->numOfOutput, pRuntimeEnv->current->groupIndex); @@ -3665,6 +3689,9 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i RESET_RESULT_INFO(pCellInfo); pCtx[i].resultInfo = pCellInfo; + if (pCtx[i].functionId == TSDB_FUNC_UNIQUE) { + pCtx[i].pUniqueSet = &pRow->uniqueHash; + } pCtx[i].pOutput = pData->pData; pCtx[i].currentStage = stage; assert(pCtx[i].pOutput != NULL); @@ -3672,7 +3699,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i // set the timestamp output buffer for top/bottom/diff query int32_t fid = pCtx[i].functionId; if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE || - fid == TSDB_FUNC_SAMPLE || fid == TSDB_FUNC_MAVG || fid == TSDB_FUNC_CSUM) { + fid == TSDB_FUNC_SAMPLE || fid == TSDB_FUNC_MAVG || fid == TSDB_FUNC_CSUM || fid == TSDB_FUNC_UNIQUE) { if (i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput; } else if (fid == TSDB_FUNC_INTERP) { assert(pCtx[0].functionId == TSDB_FUNC_TS_DUMMY || pCtx[0].functionId == TSDB_FUNC_TS); @@ -3743,7 +3770,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_MAVG || - functionId == TSDB_FUNC_SAMPLE ) { + functionId == TSDB_FUNC_SAMPLE || functionId == TSDB_FUNC_UNIQUE) { if (i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; } else if (functionId == TSDB_FUNC_INTERP) { assert(pBInfo->pCtx[0].functionId == TSDB_FUNC_TS_DUMMY || pBInfo->pCtx[0].functionId == TSDB_FUNC_TS); @@ -3919,6 +3946,15 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult } } +bool isUniqueQuery(int32_t numOfOutput, SExprInfo* pExprs) { + for (int32_t i = 0; i < numOfOutput; ++i) { + if (pExprs[i].base.functionId == TSDB_FUNC_UNIQUE) { + return true; + } + } + return false; +} + static bool hasMainOutput(SQueryAttr *pQueryAttr) { for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { int32_t functionId = pQueryAttr->pExpr1[i].base.functionId; @@ -3990,6 +4026,9 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe int32_t offset = 0; for (int32_t i = 0; i < numOfOutput; ++i) { pCtx[i].resultInfo = getResultCell(pResult, i, 
rowCellInfoOffset); + if (pCtx[i].functionId == TSDB_FUNC_UNIQUE){ + pCtx[i].pUniqueSet = &pResult->uniqueHash; + } SResultRowCellInfo* pResInfo = pCtx[i].resultInfo; if (pResInfo->initialized && pResInfo->complete) { @@ -4003,7 +4042,8 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe int32_t functionId = pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || - functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) { + functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE || + functionId == TSDB_FUNC_UNIQUE) { if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput; } @@ -4064,7 +4104,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); - int16_t offset = 0; + int32_t offset = 0; for (int32_t i = 0; i < numOfCols; ++i) { pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQueryAttr, page, pResult->offset, offset); offset += pCtx[i].outputBytes; @@ -4072,7 +4112,8 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF int32_t functionId = pCtx[i].functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE || - functionId == TSDB_FUNC_SAMPLE || functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_CSUM) { + functionId == TSDB_FUNC_SAMPLE || functionId == TSDB_FUNC_MAVG || + functionId == TSDB_FUNC_CSUM || functionId == TSDB_FUNC_UNIQUE) { if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput; } @@ -4081,6 +4122,9 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF * not all queries require the interResultBuf, such as COUNT */ pCtx[i].resultInfo = getResultCell(pResult, i, rowCellInfoOffset); + if (pCtx[i].functionId == TSDB_FUNC_UNIQUE) { + pCtx[i].pUniqueSet = &pResult->uniqueHash; + } } } @@ -5512,10 +5556,6 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) { } if (pQuery->interval.interval > 0) { - if (pOrderColumns == NULL) { - pOrderColumns = taosArrayInit(1, sizeof(SColIndex)); - } - SColIndex colIndex = {.colIndex = 0, .colId = 0, .flag = TSDB_COL_NORMAL}; taosArrayPush(pOrderColumns, &colIndex); } @@ -8754,8 +8794,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += tListLen(param->pGroupColIndex[i].name); } - pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx); - pQueryMsg->orderType = htons(pQueryMsg->orderType); + //pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx); + pQueryMsg->groupOrderType = htons(pQueryMsg->groupOrderType); } pQueryMsg->fillType = htons(pQueryMsg->fillType); @@ -8963,7 +9003,7 @@ static int32_t updateOutputBufForTopBotQuery(SQueriedTableInfo* pTableInfo, SCol for (int32_t i = 0; i < numOfOutput; ++i) { int16_t functId = pExprs[i].base.functionId; - if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM || functId == TSDB_FUNC_SAMPLE) { + if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM || functId == TSDB_FUNC_SAMPLE || functId == TSDB_FUNC_UNIQUE) { int32_t j = getColumnIndexInSource(pTableInfo, &pExprs[i].base, pTagCols); if (j < 0 || j >= pTableInfo->numOfCols) { return TSDB_CODE_QRY_INVALID_MSG; @@ -9332,8 +9372,8 @@ SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, 
SColIndex *pCo } pGroupbyExpr->numOfGroupCols = pQueryMsg->numOfGroupCols; - pGroupbyExpr->orderType = pQueryMsg->orderType; - pGroupbyExpr->orderIndex = pQueryMsg->orderByIdx; + pGroupbyExpr->orderType = pQueryMsg->groupOrderType; + //pGroupbyExpr->orderIndex = pQueryMsg->orderByIdx; pGroupbyExpr->columnInfo = taosArrayInit(pQueryMsg->numOfGroupCols, sizeof(SColIndex)); for(int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) { @@ -9548,6 +9588,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S pQueryAttr->vgId = vgId; pQueryAttr->pFilters = pFilters; pQueryAttr->range = pQueryMsg->range; + pQueryAttr->uniqueQuery = isUniqueQuery(numOfOutput, pExprs); pQueryAttr->tableCols = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); if (pQueryAttr->tableCols == NULL) { diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 9d174b0389d74073b5989af5a8fd7c26d5fd80dd..5b210f882415f3995cf4fcde1ba3087397bb75b6 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -46,7 +46,7 @@ tExtMemBuffer* createExtMemBuffer(int32_t inMemSize, int32_t elemSize, int32_t p SExtFileInfo *pFMeta = &pMemBuffer->fileMeta; - pFMeta->pageSize = DEFAULT_PAGE_SIZE; + //pFMeta->pageSize = DEFAULT_PAGE_SIZE; pFMeta->flushoutData.nAllocSize = 4; pFMeta->flushoutData.nLength = 0; diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index 8d63ab4f91ca2de8e246293dafef1b31e93c3e22..d6a7dac7fcb6f525f6bf599d9d45b72f3d8c60f5 100644 --- a/src/query/src/qFilter.c +++ b/src/query/src/qFilter.c @@ -807,7 +807,8 @@ int32_t filterGetFiledByData(SFilterInfo *info, int32_t type, void *v, int32_t d return -1; } - +// The data parameter could be declared void *data instead of void **data, since there is no need to call tfree(*data) to reset *data to NULL. +// Besides, a field's data value is a pointer, so dataLen should preferably be POINTER_BYTES. int32_t filterAddField(SFilterInfo *info, void *desc, void **data, int32_t type, SFilterFieldId *fid, int32_t dataLen, bool freeIfExists) { int32_t idx = -1; uint32_t *num; @@ -1285,7 +1286,7 @@ int32_t filterAddUnitFromUnit(SFilterInfo *dst, SFilterInfo *src, SFilterUnit* u void *data = FILTER_UNIT_VAL_DATA(src, u); if (IS_VAR_DATA_TYPE(type)) { if (FILTER_UNIT_OPTR(u) == TSDB_RELATION_IN) { - filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, sizeof(SHashObj), false); + filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, POINTER_BYTES, false); // arguably this should be sizeof(SHashObj), but POINTER_BYTES is also correct because only a pointer is stored.
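Beyond the UNIQUE function itself, several hunks in this patch (createOutputBuf, getPosInResultPage, initResultRowPool) apply the same defensive pattern: widen a per-element byte count to int64_t before multiplying it by a row count, then clamp the product before narrowing it back to the int32_t the surrounding interfaces expect. A minimal sketch of the pattern, with illustrative names and reusing the 1 GB check and 128 MB fallback that createOutputBuf uses:

```
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_BUF_SIZE (128 * 1024 * 1024)  /* fallback cap, as in the patch */

static int32_t demoSafeBufSize(int32_t elemBytes, int32_t numOfRows) {
    int64_t tmp = elemBytes;  /* widen first ... */
    tmp *= numOfRows;         /* ... so the product cannot wrap 32-bit arithmetic */
    if (tmp >= 1024LL * 1024 * 1024) {  /* refuse absurd sizes (1 GB) */
        fprintf(stderr, "size is too large, clamping buffer\n");
        tmp = DEMO_MAX_BUF_SIZE;
    }
    return (int32_t)tmp;
}

int main(void) {
    /* 65536 * 65536 wraps to 0 in 32-bit arithmetic; the widened path clamps instead */
    printf("%d\n", demoSafeBufSize(65536, 65536));
    return 0;
}
```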
t = FILTER_GET_FIELD(dst, right); diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index e737db6edabf06dfb6c458364755d192bc8b8694..95c7f81ed68d0ef8f303ee45deda89e347d163d9 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -77,7 +77,7 @@ static SQueryNode* createQueryNode(int32_t type, const char* name, SQueryNode** pGroupbyExpr->tableIndex = p->tableIndex; pGroupbyExpr->orderType = p->orderType; - pGroupbyExpr->orderIndex = p->orderIndex; + //pGroupbyExpr->orderIndex = p->orderIndex; pGroupbyExpr->numOfGroupCols = p->numOfGroupCols; pGroupbyExpr->columnInfo = taosArrayDup(p->columnInfo); pNode->pExtInfo = pGroupbyExpr; diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index 05ac5f7dc579aa788538ead5b7be2ff72926ea12..8610aaeeb20279b1a898bee78403ed8c8cd18fee 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -20,7 +20,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa pResBuf->pageSize = pagesize; pResBuf->numOfPages = 0; // all pages are in buffer in the first place pResBuf->totalBufSize = 0; - pResBuf->inMemPages = inMemBufSize/pagesize; // maximum allowed pages, it is a soft limit. + pResBuf->inMemPages = inMemBufSize/pagesize + 1; // maximum allowed pages, it is a soft limit. pResBuf->allocateId = -1; pResBuf->comp = true; pResBuf->file = NULL; @@ -28,7 +28,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa pResBuf->fileSize = 0; // at least more than 2 pages must be in memory - assert(inMemBufSize >= pagesize * 2); + // assert(inMemBufSize >= pagesize * 2); pResBuf->lruList = tdListNew(POINTER_BYTES); @@ -257,7 +257,7 @@ static char* evicOneDataPage(SDiskbasedResultBuf* pResultBuf) { int32_t prev = pResultBuf->inMemPages; // increase by 50% of previous mem pages - pResultBuf->inMemPages = (int32_t)(pResultBuf->inMemPages * 1.5f); + pResultBuf->inMemPages = (int32_t)(pResultBuf->inMemPages * 1.5f) + 1; // if pResultBuf->inMemPages == 1, *1.5 always == 1 qWarn("%p in memory buf page not sufficient, expand from %d to %d, page size:%d", pResultBuf, prev, pResultBuf->inMemPages, pResultBuf->pageSize); diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 3ada2b76c7d085904c5a84f284f2a0f64efa028e..22bdefd59ef8844a560bb2944f8e61ad15f5f27f 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -33,7 +33,7 @@ typedef struct SCompSupporter { } SCompSupporter; int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) { - if (pQueryAttr && (!stable)) { + if (pQueryAttr && (!stable)) { // if table is stable, no need return more than 1 no in merge stage for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) { if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM || @@ -42,6 +42,9 @@ int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, boo return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64; } } + if (pQueryAttr->uniqueQuery){ + return MAX_UNIQUE_RESULT_ROWS; + } } return 1; @@ -85,6 +88,10 @@ void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) { for(int32_t i = 0; i < pResultRowInfo->size; ++i) { if (pResultRowInfo->pResult[i]) { tfree(pResultRowInfo->pResult[i]->key); + if (pResultRowInfo->pResult[i]->uniqueHash){ + taosHashCleanup(pResultRowInfo->pResult[i]->uniqueHash); + pResultRowInfo->pResult[i]->uniqueHash = NULL; + } } } @@ -150,11 +157,11 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow 
*pResultRow, int16 if (pResultRow->pageId >= 0) { tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResultRow->pageId); - int16_t offset = 0; + int32_t offset = 0; for (int32_t i = 0; i < pRuntimeEnv->pQueryAttr->numOfOutput; ++i) { SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i]; - int16_t size = pRuntimeEnv->pQueryAttr->pExpr1[i].base.resType; + int32_t size = pRuntimeEnv->pQueryAttr->pExpr1[i].base.resBytes; char * s = getPosInResultPage(pRuntimeEnv->pQueryAttr, page, pResultRow->offset, offset); memset(s, 0, size); @@ -192,7 +199,13 @@ SResultRowPool* initResultRowPool(size_t size) { p->numOfElemPerBlock = 128; p->elemSize = (int32_t) size; - p->blockSize = p->numOfElemPerBlock * p->elemSize; + int64_t tmp = p->elemSize; + tmp *= p->numOfElemPerBlock; + if (tmp > 1024*1024*1024){ + qError("ResultRow blockSize is too large:%" PRId64, tmp); + tmp = 128*1024*1024; + } + p->blockSize = tmp; p->position.pos = 0; p->pData = taosArrayInit(8, POINTER_BYTES); @@ -217,7 +230,6 @@ SResultRow* getNewResultRow(SResultRowPool* p) { } p->position.pos = (p->position.pos + 1)%p->numOfElemPerBlock; - initResultRow(ptr); return ptr; } @@ -451,9 +463,7 @@ int32_t tsDescOrder(const void* p1, const void* p2) { } } -void - -orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { +void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { __compar_fn_t fn = NULL; if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) { fn = tsAscOrder; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index c9a28d3342e46e6d6cd0b8942f3528147bb151de..60c7311d4c0f3d784231fceb8a7e2628a5bd4eda 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3150,8 +3150,7 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) { } pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - - if (pTable->lastCols[j].bytes > 0) { + if (pTable->lastCols[j].bytes > 0) { void* value = pTable->lastCols[j].pData; switch (pColInfo->info.type) { case TSDB_DATA_TYPE_BINARY: @@ -3205,7 +3204,6 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) { pColInfo = taosArrayGet(pQueryHandle->pColumns, n); pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes;; - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { *(TSKEY *)pData = pTable->lastCols[j].ts; continue; @@ -3231,7 +3229,7 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) { if (priKey != TSKEY_INITIAL_VAL) { pColInfo = taosArrayGet(pQueryHandle->pColumns, priIdx); pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - + *(TSKEY *)pData = priKey; for (int32_t n = 0; n < tgNumOfCols; ++n) { @@ -3241,7 +3239,7 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) { pColInfo = taosArrayGet(pQueryHandle->pColumns, n); pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes;; - + assert (pColInfo->info.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX); if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { @@ -4288,6 +4286,7 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) { } taosHashCleanup(pGroupList->map); + pGroupList->map = NULL; taosArrayDestroy(&pGroupList->pGroupList); pGroupList->numOfTables = 0; } @@ -4662,4 +4661,4 @@ void tsdbAddScanCallback(TsdbQueryHandleT* queryHandle, readover_callback callba pQueryHandle->readover_cb = callback; pQueryHandle->param = param; return ; -} \ No newline at end of file +} diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 
752930ed7e762eac31c77b8c1c1a91aa626ef16a..c0a4986936f7d43d48aa4fac0be6f427f5db32f5 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 132 +#define TSDB_CFG_MAX_NUM 133 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 diff --git a/src/util/src/terror.c b/src/util/src/terror.c index acbee18ec21b02761295de90ef9ff535a97739d1..e78d1d37ee900268be5cdc7c2883b74284c65639 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -299,6 +299,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, "Query buffer limit ha TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INCONSISTAN, "File inconsistance in replica") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_TIME_CONDITION, "One valid time range condition expected") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_SYS_ERROR, "System error") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_UNIQUE_RESULT_TOO_LARGE, "Unique result num is too large") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired")
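The new error code closes the loop between the two tables the patch touches: taoserror.h defines the numeric code (0x0711, in the QRY range) and terror.c registers its message through TAOS_DEFINE_ERROR, which is what tstrerror() ultimately resolves. The stand-in below sketches that code-to-message pairing with illustrative names; the real table is generated by the TAOS_DEFINE_ERROR macros rather than written by hand:

```
#include <stdint.h>
#include <stdio.h>

#define DEMO_QRY_UNIQUE_RESULT_TOO_LARGE 0x0711

typedef struct {
    int32_t     code;
    const char *msg;
} DemoError;

static const DemoError demoErrors[] = {
    {0x0710, "invalid schema version"},
    {DEMO_QRY_UNIQUE_RESULT_TOO_LARGE, "Unique result num is too large"},
};

/* Simplified stand-in for the code-to-message lookup done by tstrerror(). */
static const char *demoStrError(int32_t code) {
    for (size_t i = 0; i < sizeof(demoErrors) / sizeof(demoErrors[0]); ++i) {
        if (demoErrors[i].code == code) return demoErrors[i].msg;
    }
    return "unknown error";
}

int main(void) {
    printf("0x%04x: %s\n", DEMO_QRY_UNIQUE_RESULT_TOO_LARGE,
           demoStrError(DEMO_QRY_UNIQUE_RESULT_TOO_LARGE));
    return 0;
}
```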