diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index 463e59d27fcfd944bfef751d427a85bdea8e5045..a48ea49f59ce3fbccf5370de2276692f8e367fe3 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -95,7 +95,7 @@ TDengine 是一个高效的存储、查询、分析时序大数据的平台, - [Grafana](/connections#grafana):获取并可视化保存在 TDengine 的数据 - [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过 IDEA 数据库管理工具可视化使用 TDengine - [TDengineGUI](https://github.com/skye0207/TDengineGUI):基于 Electron 开发的跨平台 TDengine 图形化管理工具 -- [DataX](https://www.taosdata.com/blog/2021/10/26/3156.html):支持 TDeninge 和其他数据库之间进行数据迁移的工具 +- [DataX](https://www.taosdata.com/blog/2021/10/26/3156.html):支持 TDengine 和其他数据库之间进行数据迁移的工具 ## [TDengine 集群的安装、管理](/cluster) diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md index 8d555c4778187394b8849113d68afff6d1158a4d..72972dc4f7550c84797caf8be6b24d07c9ee77b0 100644 --- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md @@ -1,6 +1,6 @@ # 如何使用 taosBenchmark 进行性能测试 -自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。 +自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活按照实际场景定制表的个数(对应设备数)、表的列数(对应每个设备采样点)、数据类型、乱序数据比例、顺序或轮询插入方式、以及并发线程数量。 运行 taosBenchmark 很简单,通过下载 [TDengine 安装包](https://www.taosdata.com/cn/all-downloads/)或者自行下载 [TDengine 
代码](https://github.com/taosdata/TDengine)编译都可以在安装目录或者编译结果目录中找到并运行。 @@ -153,7 +153,7 @@ insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms ``` $ taosBenchmark --help --f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. +-f, --file=FILE The JSON configuration file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. -u, --user=USER The user name to use when connecting to the server. -p, --password The password to use when connecting to the server. -c, --config-dir=CONFIG_DIR Configuration directory. @@ -163,7 +163,7 @@ $ taosBenchmark --help -d, --database=DATABASE Destination database. By default is 'test'. -a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3. -m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'. --s, --sql-file=FILE The select sql file. +-s, --sql-file=FILE The select SQL file. -N, --normal-table Use normal table flag. -o, --output=FILE Direct output to the named file. By default use './output.txt'. -q, --query-mode=MODE Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC. 
@@ -339,6 +339,7 @@ select first(current) took 0.024105 second(s) "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", + "use_sample_ts": "no", "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] @@ -347,7 +348,7 @@ select first(current) took 0.024105 second(s) } ``` -例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。 +例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 CSV 文件,来实现导入已有数据的功能。CSV 为半角逗号分隔的数据文件,每行数据列数需要和表的数据列数(如果是标签数据,是和标签数)相同。 ## 使用 taosBenchmark 进行查询和订阅测试 @@ -401,16 +402,16 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅 "query_times": 每种查询类型的查询次数 "query_mode": 查询数据接口,"taosc":调用TDengine的c接口;“restful”:使用 RESTful 接口。可选项。缺省是“taosc”。 "specified_table_query": { 指定表的查询 -"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。 -"concurrent": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。 -"sqls": 可以添加多个sql语句,最多支持100条。 +"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。 +"concurrent": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。 +"sqls": 可以添加多个 SQL 语句,最多支持 100 条。 "sql": 查询语句。必选项。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 "super_table_query": { 对超级表中所有子表的查询 "stblname": 超级表名称。必选项。 -"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。 -"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。 -"sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 +"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。 +"threads": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的 sqls。 
+"sql": "SELECT COUNT(*) FROM xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 ``` @@ -463,7 +464,7 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅 "restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。(请注意执行用户需要对 dataDir 目录有读写权限) "keepProgress": 保留订阅信息进度。yes表示保留订阅信息,no表示不保留。该值为yes,restart为no时,才能继续之前的订阅。 "resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。 -"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 +"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条 SQL 语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 ``` ## 结语 @@ -478,7 +479,7 @@ taosBenchmark支持两种配置参数的模式,一种是命令行参数,一 一、命令行参数 --f:指定taosBenchmark所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 +-f:指定 taosBenchmark 所需参数的 JSON 配置文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是 NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 -u: 用户名。可选项,缺省是“root“。 @@ -512,7 +513,7 @@ taosBenchmark支持两种配置参数的模式,一种是命令行参数,一 -T:并发线程数。可选项,缺省是10。 --i:两次sql插入的休眠时间间隔,缺省是0。 +-i:两次 SQL 插入的休眠时间间隔,缺省是0。 -S:两次插入间隔时间戳步长,缺省是1。 @@ -601,7 +602,7 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", - "use_sample_ts": "no", + "use_sample_ts": "no", "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] @@ -632,9 +633,9 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "insert_interval": 两次发送请求的间隔时间。可选项,缺省是0,代表无人工设置的时间间隔,单位为ms。。 -"interlace_rows": 设置轮询插入每个单表数据的条目数,如果interlace_rows*childtable_count*supertable_num小于num_of_records_per_req时,则请求插入的数目以interlace_rows*childtable_count*supertable_num为准。可选项,缺省是0。 +"interlace_rows": 设置轮询插入每个单表数据的条目数,如果 interlace_rows * childtable_count * supertable_num 小于 num_of_records_per_req 时,则请求插入的数目以 interlace_rows * childtable_count * supertable_num 
为准。可选项,缺省是 0。 -"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的sql不能大于maxsqllen,如果过大,则取taosd限制的1M长度(1048576)。0代表不插入数据,建议配置大于0。 +"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的 SQL 不能大于 maxSqlLen,如果过大,则取 taosd 限制的1M长度(1048576)。0 代表不插入数据,建议配置大于 0。 "databases": [{ @@ -680,7 +681,7 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "auto_create_table": 子表的创建方式,“yes”:自动建表;"no":提前建表。可选项,缺省是“no”。当 child_table_exists 为 “yes” 时此参数将自动置为 no 。 -"batch_create_tbl_num": 一个sql批量创建子表的数目。 +"batch_create_tbl_num": 一个 SQL 批量创建子表的数目。 "data_source": 插入数据来源,"rand":实例随机生成;“sample”:从样例文件中读取。可选项。缺省是“rand”。 @@ -706,31 +707,31 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "start_timestamp": 子表中记录时间戳的起始值,支持"2020-10-01 00:00:00.000"和“now”两种格式,可选项,缺省是“now”。 -"sample_format": 当插入数据源选择“sample”时,sample文件的格式,"csv":csv格式,每列的值与子表的columns保持一致,但不包含第1列的时间戳。可选项,缺省是”csv”。目前仅仅支持csv格式的sample文件。 +"sample_format": 当插入数据源选择“sample”时,sample文件的格式,"csv":CSV 格式,每列的值与子表的 columns 保持一致,但不包含第1列的时间戳。可选项,缺省是”csv”。目前仅仅支持 CSV 格式的 sample 文件。 "sample_file":sample文件,包含路径和文件名。当插入数据源选择“sample”时,该项为必选项。 -"use_sample_ts":sample文件是否包含第一列时间戳,可选项: "yes" 和 "no", 默认 "no"。(注意:若为yes,则disorder_ratio 和 disorder_range失效) +"use_sample_ts":sample 文件是否包含第一列时间戳,可选项: "yes" 和 "no", 默认 "no"。(注意:若为 yes,则 disorder_ratio 和 disorder_range 失效) -"tags_file": 子表tags值文件,只能是csv文件格式,且必须与超级表的tags保持一致。当该项为非空时,表示子表的tags值从文件中获取;为空时,实例随机生成。可选项,缺省是空。 +"tags_file": 子表 tags 值文件,只能是 CSV 文件格式,且必须与超级表的tags保持一致。当该项为非空时,表示子表的tags值从文件中获取;为空时,实例随机生成。可选项,缺省是空。 -"columns": [{ 超级表的column列表,最大支持1024列(指所有普通列+超级列总和)。默认的第一列为时间类型,程序自动添加,不需要手工添加。 +"columns": [{ 超级表的 column 列表,最大支持 4096 列(指所有普通列+超级列总和)。默认的第一列为时间类型,程序自动添加,不需要手工添加。 "type": 该列的数据类型 ,必选项。 -"len": 该列的长度,只有type是BINARY或NCHAR时有效,可选项,缺省值是8。 +"len": 该列的长度,只有 type 是 BINARY 或 NCHAR 时有效,可选项,缺省值是 8。 -"count":该类型的连续列个数,可选项,缺省是1。 +"count":该类型的连续列个数,可选项,缺省是 1。 }], -"tags": [{ 超级表的tags列表,type不能是timestamp类型, 最大支持128个。 +"tags": [{ 超级表的 tags 列表,type不能是 TIMESTAMP 类型, 最大支持 128 个。 "type": 该列的数据类型 ,必选项。 -"len": 
该列的长度,只有type是BINARY或NCHAR时有效,可选项,缺省值是8。 +"len": 该列的长度,只有 type 是 BINARY 或 NCHAR 时有效,可选项,缺省值是 8。 -"count":该类型的连续列个数,可选项,缺省是1。 +"count":该类型的连续列个数,可选项,缺省是 1。 }] @@ -798,11 +799,11 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "specified_table_query": { 指定表的查询 -"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。 +"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。 -"concurrent": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。 +"concurrent": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。 -"sqls": 可以添加多个sql语句,最多支持100条。 +"sqls": 可以添加多个 SQL 语句,最多支持100条。 "sql": 查询语句。必选项。 @@ -812,15 +813,15 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "stblname": 超级表名称。必选项。 -"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。 +"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。 -"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。 +"threads": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的 sqls。 "sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 -注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 +注意:每条 SQL 语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。 @@ -882,11 +883,11 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "confirm_parameter_prompt": 执行过程中提示是否确认,为no时,执行过程无需手工输入enter。可选项,缺省是no。 -注意:这里的订阅查询sql目前只支持select * ,其余不支持。 +注意:这里的订阅查询 SQL 目前只支持 SELECT * ,其余不支持。 "specified_table_query": 指定表的订阅。 -"concurrent": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。 +"concurrent": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。 "mode": 订阅模式。目前支持同步和异步订阅,缺省是sync。 @@ -906,11 +907,11 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "stblname": 超级表名称。必选项。 -"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。 +"threads": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。 "mode": 订阅模式。 -"interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。 +"interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是 0。 "restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。 @@ -918,6 +919,6 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一 "resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。 -"sql": " select count(*) from xxxx 
"。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 +"sql": " SELECT COUNT(*) FROM xxxx "。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 -"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 +"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条 SQL 语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index 7e28d6de1a25c7f516472097caef28d05a97854f..9337da0e39e0851e434bf21aebd0a06b39ef3716 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -35,7 +35,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, **前言**
在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。 -
目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。 +
目前,TDengine 的所有官方连接器均支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。 **无模式写入行协议** diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index ac9d93f983e8338eb048a77f2ab9688bac4b288f..b3ff66765f39636a110bff38c1a11a934982fa52 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -2,7 +2,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用。 -![image-connecotr](../images/connector.png) +![image-connector](../images/connector.png) 目前 TDengine 的连接器可支持的平台广泛,包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。对照矩阵如下: @@ -13,7 +13,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java | **JDBC** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ | | **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ | | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | -| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | +| **Node.js** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | | **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | @@ -22,7 +22,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java 注意: * 在没有安装 TDengine 服务端软件的系统中使用连接器(除 RESTful 外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux 系统中文件名为 libtaos.so,Windows 系统中为 taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。 -* 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的 API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 +* 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `taos_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的 API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 * 升级 TDengine 到 2.0.8.0 版本的用户,必须更新 JDBC。连接 TDengine 必须升级 taos-jdbcdriver 到 2.0.12 及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。 * 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。 @@ -434,9 +434,9 @@ typedef 
struct TAOS_MULTI_BIND { **说明** 协议类型是枚举类型,包含以下三种格式: - TSDB_SML_LINE_PROTOCOL:InfluxDB行协议(Line Protocol) - TSDB_SML_TELNET_PROTOCOL: OpenTSDB文本行协议 - TSDB_SML_JSON_PROTOCOL: OpenTSDB Json协议格式 + TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol) + TSDB_SML_TELNET_PROTOCOL: OpenTSDB 文本行协议 + TSDB_SML_JSON_PROTOCOL: OpenTSDB JSON 协议格式 时间戳分辨率的定义,定义在 taos.h 文件中,具体内容如下: TSDB_SML_TIMESTAMP_NOT_CONFIGURED = 0, @@ -451,7 +451,7 @@ typedef struct TAOS_MULTI_BIND { 对于 OpenTSDB 的文本协议,时间戳的解析遵循其官方解析规则 — 按照时间戳包含的字符的数量来确认时间精度。 **支持版本** - 该功能接口从2.3.0.0版本开始支持。 + 该功能接口从 2.3.0.0 版本开始支持。 ```c #include diff --git a/documentation20/cn/14.devops/03.immigrate/docs.md b/documentation20/cn/14.devops/03.immigrate/docs.md index 1b003f5d0c9adab5a2da3ce22133eacaf5132df4..bff4d4a7137a3bcb2b0ec0dec891c7a584d9df0a 100644 --- a/documentation20/cn/14.devops/03.immigrate/docs.md +++ b/documentation20/cn/14.devops/03.immigrate/docs.md @@ -174,7 +174,7 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其 为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 -DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于DataX的TDeninge数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 +DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index 2929ee5e33eab7531e3777869a4d9dafea40e7b4..9870f0f25e4b73458357e35fa10d6a8e3575c5fa 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -38,6 +38,8 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [Data Writing via Schemaless](/insert#schemaless): write one or multiple records with automatic table creation and adaptive table structure maintenance - [Data Writing via Prometheus](/insert#prometheus): Configure 
Prometheus to write data directly without any code - [Data Writing via Telegraf](/insert#telegraf): Configure Telegraf to write collected data directly without any code +- [Data Writing via collectd](/insert#collectd): Configure collectd to write collected data directly without any code +- [Data Writing via StatsD](/insert#statsd): Configure StatsD to write collected data directly without any code - [Data Writing via EMQX](/insert#emq): Configure EMQX to write MQTT data directly without any code - [Data Writing via HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code @@ -95,7 +97,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [R](/connections#r): access data stored in TDengine server via JDBC configured within R - [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool - [TDengineGUI](https://github.com/skye0207/TDengineGUI): a TDengine management tool with Graphical User Interface -- [DataX](https://github.com/taosdata/datax): a data immigaration tool with TDeninge supported +- [DataX](https://github.com/taosdata/datax): a data immigration tool with TDengine supported ## [Installation and Management of TDengine Cluster](/cluster) diff --git a/documentation20/en/02.getting-started/02.taosdemo/docs.md b/documentation20/en/02.getting-started/02.taosdemo/docs.md index ff017c01559d9137792a25dec8ffd052ad643cda..be2a0529d26ec8b620391b9e0fd1a5ba416c047b 100644 --- a/documentation20/en/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/en/02.getting-started/02.taosdemo/docs.md @@ -1,4 +1,4 @@ -Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. 
The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark customized parameters. +Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of tables, columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark customized parameters. Running taosBenchmark is very simple. Just download the [TDengine installation package](https://www.taosdata.com/cn/all-downloads/) or compiling the [TDengine code](https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory. @@ -160,7 +160,7 @@ The complete list of taosBenchmark command-line arguments can be displayed via t ``` $ taosBenchmark --help --f, --file=FILE The meta file to the execution procedure. 
Currently, we support standard UTF-8 (without BOM) encoded files only. +-f, --file=FILE The JSON configuration file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. -u, --user=USER The user name to use when connecting to the server. -p, --password The password to use when connecting to the server. -c, --config-dir=CONFIG_DIR Configuration directory. @@ -170,7 +170,7 @@ $ taosBenchmark --help -d, --database=DATABASE Destination database. By default is 'test'. -a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3. -m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'. --s, --sql-file=FILE The select sql file. +-s, --sql-file=FILE The select SQL file. -N, --normal-table Use normal table flag. -o, --output=FILE Direct output to the named file. By default use './output.txt'. -q, --query-mode=MODE Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC. @@ -346,6 +346,7 @@ In addition to the command line approach, taosBenchmark also supports take a JSO "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", + "use_sample_ts": "no", "tags_file": "", "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] @@ -354,7 +355,9 @@ In addition to the command line approach, taosBenchmark also supports take a JSO } ``` -For example, we can specify different number of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to use multiple taosBenchmark processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. 
You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file". +For example, we can specify different number of threads for table creation and data insertion with `thread_count` and `thread_count_create_tbl`. You can use a combination of `child_table_exists`, `childtable_limit` and `childtable_offset` to use multiple taosBenchmark processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a CSV file with `data_source` and `sample_file`. The argument `use_sample_ts` indicate whether the first column, timestamp in TDengine would use the data of the specified CSV file too. + +CSV file is a plain text format and use comma signs as separators between two columns. The number of columns must is same as the number of columns or tags of the table you intend to insert. # Use taosBenchmark for query and subscription testing @@ -410,7 +413,7 @@ The following parameters are specific to the query in the JSON file. "specified_table_query": { query for the specified table "query_interval": interval to execute sqls, in seconds. Optional, default is 0. "concurrent": the number of threads to execute sqls concurrently, optional, default is 1. Each thread executes all sqls. -"sqls": multiple sql statements can be added, support up to 100 statements. +"sqls": multiple SQL statements can be added, support up to 100 statements. "sql": query statement. Mandatory. "result": the name of the file where the query result will be written. Optional, default is null, means the query result will not be written to the file. "super_table_query": { query for all sub-tables in the super table @@ -470,7 +473,7 @@ The following are the meanings of the parameters specific to the subscription fu "restart": subscription restart." yes": restart the subscription if it already exists, "no": continue the previous subscription. 
(Please note that the executing user needs to have read/write access to the dataDir directory) "keepProgress": keep the progress of the subscription information. yes means keep the subscription information, no means don't keep it. The value is yes and restart is no to continue the previous subscriptions. "resubAfterConsume": Used in conjunction with keepProgress to call unsubscribe after the subscription has been consumed the appropriate number of times and to subscribe again. -"result": the name of the file to which the query result is written. Optional, default is null, means the query result will not be written to the file. Note: The file to save the result after each sql statement cannot be renamed, and the file name will be appended with the thread number when generating the result file. +"result": the name of the file to which the query result is written. Optional, default is null, means the query result will not be written to the file. Note: The file to save the result after each SQL statement cannot be renamed, and the file name will be appended with the thread number when generating the result file. 
``` # Conclusion diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index db538f45f709442d4fbf1691d8be80e15595ee41..5608a46489a9af9814bb016071f3eb3cf9ea6e98 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -35,7 +35,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list [Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list sudo apt-get update -apt-get policy tdengine +apt-cache policy tdengine sudo apt-get install tdengine ``` diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md index 76be97d4f2cfefa4eaecdbc8c1f7a125e5d3cd54..942a1c26d66a0c25717e4b2bd58461abc705bf6c 100644 --- a/documentation20/en/05.insert/docs.md +++ b/documentation20/en/05.insert/docs.md @@ -4,7 +4,7 @@ TDengine supports multiple ways to write data, including SQL, Prometheus, Telegr ## Data Writing via SQL -Applications insert data by executing SQL insert statements through C/C++, Java, Go, C#, Python, Node.js Connectors, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: +Applications insert data by executing SQL insert statements through C/C++, Java, Go, C#, Python, Node.js connectors, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: ```mysql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); @@ -36,7 +36,7 @@ For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosd **Introduction**
In many IoT applications, data collection is often used in intelligent control, business analysis and device monitoring etc. As fast application upgrade and iteration, or hardware adjustment, data collection metrics can change rapidly over time. To provide solutions to such use cases, from version 2.2.0.0, TDengine supports writing data via Schemaless. When using Schemaless, action of pre-creating table before inserting data is no longer needed anymore. Tables, data columns and tags can be created automatically. Schemaless can also add additional data columns to tables if necessary, to make sure data can be properly stored into TDengine. -
TDengine C/C++ Connector provides Schemaless API. Please see [Schemaless data writing API](https://www.taosdata.com/en/documentation/connector#schemaless) for detailed data writing format. +
All of TDengine's official connectors now provide the Schemaless API. Please see [Schemaless data writing API](https://www.taosdata.com/en/documentation/connector#schemaless) for the detailed data writing format.
Super table and corresponding child tables created via Schemaless are identical to the ones created via SQL, so inserting data into these tables via SQL is also supported. Note that child table names are generated via Schemaless are following special rules through tags mapping. Therefore, child table names are usually not meaningful in terms of readability. **Schemaless writing protocols** diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index c74dc7fb0bd2252a9d517b5bde8fdd794a5c158e..fccd0c70361f804189b8f44072ba78c9f8626bbc 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -22,7 +22,7 @@ Note: ● stands for that has been verified by official tests; ○ stands for th Note: - To access the TDengine database through connectors (except RESTful) in the system without TDengine server software, it is necessary to install the corresponding version of the client installation package to make the application driver (the file name is libtaos.so in Linux system and taos.dll in Windows system) installed in the system, otherwise, the error that the corresponding library file cannot be found will occur. -- All APIs that execute SQL statements, such as `tao_query()`, `taos_query_a()`, `taos_subscribe()` in C/C++ Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined. +- All APIs that execute SQL statements, such as `taos_query()`, `taos_query_a()`, `taos_subscribe()` in C/C++ connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined. - Users upgrading to TDengine 2.0. 8.0 must update the JDBC connection. TDengine must upgrade taos-jdbcdriver to 2.0.12 and above. 
- No matter which programming language connector is selected, TDengine version 2.0 and above recommends that each thread of database application establish an independent connection or establish a connection pool based on threads to avoid mutual interference between threads of "USE statement" state variables in the connection (but query and write operations of the connection are thread-safe). @@ -347,6 +347,108 @@ Gets the result set of the statement. The result set is used in the same way as Execution completed, release all resources. +- `char * taos_stmt_errstr(TAOS_STMT *stmt)` + +Gets the error message if any stmt API returns error. + + +### Schemaless writing API + +In addition to writing data using SQL or using the parameter binding API, writing can also be done using Schemaless, which eliminates the need to create a super table/data sub-table data structure in advance and writes data directly, while the TDengine system automatically creates and maintains the required table structure based on the written data content. The use of Schemaless is described in the Schemaless Writing section, and the C/C++ API used with it is described here. + +- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` + + **Function Description** + + This interface writes the text data of the line protocol to TDengine. + + **Parameter Description** + + taos: database connection, the database connection established by taos_connect function. + + lines: text data. A pattern-free text string that meets the parsing format requirements. + + numLines: the number of lines of the text data, cannot be 0. + + protocol: the protocol type of the lines, used to identify the format of the text data. + + precision: precision string of the timestamp in the text data. + + **Return Value** + + TAOS_RES structure, the application can get the error message by using taos_errstr and also get the error code by using taos_errno. 
+ + In some cases, the returned TAOS_RES is NULL, in which case taos_errno can still be called to safely get the error code information. + + The returned TAOS_RES needs to be freed by the caller, otherwise a memory leak will occur. + + **Description** + + The protocol type is enumerated and contains the following three formats. + + TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol) + + TSDB_SML_TELNET_PROTOCOL: OpenTSDB Text Line Protocol + + TSDB_SML_JSON_PROTOCOL: OpenTSDB JSON protocol format + + The timestamp resolution is defined in the taos.h file, as follows + + TSDB_SML_TIMESTAMP_NOT_CONFIGURED = 0, + + TSDB_SML_TIMESTAMP_HOURS, + + TSDB_SML_TIMESTAMP_MINUTES, + + TSDB_SML_TIMESTAMP_SECONDS, + + TSDB_SML_TIMESTAMP_MILLI_SECONDS, + + TSDB_SML_TIMESTAMP_MICRO_SECONDS, + + TSDB_SML_TIMESTAMP_NANO_SECONDS + + Note that the timestamp resolution parameter only takes effect when the protocol type is SML_LINE_PROTOCOL. + + For OpenTSDB text protocols, the timestamp resolution follows the official resolution rules - the time precision is determined by the number of characters contained in the timestamp. + + **Supported versions** + + This functional interface is supported since version 2.3.0.0. 
+ +```c +#include +#include +#include + +int main() { + const char* host = "127.0.0.1"; + const char* user = "root"; + const char* passwd = "taosdata"; + + // connect to server + TAOS* taos = taos_connect(host, user, passwd, "test", 0); + + // prepare the line string + char* lines1[] = { + "stg,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000", + "stg,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833641000000" + }; + + // schema-less insert + TAOS_RES* res = taos_schemaless_insert(taos, lines1, 2, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS); + if (taos_errno(res) != 0) { + printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res)); + } + + taos_free_result(res); + + // close the connection + taos_close(taos); + return (code); +} +``` + ### Continuous query interface TDengine provides time-driven real-time stream computing APIs. You can perform various real-time aggregation calculation operations on tables (data streams) of one or more databases at regular intervals. The operation is simple, only APIs for opening and closing streams. 
The details are as follows: diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index b6c8b3de3f9b1b0d84f9b1ea5eecc496cf58ba0c..f648baa9744cf00545e3f96b736661dce6e958e0 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -385,7 +385,7 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) { if (i == num_fields - 1) { if (fields[i].type == TSDB_DATA_TYPE_JSON) { - sprintf(result + strlen(result) - 1, "%s'", ")"); + sprintf(result + strlen(result) - 1, "'%s", ")"); } else { sprintf(result + strlen(result) - 1, "%s", ")"); } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 5aec7b686f84314c8efdea09d377a5a6c948ac30..4442338a7bbc8789f883f7a427687130c50a548f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1113,7 +1113,7 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { const char* msg1 = "invalid query expression"; const char* msg2 = "top/bottom query does not support order by value in time window query"; - const char* msg3 = "unique function does not supportted in time window query"; + const char* msg3 = "unique/state function does not supportted in time window query"; /* * invalid sql: @@ -1125,7 +1125,8 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn if (pExpr->base.functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pExpr->base.functionId == TSDB_FUNC_UNIQUE) { + if (pExpr->base.functionId == TSDB_FUNC_UNIQUE || pExpr->base.functionId == TSDB_FUNC_STATE_COUNT || + pExpr->base.functionId == TSDB_FUNC_STATE_DURATION) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } } @@ -2717,6 +2718,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* 
msg28 = "the second paramter of diff should be 0 or 1"; const char* msg29 = "key timestamp column cannot be used to unique/mode/tail function"; const char* msg30 = "offset is out of range [0, 100]"; + const char* msg31 = "state function can not be used in subquery"; switch (functionId) { case TSDB_FUNC_COUNT: { @@ -2815,7 +2817,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_STDDEV: case TSDB_FUNC_LEASTSQR: case TSDB_FUNC_ELAPSED: - case TSDB_FUNC_MODE: { + case TSDB_FUNC_MODE: + case TSDB_FUNC_STATE_COUNT: + case TSDB_FUNC_STATE_DURATION:{ // 1. valid the number of parameters int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL) ? 0 : (int32_t)taosArrayGetSize(pItem->pNode->Expr.paramList); @@ -2823,10 +2827,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // no parameters or more than one parameter for function if (pItem->pNode->Expr.paramList == NULL || (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && - functionId != TSDB_FUNC_DIFF && numOfParams != 1) || + functionId != TSDB_FUNC_DIFF && functionId != TSDB_FUNC_STATE_COUNT && functionId != TSDB_FUNC_STATE_DURATION && numOfParams != 1) || ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) || (functionId == TSDB_FUNC_ELAPSED && numOfParams != 1 && numOfParams != 2) || - (functionId == TSDB_FUNC_DIFF && numOfParams != 1 && numOfParams != 2)) { + (functionId == TSDB_FUNC_DIFF && numOfParams != 1 && numOfParams != 2) || + (functionId == TSDB_FUNC_STATE_COUNT && numOfParams != 3) || + (functionId == TSDB_FUNC_STATE_DURATION && numOfParams != 3 && numOfParams != 4)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2865,6 +2871,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } } + if ((functionId == TSDB_FUNC_STATE_COUNT || functionId == 
TSDB_FUNC_STATE_DURATION) && + pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg31); + } + STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta); // functions can not be applied to tags @@ -2907,6 +2918,24 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); } + if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) { + SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; + SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, + TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false); + tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName)); + + SColumnList ids = createColumnList(1, 0, 0); + insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, + aAggs[TSDB_FUNC_TS].name, pExpr); + + pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &index, pSchema->type, + pSchema->bytes, getNewResColId(pCmd), 0, false); + tstrncpy(pExpr->base.aliasName, pParamElem->pNode->columnName.z, pParamElem->pNode->columnName.n+1); + ids = createColumnList(1, index.tableIndex, index.columnIndex); + insertResultField(pQueryInfo, colIndex + 1, &ids, pExpr->base.resBytes, (int32_t)pExpr->base.resType, + pExpr->base.aliasName, pExpr); + } + SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false); @@ -2985,6 +3014,41 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } } tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); + } else if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) { + if (pParamElem[1].pNode->tokenId != TK_ID || !isValidStateOper(pParamElem[1].pNode->columnName.z, 
pParamElem[1].pNode->columnName.n)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + tscExprAddParams(&pExpr->base, pParamElem[1].pNode->columnName.z, TSDB_DATA_TYPE_BINARY, pParamElem[1].pNode->columnName.n); + + if (pParamElem[2].pNode->tokenId != TK_INTEGER && pParamElem[2].pNode->tokenId != TK_FLOAT) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pParamElem[2].pNode->value); + + if (functionId == TSDB_FUNC_STATE_DURATION){ + if (numOfParams == 4) { + // unit must be 1s 1m 1h + if (pParamElem[3].pNode->tokenId != TK_TIMESTAMP || (pParamElem[3].pNode->value.i64 != MILLISECOND_PER_SECOND * 1000000L && + pParamElem[3].pNode->value.i64 != MILLISECOND_PER_MINUTE * 1000000L && pParamElem[3].pNode->value.i64 != MILLISECOND_PER_HOUR * 1000000L)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + if (info.precision == TSDB_TIME_PRECISION_MILLI) { + pParamElem[3].pNode->value.i64 /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); + } else if (info.precision == TSDB_TIME_PRECISION_MICRO) { + pParamElem[3].pNode->value.i64 /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); + } + + tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pParamElem[3].pNode->value); + }else{ + int64_t tmp = TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_NANO); + if (info.precision == TSDB_TIME_PRECISION_MILLI) { + tmp /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); + } else if (info.precision == TSDB_TIME_PRECISION_MICRO) { + tmp /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); + } + tscExprAddParams(&pExpr->base, (char *)&tmp, TSDB_DATA_TYPE_BIGINT, sizeof(tmp)); + } + } } SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); @@ -6760,7 +6824,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg8 = "only column in groupby clause allowed as order column"; const char* msg10 = "not 
support distinct mixed with order by"; const char* msg11 = "not support order with udf"; - const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg"; + const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg/stateCount/stateDuration"; const char* msg13 = "order by json tag, key is too long"; const char* msg14 = "order by json tag, must be json->'key'"; @@ -7570,7 +7634,8 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu int32_t f = pExpr->base.functionId; if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_SCALAR_EXPR || f == TSDB_FUNC_DERIVATIVE || - f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG) + f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_STATE_COUNT || + f == TSDB_FUNC_STATE_DURATION) { isProjectionFunction = true; break; @@ -8207,7 +8272,8 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { int16_t functionId = pExpr->base.functionId; if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS || - functionId == TSDB_FUNC_SCALAR_EXPR || functionId == TSDB_FUNC_TS_DUMMY) { + functionId == TSDB_FUNC_SCALAR_EXPR || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_STATE_COUNT || + functionId == TSDB_FUNC_STATE_DURATION) { continue; } @@ -8414,12 +8480,11 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) { int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg) { const char* msg1 = "functions/columns not allowed in group by query"; - const char* msg2 = "projection query on columns not allowed"; const char* msg3 = "group by/session/state_window not allowed on projection query"; const char* msg4 = "retrieve tags not compatible with group by or interval query"; const char* msg5 = "functions can not be mixed up"; - const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed only support group by tbname"; - 
const char* msg7 = "unique function does not supportted in state window query"; + const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed/stateCount/stateDuration only support group by tbname"; + const char* msg7 = "unique/state function does not supportted in state window query"; // only retrieve tags, group by is not supportted if (tscQueryTags(pQueryInfo)) { @@ -8452,31 +8517,11 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* SExprInfo* pExpr = tscExprGet(pQueryInfo, i); int32_t f = pExpr->base.functionId; - /* - * group by normal columns. - * Check if the column projection is identical to the group by column or not - */ - if (f == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - bool qualified = false; - for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { - SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j); - if (pColIndex->colId == pExpr->base.colInfo.colId) { - qualified = true; - break; - } - } - - if (!qualified) { - return invalidOperationMsg(msg, msg2); - } - } - if (f < 0) { SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * f - 1); if (pUdfInfo->funcType == TSDB_UDF_TYPE_SCALAR) { return invalidOperationMsg(msg, msg1); } - continue; } @@ -8486,7 +8531,8 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* } if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || - f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED)) { + f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED || + f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION)) { for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) { SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j); if (j == 0) { @@ -8499,7 +8545,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* 
pCmd, SQueryInfo* pQueryInfo, char* } } - if (pQueryInfo->stateWindow && f == TSDB_FUNC_UNIQUE){ + if (pQueryInfo->stateWindow && (f == TSDB_FUNC_UNIQUE || f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION)){ return invalidOperationMsg(msg, msg7); } @@ -8509,11 +8555,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* f != TSDB_FUNC_UNIQUE && f != TSDB_FUNC_TAIL) { return invalidOperationMsg(msg, msg1); } - - - if (f == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { - return invalidOperationMsg(msg, msg1); - } } if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) { @@ -8550,7 +8591,8 @@ int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); int32_t f = pExpr->base.functionId; - if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED) { + if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED || + f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION) { for (int32_t j = 0; j < upNum; ++j) { SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, j); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pUp, 0); @@ -9547,12 +9589,23 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } + pLeft->functionId = isValidFunction(pLeft->Expr.operand.z, pLeft->Expr.operand.n); + if (pLeft->functionId < 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + if (pLeft->Expr.paramList) { size_t size = taosArrayGetSize(pLeft->Expr.paramList); for (int32_t i = 0; i < size; i++) { tSqlExprItem* pParamItem = taosArrayGet(pLeft->Expr.paramList, i); - tSqlExpr* pExpr1 = pParamItem->pNode; + + if (pLeft->functionId == TSDB_FUNC_STATE_COUNT || pLeft->functionId == 
TSDB_FUNC_STATE_DURATION){ + if (i == 1 && pExpr1->tokenId == TK_ID) continue; + if (pLeft->functionId == TSDB_FUNC_STATE_DURATION && i == 3 && pExpr1->tokenId == TK_TIMESTAMP) + continue; + } + if (pExpr1->tokenId != TK_ALL && pExpr1->tokenId != TK_ID && pExpr1->tokenId != TK_STRING && @@ -9582,11 +9635,6 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode } } - pLeft->functionId = isValidFunction(pLeft->Expr.operand.z, pLeft->Expr.operand.n); - if (pLeft->functionId < 0) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - return handleExprInHavingClause(pCmd, pQueryInfo, pSelectNodeList, pExpr, parentOptr); } @@ -10110,7 +10158,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column"; const char* msg5 = "only tag query not compatible with normal column filter"; const char* msg6 = "not support stddev/percentile in the outer query yet"; - const char* msg7 = "derivative/twa/rate/irate/diff/tail requires timestamp column exists in subquery"; + const char* msg7 = "derivative/twa/rate/irate/diff/tail/stateCount/stateDuration requires timestamp column exists in subquery"; const char* msg8 = "condition missing for join query"; const char* msg9 = "not support 3 level select"; @@ -10195,7 +10243,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf int32_t f = pExpr->base.functionId; if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || - f == TSDB_FUNC_RATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_TAIL) { + f == TSDB_FUNC_RATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_TAIL || + f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 
2e4f32533f845c76ee0286cddc0dc45583d170c4..c821c25987042d0c26c4aa302a142544a08b943c 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1184,7 +1184,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { tlv->len = htonl(sizeof(int16_t) * 2); *(int16_t*)tlv->value = htons(pTableMeta->sversion); *(int16_t*)(tlv->value+sizeof(int16_t)) = htons(pTableMeta->tversion); - pMsg += sizeof(*tlv) + ntohl(tlv->len); + pMsg += sizeof(*tlv) + sizeof(int16_t) * 2; tlv = (STLV *)pMsg; tlv->type = htons(TLV_TYPE_END_MARK); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ce3487cf5b412f87f9bfe5d75f8ed2516b6a77c4..91e2d0c388d0b6dd50ca999d35be7712f8bb18dd 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -328,7 +328,8 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) { if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG && f != TSDB_FUNC_TS && f != TSDB_FUNC_SCALAR_EXPR && f != TSDB_FUNC_DIFF && - f != TSDB_FUNC_DERIVATIVE && !TSDB_FUNC_IS_SCALAR(f)) { + f != TSDB_FUNC_DERIVATIVE && !TSDB_FUNC_IS_SCALAR(f) && + f != TSDB_FUNC_STATE_COUNT && f != TSDB_FUNC_STATE_DURATION) { return false; } } @@ -347,8 +348,8 @@ bool tscIsDiffDerivLikeQuery(SQueryInfo* pQueryInfo) { continue; } - if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || - f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG) { + if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || + f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION) { return true; } } @@ -356,7 +357,6 @@ bool tscIsDiffDerivLikeQuery(SQueryInfo* pQueryInfo) { return false; } - bool tscHasColumnFilter(SQueryInfo* pQueryInfo) { // filter on primary timestamp column if (pQueryInfo->window.skey != INT64_MIN || pQueryInfo->window.ekey != INT64_MAX) { @@ -5094,7 +5094,6 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo; pQueryAttr->range = 
pQueryInfo->range; - if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor pQueryAttr->window = pQueryInfo->window; } else { diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index fc8872ea70d7bb4e1b0496bc678ed2a5bacf13c2..2774d9e2fabd11a3fcb45d650dc696518d504501 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -1364,7 +1364,7 @@ int32_t exprValidateTimeNode(char *msgbuf, tExprNode *pExpr) { } char fraction[32] = {0}; NUM_TO_STRING(child->resultType, &child->pVal->i64, sizeof(fraction), fraction); - int32_t tsDigits = strlen(fraction); + int32_t tsDigits = (int32_t)strlen(fraction); if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS && tsDigits != TSDB_TIME_PRECISION_MILLI_DIGITS && tsDigits != TSDB_TIME_PRECISION_MICRO_DIGITS && @@ -1444,7 +1444,7 @@ int32_t exprValidateTimeNode(char *msgbuf, tExprNode *pExpr) { if (child0->pVal->nType == TSDB_DATA_TYPE_BIGINT) { char fraction[32] = {0}; NUM_TO_STRING(child0->resultType, &child0->pVal->i64, sizeof(fraction), fraction); - int32_t tsDigits = strlen(fraction); + int32_t tsDigits = (int32_t)strlen(fraction); if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS && tsDigits != TSDB_TIME_PRECISION_MILLI_DIGITS && tsDigits != TSDB_TIME_PRECISION_MICRO_DIGITS && @@ -1525,7 +1525,7 @@ int32_t exprValidateTimeNode(char *msgbuf, tExprNode *pExpr) { if (child[i]->pVal->nType == TSDB_DATA_TYPE_BIGINT) { char fraction[32] = {0}; NUM_TO_STRING(child[i]->resultType, &child[i]->pVal->i64, sizeof(fraction), fraction); - int32_t tsDigits = strlen(fraction); + int32_t tsDigits = (int32_t)strlen(fraction); if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS && tsDigits != TSDB_TIME_PRECISION_MILLI_DIGITS && tsDigits != TSDB_TIME_PRECISION_MICRO_DIGITS && @@ -2234,7 +2234,7 @@ void convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, i if (type == TSDB_DATA_TYPE_BINARY) { newColData = calloc(1, charLen + 1); memcpy(newColData, varDataVal(inputData), charLen); - taosParseTime(newColData, 
timeVal, charLen, timePrec, 0); + taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, 0); tfree(newColData); } else if (type == TSDB_DATA_TYPE_NCHAR) { newColData = calloc(1, charLen / TSDB_NCHAR_SIZE + 1); @@ -2245,7 +2245,7 @@ void convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, i return; } newColData[len] = 0; - taosParseTime(newColData, timeVal, len + 1, timePrec, 0); + taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, 0); tfree(newColData); } else { uError("input type should be binary/nchar string"); @@ -2304,7 +2304,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn char fraction[20] = {0}; bool hasFraction = false; NUM_TO_STRING(pInputs[0].type, inputData[0], sizeof(fraction), fraction); - int32_t tsDigits = strlen(fraction); + int32_t tsDigits = (int32_t)strlen(fraction); char buf[64] = {0}; int64_t timeVal; @@ -2328,7 +2328,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn int32_t len = (int32_t)strlen(buf); if (hasFraction) { - int32_t fracLen = strlen(fraction) + 1; + int32_t fracLen = (int32_t)strlen(fraction) + 1; char *tzInfo = strchr(buf, '+'); if (tzInfo) { memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo)); @@ -2399,7 +2399,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn char buf[20] = {0}; NUM_TO_STRING(TSDB_DATA_TYPE_BIGINT, &timeVal, sizeof(buf), buf); - int32_t tsDigits = strlen(buf); + int32_t tsDigits = (int32_t)strlen(buf); timeUnit = timeUnit * 1000 / factor; switch (timeUnit) { case 0: { /* 1u */ @@ -2572,7 +2572,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn } char buf[20] = {0}; NUM_TO_STRING(TSDB_DATA_TYPE_BIGINT, &timeVal[j], sizeof(buf), buf); - int32_t tsDigits = strlen(buf); + int32_t tsDigits = (int32_t)strlen(buf); if (tsDigits <= TSDB_TIME_PRECISION_SEC_DIGITS) { timeVal[j] = timeVal[j] * 1000000000; } else if (tsDigits == 
TSDB_TIME_PRECISION_MILLI_DIGITS) { diff --git a/src/connector/C#/examples/Main.cs b/src/connector/C#/examples/Main.cs index 9d2ab85a87a541fbd891cf318f454d5d8ba001fd..dbf29fc17675e9f18633ab2e997dce3138a33800 100644 --- a/src/connector/C#/examples/Main.cs +++ b/src/connector/C#/examples/Main.cs @@ -14,12 +14,15 @@ namespace AsyncQueryExample IntPtr conn = UtilsTools.TDConnection(); AsyncQuerySample asyncQuery = new AsyncQuerySample(); - asyncQuery.RunQueryAsync(conn,"query_async"); - - SubscribeSample subscribeSample = new SubscribeSample(); + asyncQuery.RunQueryAsync(conn, "query_async"); + + SubscribeSample subscribeSample = new SubscribeSample(); subscribeSample.RunSubscribeWithCallback(conn, "subscribe_with_callback"); subscribeSample.RunSubscribeWithoutCallback(conn, "subscribe_without_callback"); + StreamSample streamSample = new StreamSample(); + streamSample.RunStreamOption1(conn, "stream_sample_option1"); + UtilsTools.CloseConnection(conn); } } diff --git a/src/connector/C#/examples/StreamSample.cs b/src/connector/C#/examples/StreamSample.cs new file mode 100644 index 0000000000000000000000000000000000000000..e90a82c4e3679f41004fa8295783e72fdf6fe643 --- /dev/null +++ b/src/connector/C#/examples/StreamSample.cs @@ -0,0 +1,107 @@ +using System; +using TDengineDriver; +using Sample.UtilsTools; +using System.Runtime.InteropServices; +using System.Threading; +using System.Collections.Generic; +using System.Text; + +namespace Example +{ + public class StreamSample + { + + public void RunStreamOption1(IntPtr conn, string table) + { + + PrepareData(conn, table); + StreamOpenCallback streamOpenCallback = new StreamOpenCallback(StreamCallback); + IntPtr stream = TDengine.OpenStream(conn, $"select count(*) from {table} interval(1m) sliding(30s)", streamOpenCallback, 0, IntPtr.Zero, null); + if (stream == IntPtr.Zero) + { + throw new Exception("OPenStream failed"); + } + else + { + Thread.Sleep(100000); + AddNewData(conn, table, 5,true); + Thread.Sleep(100000); + + 
TDengine.CloseStream(stream); + Console.WriteLine("stream done"); + + } + } + + + public void StreamCallback(IntPtr param, IntPtr taosRes, IntPtr taosRow) + { + + if (taosRes == IntPtr.Zero || taosRow == IntPtr.Zero) + { + return; + } + else + { + var rowData = new List(); + rowData = UtilsTools.FetchRow(taosRow, taosRes); + int count = 0; + rowData.ForEach((item) => + { + + Console.Write("{0} \t|\t", item.ToString()); + count++; + if (count % rowData.Count == 0) + { + Console.WriteLine(""); + } + }); + } + } + + public void PrepareData(IntPtr conn, string tableName) + { + string createTable = $"create table if not exists {tableName} (ts timestamp,i8 tinyint,i16 smallint,i32 int,i64 bigint);"; + UtilsTools.ExecuteUpdate(conn, createTable); + AddNewData(conn, tableName, 5); + } + + public void AddNewData(IntPtr conn, string tableName, int numRows,bool interval = false) + { + long ts = 1646150410100; + Random rs = new Random(); + StringBuilder insert = new StringBuilder(); + + Random rd = new Random(); + for (int i = 0; i < numRows; i++) + { + insert.Append("insert into "); + insert.Append(tableName); + insert.Append(" values "); + insert.Append('('); + insert.Append(ts); + insert.Append(','); + insert.Append(rs.Next(sbyte.MinValue+1, sbyte.MaxValue)); + insert.Append(','); + insert.Append(rs.Next(short.MinValue+1, short.MaxValue)); + insert.Append(','); + insert.Append(rs.Next(int.MinValue+1, int.MaxValue)); + insert.Append(','); + insert.Append(rs.Next(int.MinValue+1, int.MaxValue)); + insert.Append(')'); + UtilsTools.ExecuteUpdate(conn, insert.ToString()); + insert.Clear(); + ts += rd.Next(10000, 100000); + if( interval) + { + Thread.Sleep(rs.Next(100,300) * i); + } + else + { + continue; + } + } + } + + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/FunctionTest/QueryAsync.cs b/src/connector/C#/src/test/FunctionTest/QueryAsync.cs new file mode 100644 index 0000000000000000000000000000000000000000..23546b51f59a930303aded982735e1d00e2405d1 
--- /dev/null +++ b/src/connector/C#/src/test/FunctionTest/QueryAsync.cs @@ -0,0 +1,296 @@ +using TDengineDriver; +using Test.UtilsTools; +using System; +using System.Runtime.InteropServices; +using Xunit; +using System.Collections.Generic; +using Test.UtilsTools.DataSource; +using Test.UtilsTools.ResultSet; +using Xunit.Abstractions; +using Test.Fixture; +using Test.Case.Attributes; + +namespace Cases +{ + [TestCaseOrderer("XUnit.Case.Orderers.TestExeOrderer", "Cases.ExeOrder")] + [Collection("Database collection")] + + public class QueryAsyncCases + { + DatabaseFixture database; + + private readonly ITestOutputHelper output; + + public QueryAsyncCases(DatabaseFixture fixture, ITestOutputHelper output) + { + this.database = fixture; + this.output = output; + } + /// xiaolei + /// QueryAsyncCases.QueryAsyncCases + /// Test query without condition + /// QueryAsync.cs + /// pass or failed + [Fact(DisplayName = "QueryAsyncCases.QueryWithoutCondition()"),TestExeOrder(1),Trait("Category", "QueryAWithoutCondition")] + public void QueryWithoutCondition() + { + IntPtr conn = database.conn; + IntPtr _res = IntPtr.Zero; + + var tableName = "query_a_without_condition"; + var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(t_i32 int,t_bnr binary(50),t_nchr nchar(50))"; + var dropSql = $"drop table if exists {tableName}"; + + var colData = new List{1646150410100,true,1,11,1111,11111111,"value one","值壹", + 1646150410200,true,2,22,2222,22222222,"value two","值贰", + 1646150410300,false,3,33,3333,33333333,"value three","值三", + }; + var tagData = new List { 1, "tag_one", "标签壹" }; + String insertSql = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData, tagData, 3); + List expectResMeta = DataSource.GetMetaFromDLL(createSql); + List expectResData = UtilsTools.CombineColAndTagData(colData, tagData, 3); + + var querySql = $"select * from {tableName}"; + 
UtilsTools.ExecuteUpdate(conn, dropSql); + UtilsTools.ExecuteUpdate(conn, createSql); + UtilsTools.ExecuteUpdate(conn, insertSql); + + QueryAsyncCallback fq = new QueryAsyncCallback(QueryCallback); + TDengine.QueryAsync(conn, querySql, fq, IntPtr.Zero); + + void QueryCallback(IntPtr param, IntPtr taosRes, int code) + { + if (code == 0 && taosRes != IntPtr.Zero) + { + FetchRowAsyncCallback fetchRowAsyncCallback = new FetchRowAsyncCallback(FetchCallback); + TDengine.FetchRowAsync(taosRes, fetchRowAsyncCallback, param); + } + else + { + Console.WriteLine($"async query data failed, failed code {code}"); + } + + } + + void FetchCallback(IntPtr param, IntPtr taosRes, int numOfRows) + { + if (numOfRows > 0) + { + ResultSet actualResult = new ResultSet(taosRes); + List actualMeta = actualResult.GetResultMeta(); + List actualResData = actualResult.GetResultData(); + //Assert Meta data + for (int i = 0; i < actualMeta.Count; i++) + { + Assert.Equal(expectResMeta[i].name, actualMeta[i].name); + Assert.Equal(expectResMeta[i].type, actualMeta[i].type); + Assert.Equal(expectResMeta[i].size, actualMeta[i].size); + } + // Assert retrieve data + for (int i = 0; i < actualResData.Count; i++) + { + // Console.WriteLine("{0},{1},{2}", i, expectResData[i], actualResData[i]); + Assert.Equal(expectResData[i].ToString(), actualResData[i]); + } + + TDengine.FetchRowAsync(taosRes, FetchCallback, param); + } + else + { + if (numOfRows == 0) + { + Console.WriteLine("async retrieve complete."); + + } + else + { + Console.WriteLine($"FetchRowAsync callback error, error code {numOfRows}"); + } + TDengine.FreeResult(taosRes); + } + } + } + + /// xiaolei + /// QueryAsyncCases.QueryWithCondition + /// Test query with condition + /// QueryAsync.cs + /// pass or failed + [Fact(DisplayName = "QueryAsyncCases.QueryWithCondition()"),TestExeOrder(2),Trait("Category", "QueryAWithCondition")] + public void QueryWithCondition() + { + IntPtr conn = database.conn; + IntPtr _res = IntPtr.Zero; + + var 
tableName = "query_a_with_condition"; + var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(t_i32 int,t_bnr binary(50),t_nchr nchar(50))"; + var dropSql = $"drop table if exists {tableName}"; + + var colData = new List{1646150410100,true,1,11,1111,11111111,"value one","值壹", + 1646150410200,true,2,22,2222,22222222,"value two","值贰", + 1646150410300,false,3,33,3333,33333333,"value three","值三", + }; + var colDataActual = colData.GetRange(8, 8); + var tagData = new List { 1, "tag_one", "标签壹" }; + String insertSql = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData, tagData, 3); + List expectResMeta = DataSource.GetMetaFromDLL(createSql); + List expectResData = UtilsTools.CombineColAndTagData(colDataActual, tagData, 1); + colDataActual.ForEach((item) => { Console.Write("{0}\t", item); }); + + var querySql = $"select * from {tableName} where bl=true and t_bnr='tag_one' and i8>1 and t_nchr = '标签壹'"; + UtilsTools.ExecuteUpdate(conn, dropSql); + UtilsTools.ExecuteUpdate(conn, createSql); + UtilsTools.ExecuteUpdate(conn, insertSql); + QueryAsyncCallback fq = new QueryAsyncCallback(QueryCallback); + TDengine.QueryAsync(conn, querySql, fq, IntPtr.Zero); + + void QueryCallback(IntPtr param, IntPtr taosRes, int code) + { + if (code == 0 && taosRes != IntPtr.Zero) + { + FetchRowAsyncCallback fetchRowAsyncCallback = new FetchRowAsyncCallback(FetchCallback); + TDengine.FetchRowAsync(taosRes, fetchRowAsyncCallback, param); + } + else + { + Console.WriteLine($"async query data failed, failed code {code}"); + } + + } + + void FetchCallback(IntPtr param, IntPtr taosRes, int numOfRows) + { + if (numOfRows > 0) + { + ResultSet actualResult = new ResultSet(taosRes); + List actualMeta = actualResult.GetResultMeta(); + List actualResData = actualResult.GetResultData(); + //Assert Meta data + for (int i = 0; i < actualMeta.Count; i++) + { + 
Assert.Equal(expectResMeta[i].name, actualMeta[i].name); + Assert.Equal(expectResMeta[i].type, actualMeta[i].type); + Assert.Equal(expectResMeta[i].size, actualMeta[i].size); + } + // Assert retrieve data + for (int i = 0; i < actualResData.Count; i++) + { + // Console.WriteLine("{0},{1},{2}", i, expectResData[i], actualResData[i]); + Assert.Equal(expectResData[i].ToString(), actualResData[i]); + } + + TDengine.FetchRowAsync(taosRes, FetchCallback, param); + } + else + { + if (numOfRows == 0) + { + Console.WriteLine("async retrieve complete."); + + } + else + { + Console.WriteLine($"FetchRowAsync callback error, error code {numOfRows}"); + } + TDengine.FreeResult(taosRes); + } + } + + } + + /// xiaolei + /// QueryAsyncCases.QueryWithJsonCondition + /// Test query with condition + /// QueryAsync.cs + /// pass or failed + [Fact(DisplayName = "QueryAsyncCases.QueryWithJsonCondition()"),TestExeOrder(3),Trait("Category", "QueryAWithJsonCondition")] + public void QueryWithJsonCondition() + { + IntPtr conn = database.conn; + IntPtr _res = IntPtr.Zero; + + var tableName = "query_a_json_condition"; + var createSql = $"create table if not exists {tableName}(ts timestamp,bl bool,i8 tinyint,i16 smallint,i32 int,i64 bigint,bnr binary(50),nchr nchar(50))tags(jtag json)"; + var dropSql = $"drop table if exists {tableName}"; + + var colData1 = new List{1646150410100,true,1,11,1111,11111111,"value one","值壹", + 1646150410200,true,2,22,2222,22222222,"value two","值贰", + 1646150410300,false,3,33,3333,33333333,"value three","值三", + }; + var colData2 = new List{1646150410400,false,4,44,4444,44444444,"value three","值肆", + 1646150410500,true,5,55,5555,55555555,"value one","值伍", + 1646150410600,true,6,66,6666,66666666,"value two","值陆", + }; + var tagData1 = new List { "{\"t_bnr\":\"tag1\",\"t_i32\":1,\"t_nchr\":\"标签壹\"}" }; + var tagData2 = new List { "{\"t_bnr\":\"tag2\",\"t_i32\":2,\"t_nchar\":\"标签贰\"}" }; + var querySql = $"select * from {tableName} where jtag->'t_bnr'='tag1';"; + + + 
String insertSql1 = UtilsTools.ConstructInsertSql(tableName + "_s01", tableName, colData1, tagData1, 3); + String insertSql2 = UtilsTools.ConstructInsertSql(tableName + "_s02", tableName, colData1, tagData2, 3); + List expectResMeta = DataSource.GetMetaFromDLL(createSql); + List expectResData = UtilsTools.CombineColAndTagData(colData1, tagData1, 3); + + UtilsTools.ExecuteUpdate(conn, dropSql); + UtilsTools.ExecuteUpdate(conn, createSql); + UtilsTools.ExecuteUpdate(conn, insertSql1); + UtilsTools.ExecuteUpdate(conn, insertSql2); + QueryAsyncCallback fq = new QueryAsyncCallback(QueryCallback); + TDengine.QueryAsync(conn, querySql, fq, IntPtr.Zero); + + void QueryCallback(IntPtr param, IntPtr taosRes, int code) + { + if (code == 0 && taosRes != IntPtr.Zero) + { + FetchRowAsyncCallback fetchRowAsyncCallback = new FetchRowAsyncCallback(FetchCallback); + TDengine.FetchRowAsync(taosRes, fetchRowAsyncCallback, param); + } + else + { + Console.WriteLine($"async query data failed, failed code {code}"); + } + + } + + void FetchCallback(IntPtr param, IntPtr taosRes, int numOfRows) + { + if (numOfRows > 0) + { + ResultSet actualResult = new ResultSet(taosRes); + List actualMeta = actualResult.GetResultMeta(); + List actualResData = actualResult.GetResultData(); + //Assert Meta data + for (int i = 0; i < actualMeta.Count; i++) + { + Assert.Equal(expectResMeta[i].name, actualMeta[i].name); + Assert.Equal(expectResMeta[i].type, actualMeta[i].type); + Assert.Equal(expectResMeta[i].size, actualMeta[i].size); + } + // Assert retrieve data + for (int i = 0; i < actualResData.Count; i++) + { + // Console.WriteLine("{0},{1},{2}", i, expectResData[i], actualResData[i]); + Assert.Equal(expectResData[i].ToString(), actualResData[i]); + } + + TDengine.FetchRowAsync(taosRes, FetchCallback, param); + } + else + { + if (numOfRows == 0) + { + Console.WriteLine("async retrieve complete."); + + } + else + { + Console.WriteLine($"FetchRowAsync callback error, error code {numOfRows}"); + } + 
TDengine.FreeResult(taosRes); + } + } + + + } + } +} diff --git a/src/connector/C#/src/test/FunctionTest/lib/DBFixture.cs b/src/connector/C#/src/test/FunctionTest/lib/DBFixture.cs new file mode 100644 index 0000000000000000000000000000000000000000..83492536fe7d3ce9eb012282db2cd4979b6b03f0 --- /dev/null +++ b/src/connector/C#/src/test/FunctionTest/lib/DBFixture.cs @@ -0,0 +1,79 @@ +using System; +using System.Configuration; +using System.Data.SqlClient; +using System.Runtime.InteropServices; +using TDengineDriver; + +namespace Test.Fixture +{ + public class DatabaseFixture : IDisposable + { + public IntPtr conn { get; set; } + + private string user = "root"; + private string password = "taosdata"; + private string ip = "127.0.0.1"; + private short port = 0; + + private string db = "xunit_test_fixture"; + public DatabaseFixture() + { + conn = TDengine.Connect(ip, user, password, "", port); + IntPtr res; + if (conn != IntPtr.Zero) + { + if ((res = TDengine.Query(conn, $"create database if not exists {db} keep 3650")) != IntPtr.Zero) + { + if ((res = TDengine.Query(conn, $"use {db}")) != IntPtr.Zero) + { + Console.WriteLine("Get connection success"); + } + else + { + throw new Exception(TDengine.Error(res)); + } + } + else + { + throw new Exception(TDengine.Error(res)); + } + } + else + { + throw new Exception("Get TDConnection failed"); + } + } + + // public IntPtr TDConnection { get; } + + public void Dispose() + { + // IntPtr res; + // if (conn != IntPtr.Zero) + // { + // if ((res = TDengine.Query(conn, $"drop database if exists {db}")) != IntPtr.Zero) + // { + // if (TDengine.Close(conn) == 0) + // { + // Console.WriteLine("close connection success"); + // } + // else + // { + // throw new Exception("close connection failed"); + // } + + // } + // else + // { + // throw new Exception(TDengine.Error(res)); + // } + // } + // else + // { + // throw new Exception("connection if already null"); + // } + + } + + } +} diff --git 
a/src/connector/C#/src/test/FunctionTest/lib/DatabaseCollection.cs b/src/connector/C#/src/test/FunctionTest/lib/DatabaseCollection.cs new file mode 100644 index 0000000000000000000000000000000000000000..11651f99b9fab5ea7c1f86bad4a14e7bea590378 --- /dev/null +++ b/src/connector/C#/src/test/FunctionTest/lib/DatabaseCollection.cs @@ -0,0 +1,9 @@ +using Xunit; +using Test.Fixture; +[CollectionDefinition("Database collection")] +public class DatabaseCollection : ICollectionFixture +{ + // This class has no code, and is never created. Its purpose is simply + // to be the place to apply [CollectionDefinition] and all the + // ICollectionFixture<> interfaces. +} \ No newline at end of file diff --git a/src/connector/C#/src/test/FunctionTest/lib/TestExeOrder.cs b/src/connector/C#/src/test/FunctionTest/lib/TestExeOrder.cs new file mode 100644 index 0000000000000000000000000000000000000000..8fec01c0bae05c1be047040c883d75b0b5091229 --- /dev/null +++ b/src/connector/C#/src/test/FunctionTest/lib/TestExeOrder.cs @@ -0,0 +1,12 @@ +using System; + +namespace Test.Case.Attributes +{ + [AttributeUsage(AttributeTargets.Method, AllowMultiple = false)] + public class TestExeOrderAttribute : Attribute + { + public int ExeOrder { get; private set; } + + public TestExeOrderAttribute(int exeOrder) => ExeOrder = exeOrder; + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/FunctionTest/lib/TestExeOrderer.cs b/src/connector/C#/src/test/FunctionTest/lib/TestExeOrderer.cs new file mode 100644 index 0000000000000000000000000000000000000000..798244941a3b5df17fc35647c9d6e8d6d4ea17ae --- /dev/null +++ b/src/connector/C#/src/test/FunctionTest/lib/TestExeOrderer.cs @@ -0,0 +1,43 @@ +using System.Collections.Generic; +using System.Linq; +using Xunit.Abstractions; +using Xunit.Sdk; +using Test.Case.Attributes; + +namespace XUnit.Case.Orderers +{ + public class TestExeOrderer : ITestCaseOrderer + { + public IEnumerable OrderTestCases( + IEnumerable testCases) where TTestCase : 
ITestCase + { + string assemblyName = typeof(TestExeOrderAttribute).AssemblyQualifiedName!; + var sortedMethods = new SortedDictionary>(); + foreach (TTestCase testCase in testCases) + { + int exeOrder = testCase.TestMethod.Method + .GetCustomAttributes(assemblyName) + .FirstOrDefault() + ?.GetNamedArgument(nameof(TestExeOrderAttribute.ExeOrder)) ?? 0; + + GetOrCreate(sortedMethods, exeOrder).Add(testCase); + } + + foreach (TTestCase testCase in + sortedMethods.Keys.SelectMany( + exeOrder => sortedMethods[exeOrder].OrderBy( + testCase => testCase.TestMethod.Method.Name))) + { + yield return testCase; + } + } + + private static TValue GetOrCreate( + IDictionary dictionary, TKey key) + where TKey : struct + where TValue : new() => + dictionary.TryGetValue(key, out TValue? result) + ? result + : (dictionary[key] = new TValue()); + } +} \ No newline at end of file diff --git a/src/connector/C#/src/test/FunctionTest/lib/Utils.cs b/src/connector/C#/src/test/FunctionTest/lib/Utils.cs index c2bba9298d022783c5d3d40643caa7eb1f73751c..3de6c609f8a8e61c1ceaa7b5ef47b14a6608f6aa 100644 --- a/src/connector/C#/src/test/FunctionTest/lib/Utils.cs +++ b/src/connector/C#/src/test/FunctionTest/lib/Utils.cs @@ -3,6 +3,7 @@ using TDengineDriver; using System.Runtime.InteropServices; using System.Text; using System.Collections.Generic; +using Xunit.Abstractions; namespace Test.UtilsTools { public class UtilsTools @@ -28,20 +29,20 @@ namespace Test.UtilsTools //get taos.cfg file based on different os public static string GetConfigPath() { - string configDir = "" ; - if(OperatingSystem.IsOSPlatform("Windows")) - { - configDir = "C:/TDengine/cfg"; - } - else if(OperatingSystem.IsOSPlatform("Linux")) - { - configDir = "/etc/taos"; - } - else if(OperatingSystem.IsOSPlatform("macOS")) - { - configDir = "/etc/taos"; - } - return configDir; + string configDir = ""; + if (OperatingSystem.IsOSPlatform("Windows")) + { + configDir = "C:/TDengine/cfg"; + } + else if 
(OperatingSystem.IsOSPlatform("Linux")) + { + configDir = "/etc/taos"; + } + else if (OperatingSystem.IsOSPlatform("macOS")) + { + configDir = "/usr/local/etc/taos"; + } + return configDir; } public static IntPtr ExecuteQuery(IntPtr conn, String sql) @@ -102,23 +103,16 @@ namespace Test.UtilsTools int fieldCount = metas.Count; IntPtr rowdata; - // StringBuilder builder = new StringBuilder(); List datas = QueryRes(res, metas); - Console.Write(" DisplayRes ---"); for (int i = 0; i < metas.Count; i++) { for (int j = 0; j < datas.Count; j++) { - Console.Write(" {0} ---", datas[i * j + i]); + Console.Write(" {0} \t|", datas[j]); } Console.WriteLine(""); } - // if (TDengine.ErrorNo(res) != 0) - // { - // Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); - // } - // TDengine.FreeResult(res); Console.WriteLine(""); } public static List> GetResultSet(IntPtr res) @@ -197,14 +191,8 @@ namespace Test.UtilsTools } public static List GetResData(IntPtr res) { - List colName = new List(); - List dataRaw = new List(); - if (!IsValidResult(res)) - { - ExitProgram(); - } - List metas = GetResField(res); - dataRaw = QueryRes(res, metas); + List dataRaw = GetResDataWithoutFree(res); + FreeResult(res); return dataRaw; } @@ -288,107 +276,23 @@ namespace Test.UtilsTools private static List QueryRes(IntPtr res, List metas) { - IntPtr rowdata; - long queryRows = 0; + IntPtr taosRow; List dataRaw = new List(); int fieldCount = metas.Count; - while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + while ((taosRow = TDengine.FetchRows(res)) != IntPtr.Zero) { - queryRows++; - IntPtr colLengthPtr = TDengine.FetchLengths(res); - int[] colLengthArr = new int[fieldCount]; - Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount); - - for (int fields = 0; fields < fieldCount; ++fields) - { - TDengineMeta meta = metas[fields]; - int offset = IntPtr.Size * fields; - IntPtr data = Marshal.ReadIntPtr(rowdata, offset); - - if (data == IntPtr.Zero) - { - 
dataRaw.Add("NULL"); - continue; - } - - switch ((TDengineDataType)meta.type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - bool v1 = Marshal.ReadByte(data) == 0 ? false : true; - dataRaw.Add(v1.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - sbyte v2 = (sbyte)Marshal.ReadByte(data); - dataRaw.Add(v2.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - short v3 = Marshal.ReadInt16(data); - dataRaw.Add(v3.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_INT: - int v4 = Marshal.ReadInt32(data); - dataRaw.Add(v4.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - long v5 = Marshal.ReadInt64(data); - dataRaw.Add(v5.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); - dataRaw.Add(v6.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); - dataRaw.Add(v7.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - // string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]); - string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]); - dataRaw.Add(v8); - break; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - long v9 = Marshal.ReadInt64(data); - dataRaw.Add(v9.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - // string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]); - string v10 = Marshal.PtrToStringUTF8(data, colLengthArr[fields]); - dataRaw.Add(v10); - break; - case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: - byte v12 = Marshal.ReadByte(data); - dataRaw.Add(v12.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: - ushort v13 = (ushort)Marshal.ReadInt16(data); - dataRaw.Add(v13.ToString()); - break; - case TDengineDataType.TSDB_DATA_TYPE_UINT: - uint v14 = (uint)Marshal.ReadInt32(data); - dataRaw.Add(v14.ToString()); - break; - case 
TDengineDataType.TSDB_DATA_TYPE_UBIGINT: - ulong v15 = (ulong)Marshal.ReadInt64(data); - dataRaw.Add(v15.ToString()); - break; - default: - dataRaw.Add("unknown value"); - break; - } - } - + dataRaw.AddRange(FetchRow(taosRow, res)); } if (TDengine.ErrorNo(res) != 0) { Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); } - TDengine.FreeResult(res); Console.WriteLine(""); return dataRaw; } // Generate insert sql for the with the coldata and tag data - public static string ConstructInsertSql(string table,string stable,List colData,List tagData,int numOfRows) + public static string ConstructInsertSql(string table, string stable, List colData, List tagData, int numOfRows) { int numofFileds = colData.Count / numOfRows; StringBuilder insertSql; @@ -453,8 +357,8 @@ namespace Test.UtilsTools return insertSql.ToString(); } - - public static List CombineColAndTagData(List colData,List tagData, int numOfRows) + + public static List CombineColAndTagData(List colData, List tagData, int numOfRows) { var list = new List(); for (int i = 0; i < colData.Count; i++) @@ -470,6 +374,137 @@ namespace Test.UtilsTools } return list; } + + /// + /// Using this method to free TAOS_RES,otherwise will lead memory + /// leak.Notice do not call this method while subscribe/consume until + /// end of the program. + /// + /// TAOS_RES, the resultset usually is return by taos_query() + public static void FreeResult(IntPtr res) + { + TDengine.FreeResult(res); + } + + + /// + /// Using to parse TAOS_ROW. 
+ /// + /// This is TAOS_RES pointer + /// This is TAOS_ROW pointer + /// + public static List FetchRow(IntPtr taosRow, IntPtr taosRes) + { + List metaList = TDengine.FetchFields(taosRes); + int numOfFiled = TDengine.FieldCount(taosRes); + + List dataRaw = new List(); + + IntPtr colLengthPrt = TDengine.FetchLengths(taosRes); + int[] colLengthArr = new int[numOfFiled]; + Marshal.Copy(colLengthPrt, colLengthArr, 0, numOfFiled); + + for (int i = 0; i < numOfFiled; i++) + { + TDengineMeta meta = metaList[i]; + IntPtr data = Marshal.ReadIntPtr(taosRow, IntPtr.Size * i); + + if (data == IntPtr.Zero) + { + dataRaw.Add("NULL"); + continue; + } + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? false : true; + dataRaw.Add(v1.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + sbyte v2 = (sbyte)Marshal.ReadByte(data); + dataRaw.Add(v2.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + dataRaw.Add(v3.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + dataRaw.Add(v4.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + dataRaw.Add(v5.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + dataRaw.Add(v6.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + dataRaw.Add(v7.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); + dataRaw.Add(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + dataRaw.Add(v9.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringUTF8(data, 
colLengthArr[i]); + dataRaw.Add(v10); + break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v12 = Marshal.ReadByte(data); + dataRaw.Add(v12.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v13 = (ushort)Marshal.ReadInt16(data); + dataRaw.Add(v13.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v14 = (uint)Marshal.ReadInt32(data); + dataRaw.Add(v14.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v15 = (ulong)Marshal.ReadInt64(data); + dataRaw.Add(v15.ToString()); + break; + case TDengineDataType.TSDB_DATA_TYPE_JSONTAG: + string v16 = Marshal.PtrToStringUTF8(data, colLengthArr[i]); + dataRaw.Add(v16); + break; + default: + dataRaw.Add("nonsupport data type value"); + break; + } + + } + return dataRaw; + } + + /// + /// Get the result data from TAO_RES but this interface will + /// not free the TAO_RES at the end. Remember to free the TAOS_RES + /// when you need to do so. + /// + /// This is a TAOS_RES pointer. 
+ /// + public static List GetResDataWithoutFree(IntPtr res) + { + List colName = new List(); + List dataRaw = new List(); + if (!IsValidResult(res)) + { + ExitProgram(); + } + List metas = GetResField(res); + dataRaw = QueryRes(res, metas); + return dataRaw; + } } + } diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index e74d81cc59ad58e28eefb94008ea1fd2411625f2..53101483099f40c1ce0cbbb1a94f20b46ca391af 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -565,6 +565,9 @@ static void shellPrintNChar(const char *str, int length, int width) { if (bytes <= 0) { break; } + if (pos + bytes > length) { + break; + } int w = 0; #ifdef WINDOWS w = bytes; @@ -575,15 +578,11 @@ static void shellPrintNChar(const char *str, int length, int width) { w = wcwidth(wc); } #endif + pos += bytes; if (w <= 0) { continue; } - pos += bytes; - if (pos > length) { - break; - } - if (width <= 0) { printf("%lc", wc); continue; diff --git a/src/kit/taos-tools b/src/kit/taos-tools index 14fb199c68ed7d32d616cd9be231b332ceae9911..f36b07f710d661dca88fdd70e73b5e3e16a960e0 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit 14fb199c68ed7d32d616cd9be231b332ceae9911 +Subproject commit f36b07f710d661dca88fdd70e73b5e3e16a960e0 diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 3eb542cb4cffe3e37e49a858666061df7eb131b4..0b87c546d570e8bf4dfc723ae9dc380442009280 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -30,62 +30,64 @@ extern "C" { #include "tsdb.h" #include "qUdf.h" -#define TSDB_FUNC_INVALID_ID -1 -#define TSDB_FUNC_COUNT 0 -#define TSDB_FUNC_SUM 1 -#define TSDB_FUNC_AVG 2 -#define TSDB_FUNC_MIN 3 -#define TSDB_FUNC_MAX 4 -#define TSDB_FUNC_STDDEV 5 -#define TSDB_FUNC_PERCT 6 -#define TSDB_FUNC_APERCT 7 -#define TSDB_FUNC_FIRST 8 -#define TSDB_FUNC_LAST 9 -#define TSDB_FUNC_LAST_ROW 10 -#define TSDB_FUNC_TOP 11 -#define TSDB_FUNC_BOTTOM 12 -#define 
TSDB_FUNC_SPREAD 13 -#define TSDB_FUNC_TWA 14 -#define TSDB_FUNC_LEASTSQR 15 - -#define TSDB_FUNC_TS 16 -#define TSDB_FUNC_TS_DUMMY 17 -#define TSDB_FUNC_TAG_DUMMY 18 -#define TSDB_FUNC_TS_COMP 19 - -#define TSDB_FUNC_TAG 20 -#define TSDB_FUNC_PRJ 21 - -#define TSDB_FUNC_TAGPRJ 22 -#define TSDB_FUNC_SCALAR_EXPR 23 -#define TSDB_FUNC_DIFF 24 - -#define TSDB_FUNC_FIRST_DST 25 -#define TSDB_FUNC_LAST_DST 26 -#define TSDB_FUNC_STDDEV_DST 27 -#define TSDB_FUNC_INTERP 28 - -#define TSDB_FUNC_RATE 29 -#define TSDB_FUNC_IRATE 30 -#define TSDB_FUNC_TID_TAG 31 -#define TSDB_FUNC_DERIVATIVE 32 - -#define TSDB_FUNC_CSUM 33 -#define TSDB_FUNC_MAVG 34 -#define TSDB_FUNC_SAMPLE 35 - -#define TSDB_FUNC_BLKINFO 36 - -#define TSDB_FUNC_ELAPSED 37 -#define TSDB_FUNC_HISTOGRAM 38 -#define TSDB_FUNC_UNIQUE 39 -#define TSDB_FUNC_MODE 40 -#define TSDB_FUNC_TAIL 41 -#define TSDB_FUNC_WSTART 42 -#define TSDB_FUNC_WSTOP 43 -#define TSDB_FUNC_WDURATION 44 - -#define TSDB_FUNC_MAX_NUM 45 +#define TSDB_FUNC_INVALID_ID -1 +#define TSDB_FUNC_COUNT 0 +#define TSDB_FUNC_SUM 1 +#define TSDB_FUNC_AVG 2 +#define TSDB_FUNC_MIN 3 +#define TSDB_FUNC_MAX 4 +#define TSDB_FUNC_STDDEV 5 +#define TSDB_FUNC_PERCT 6 +#define TSDB_FUNC_APERCT 7 +#define TSDB_FUNC_FIRST 8 +#define TSDB_FUNC_LAST 9 +#define TSDB_FUNC_LAST_ROW 10 +#define TSDB_FUNC_TOP 11 +#define TSDB_FUNC_BOTTOM 12 +#define TSDB_FUNC_SPREAD 13 +#define TSDB_FUNC_TWA 14 +#define TSDB_FUNC_LEASTSQR 15 + +#define TSDB_FUNC_TS 16 +#define TSDB_FUNC_TS_DUMMY 17 +#define TSDB_FUNC_TAG_DUMMY 18 +#define TSDB_FUNC_TS_COMP 19 + +#define TSDB_FUNC_TAG 20 +#define TSDB_FUNC_PRJ 21 + +#define TSDB_FUNC_TAGPRJ 22 +#define TSDB_FUNC_SCALAR_EXPR 23 +#define TSDB_FUNC_DIFF 24 + +#define TSDB_FUNC_FIRST_DST 25 +#define TSDB_FUNC_LAST_DST 26 +#define TSDB_FUNC_STDDEV_DST 27 +#define TSDB_FUNC_INTERP 28 + +#define TSDB_FUNC_RATE 29 +#define TSDB_FUNC_IRATE 30 +#define TSDB_FUNC_TID_TAG 31 +#define TSDB_FUNC_DERIVATIVE 32 + +#define TSDB_FUNC_CSUM 33 +#define 
TSDB_FUNC_MAVG 34 +#define TSDB_FUNC_SAMPLE 35 + +#define TSDB_FUNC_BLKINFO 36 + +#define TSDB_FUNC_ELAPSED 37 +#define TSDB_FUNC_HISTOGRAM 38 +#define TSDB_FUNC_UNIQUE 39 +#define TSDB_FUNC_MODE 40 +#define TSDB_FUNC_TAIL 41 +#define TSDB_FUNC_STATE_COUNT 42 +#define TSDB_FUNC_STATE_DURATION 43 +#define TSDB_FUNC_WSTART 44 +#define TSDB_FUNC_WSTOP 45 +#define TSDB_FUNC_WDURATION 46 + +#define TSDB_FUNC_MAX_NUM 47 #define TSDB_FUNCSTATE_SO 0x1u // single output #define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM @@ -231,9 +233,11 @@ typedef struct SAggFunctionInfo { int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int32_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo); +int16_t getTimeWindowFunctionID(int16_t colIndex); + int32_t isValidFunction(const char* name, int32_t len); +bool isValidStateOper(char *oper, int32_t len); -int16_t getTimeWindowFunctionID(int16_t colIndex); #define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0) #define IS_MULTIOUTPUT(x) (((x)&TSDB_FUNCSTATE_MO) != 0) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index a8b781718cafa7fdb381205609e97ea6f96fd409..db92d8db891dc520582544651eb5f055dcd5839e 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -230,6 +230,7 @@ typedef struct SQueryAttr { bool stabledev; // super table stddev query bool tsCompQuery; // is tscomp query bool diffQuery; // is diff query + bool stateQuery; // is state query bool simpleAgg; bool pointInterpQuery; // point interpolation query bool needTableSeqScan; // need scan table by table diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 7c227a65a0af87f5e11baac6969b9a2aab26e193..9c330b52375899215b764437751622db97c1c9e6 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -211,6 +211,14 @@ typedef struct { }; } SDiffFuncInfo; + 
+typedef struct { + union { + int64_t countPrev; + int64_t durationStart; + }; +} SStateInfo; + typedef struct { double lower; // >lower double upper; // <=upper @@ -319,6 +327,13 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } + if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) { + *type = TSDB_DATA_TYPE_BIGINT; + *bytes = sizeof(int64_t); + *interBytes = sizeof(SStateInfo); + return TSDB_CODE_SUCCESS; + } + if (functionId == TSDB_FUNC_CSUM) { if (IS_SIGNED_NUMERIC_TYPE(dataType)) { *type = TSDB_DATA_TYPE_BIGINT; @@ -625,6 +640,76 @@ int32_t isValidFunction(const char* name, int32_t len) { return -1; } +bool isValidStateOper(char *oper, int32_t len){ + return strncmp(oper, "lt", len) == 0 || strncmp(oper, "gt", len) == 0 || strncmp(oper, "le", len) == 0 || + strncmp(oper, "ge", len) == 0 || strncmp(oper, "ne", len) == 0 || strncmp(oper, "eq", len) == 0; +} + +#define STATEOPER(OPER, COMP, TYPE) if (strncmp(oper->pz, OPER, oper->nLen) == 0) {\ +if (pVar->nType == TSDB_DATA_TYPE_BIGINT && *(TYPE)data COMP pVar->i64) return true;\ +else if(pVar->nType == TSDB_DATA_TYPE_DOUBLE && *(TYPE)data COMP pVar->dKey) return true;\ +else return false;} + +#define STATEJUDGE(TYPE) STATEOPER("lt", <, TYPE)\ +STATEOPER("gt", >, TYPE)\ +STATEOPER("le", <=, TYPE)\ +STATEOPER("ge", >=, TYPE)\ +STATEOPER("ne", !=, TYPE)\ +STATEOPER("eq", ==, TYPE) + +static bool isStateOperTrue(void *data, int16_t type, tVariant *oper, tVariant *pVar){ + switch (type) { + case TSDB_DATA_TYPE_INT: { + STATEJUDGE(int32_t *) + break; + } + case TSDB_DATA_TYPE_UINT: { + STATEJUDGE(uint32_t *) + break; + } + + case TSDB_DATA_TYPE_BIGINT: { + STATEJUDGE(int64_t *) + break; + }case TSDB_DATA_TYPE_UBIGINT: { + STATEJUDGE(uint64_t *) + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + STATEJUDGE(double *) + break; + } + + case TSDB_DATA_TYPE_FLOAT: { + STATEJUDGE(float *) + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { 
+ STATEJUDGE(int16_t *) + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + STATEJUDGE(uint16_t *) + break; + } + + case TSDB_DATA_TYPE_TINYINT: { + STATEJUDGE(int8_t *) + break; + } + + case TSDB_DATA_TYPE_UTINYINT: { + STATEJUDGE(uint8_t *) + break; + } + default: + qError("error input type"); + } + return false; +} + static bool function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { if (pResultInfo->initialized) { return false; @@ -5164,7 +5249,7 @@ static void copyRes(SQLFunctionCtx *pCtx, void *data, int32_t bytes) { char *tsOutput = pCtx->ptsOutputBuf; char *output = pCtx->pOutput; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->param[3].i64); - char *tvp = data + (size * ((pCtx->param[3].i64 == TSDB_ORDER_ASC) ? 0 : len -1)); + char *tvp = (char*)data + (size * ((pCtx->param[3].i64 == TSDB_ORDER_ASC) ? 0 : len -1)); for (int32_t i = 0; i < len; ++i) { memcpy(tsOutput, tvp, sizeof(int64_t)); memcpy(output, tvp + sizeof(int64_t), bytes); @@ -5184,7 +5269,7 @@ static void copyRes(SQLFunctionCtx *pCtx, void *data, int32_t bytes) { pData[i] = pCtx->tagInfo.pTagCtxList[i]->pOutput; } - tvp = data + (size * ((pCtx->param[3].i64 == TSDB_ORDER_ASC) ? 0 : len -1)); + tvp = (char*)data + (size * ((pCtx->param[3].i64 == TSDB_ORDER_ASC) ? 
0 : len -1)); for (int32_t i = 0; i < len; ++i) { int32_t offset = (int32_t)sizeof(int64_t) + bytes; for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) { @@ -5317,7 +5402,7 @@ static void unique_func_finalizer(SQLFunctionCtx *pCtx) { } SortSupporter support = {0}; // user specify the order of output by sort the result according to timestamp - if (pCtx->param[2].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pCtx->param[2].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX || pCtx->param[2].i64 == TSDB_RES_COL_ID) { support.dataOffset = 0; support.comparFn = compareInt64Val; } else{ @@ -5596,7 +5681,7 @@ static void tail_func_finalizer(SQLFunctionCtx *pCtx) { } // if(pCtx->stableQuery){ - GET_RES_INFO(pCtx)->numOfRes = pRes->num - pCtx->param[1].i64; + GET_RES_INFO(pCtx)->numOfRes = pRes->num - (int32_t)pCtx->param[1].i64; // }else{ // GET_RES_INFO(pCtx)->numOfRes = pRes->num; // } @@ -5611,12 +5696,12 @@ static void tail_func_finalizer(SQLFunctionCtx *pCtx) { return; } for(int32_t i = 0; i < GET_RES_INFO(pCtx)->numOfRes; i++){ - memcpy(data + i * size, pRes->res[i], size); + memcpy((char*)data + i * size, pRes->res[i], size); } SortSupporter support = {0}; // user specify the order of output by sort the result according to timestamp - if (pCtx->param[2].i64 != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pCtx->param[2].i64 != PRIMARYKEY_TIMESTAMP_COL_INDEX && pCtx->param[2].i64 != TSDB_RES_COL_ID) { support.dataOffset = sizeof(int64_t); support.comparFn = getComparFunc(type, 0); taosqsort(data, (size_t)GET_RES_INFO(pCtx)->numOfRes, size, &support, sortCompareFn); @@ -5627,6 +5712,71 @@ static void tail_func_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } + +static void state_count_function(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SStateInfo *pStateInfo = GET_ROWCELL_INTERBUF(pResInfo); + + void *data = GET_INPUT_DATA_LIST(pCtx); + int64_t *pOutput = (int64_t *)pCtx->pOutput; + + for (int32_t i = 0; i < pCtx->size; i++,pOutput++,data 
+= pCtx->inputBytes) { + if (pCtx->hasNull && isNull(data, pCtx->inputType)) { + setNull(pOutput, TSDB_DATA_TYPE_BIGINT, 0); + continue; + } + if (isStateOperTrue(data, pCtx->inputType, &pCtx->param[0], &pCtx->param[1])){ + *pOutput = ++pStateInfo->countPrev; + }else{ + *pOutput = -1; + pStateInfo->countPrev = 0; + } + } + + for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { + SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; + if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { + aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); + } + } + pResInfo->numOfRes += pCtx->size; +} + +static void state_duration_function(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SStateInfo *pStateInfo = GET_ROWCELL_INTERBUF(pResInfo); + + void *data = GET_INPUT_DATA_LIST(pCtx); + TSKEY* tsList = GET_TS_LIST(pCtx); + int64_t *pOutput = (int64_t *)pCtx->pOutput; + + for (int32_t i = 0; i < pCtx->size; i++,pOutput++,data += pCtx->inputBytes) { + if (pCtx->hasNull && isNull(data, pCtx->inputType)) { + setNull(pOutput, TSDB_DATA_TYPE_BIGINT, 0); + continue; + } + if (isStateOperTrue(data, pCtx->inputType, &pCtx->param[0], &pCtx->param[1])){ + if (pStateInfo->durationStart == 0) { + *pOutput = 0; + pStateInfo->durationStart = tsList[i]; + } else { + *pOutput = (tsList[i] - pStateInfo->durationStart)/pCtx->param[2].i64; + } + } else{ + *pOutput = -1; + pStateInfo->durationStart = 0; + } + } + + for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { + SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; + if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { + aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); + } + } + pResInfo->numOfRes += pCtx->size; +} + int16_t getTimeWindowFunctionID(int16_t colIndex) { switch (colIndex) { case TSDB_TSWIN_START_COLUMN_INDEX: { @@ -5661,7 +5811,6 @@ static void wduration_function(SQLFunctionCtx *pCtx) { } *(int64_t *)(pCtx->pOutput) = duration; } - 
///////////////////////////////////////////////////////////////////////////////////////////// /* * function compatible list. @@ -5674,16 +5823,16 @@ static void wduration_function(SQLFunctionCtx *pCtx) { * */ int32_t functionCompatList[] = { - // count, sum, avg, min, max, stddev, percentile, apercentile, first, last - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - // last_row,top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_comp - 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, - // tag, colprj, tagprj, arithm, diff, first_dist, last_dist, stddev_dst, interp rate, irate - 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, - // tid_tag, deriv, csum, mavg, sample, block_info, elapsed, histogram, unique, mode, tail - 6, 8, -1, -1, -1, 7, 1, -1, -1, 1, -1, - // wstart, wstop, wduration - 1, 1, 1 + // count, sum, avg, min, max, stddev, percentile, apercentile, first, last + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + // last_row, top, bottom, spread, twa, leastsqr, ts, ts_dummy, tag_dummy, ts_comp + 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, + // tag, colprj, tagprj, arithm, diff, first_dist, last_dist, stddev_dst, interp rate, irate + 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, + // tid_tag, deriv, csum, mavg, sample, block_info, elapsed, histogram, unique, mode, tail + 6, 8, -1, -1, -1, 7, 1, -1, -1, 1, -1, + // stateCount, stateDuration, wstart, wstop, wduration, + 1, 1, 1, 1, 1, }; SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{ @@ -6195,6 +6344,30 @@ SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{ }, { // 42 + "stateCount", + TSDB_FUNC_STATE_COUNT, + TSDB_FUNC_INVALID_ID, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + function_setup, + state_count_function, + doFinalizer, + noop1, + dataBlockRequired, + }, + { + // 43 + "stateDuration", + TSDB_FUNC_STATE_DURATION, + TSDB_FUNC_INVALID_ID, + TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + function_setup, + state_duration_function, + doFinalizer, + noop1, + dataBlockRequired, + }, + { + // 44 "_wstart", TSDB_FUNC_WSTART, TSDB_FUNC_WSTART, @@ -6206,7 +6379,7 @@ SAggFunctionInfo 
aAggs[TSDB_FUNC_MAX_NUM] = {{ dataBlockRequired, }, { - // 43 + // 45 "_wstop", TSDB_FUNC_WSTOP, TSDB_FUNC_WSTOP, @@ -6218,7 +6391,7 @@ SAggFunctionInfo aAggs[TSDB_FUNC_MAX_NUM] = {{ dataBlockRequired, }, { - // 44 + // 46 "_wduration", TSDB_FUNC_WDURATION, TSDB_FUNC_WDURATION, diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 3e1ccb67d7fe8869e291112c41be24b384a271b3..8cb05977cd6430e41e8eceaeeb50f84f0abdc2fc 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2042,7 +2042,6 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; pRuntimeEnv->prevGroupId = INT32_MIN; - pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables * 10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); @@ -2661,7 +2660,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool return; } - if (pQueryAttr->groupbyColumn && pQueryAttr->order.order == TSDB_ORDER_DESC) { + if ((pQueryAttr->groupbyColumn || pQueryAttr->stateQuery) && pQueryAttr->order.order == TSDB_ORDER_DESC) { pQueryAttr->order.order = TSDB_ORDER_ASC; if (pQueryAttr->window.skey > pQueryAttr->window.ekey) { SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); @@ -3174,7 +3173,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery) && (!pQueryAttr->pointInterpQuery)) { // stable aggregate, not interval aggregate or normal column aggregate + } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->pointInterpQuery)) 
{ // stable aggregate, not interval aggregate or normal column aggregate doSetTableGroupOutputBuf(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pTableScanInfo->pCtx, pTableScanInfo->rowCellInfoOffset, pTableScanInfo->numOfOutput, pRuntimeEnv->current->groupIndex); @@ -9599,6 +9598,9 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S if (pExprs[col].base.flist.filterInfo) { ++pQueryAttr->havingNum; } + if (pExprs[col].base.functionId == TSDB_FUNC_STATE_COUNT || pExprs[col].base.functionId == TSDB_FUNC_STATE_DURATION){ + pQueryAttr->stateQuery = true; + } } doUpdateExprColumnIndex(pQueryAttr); diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 93883f645195c56011de85136461a43398f14b01..afc88d5274906110ffc3f9e5abb3e6af7b92b576 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -48,8 +48,6 @@ static SKeyword keywordTable[] = { {"OR", TK_OR}, {"AND", TK_AND}, {"NOT", TK_NOT}, - {"EQ", TK_EQ}, - {"NE", TK_NE}, {"ISNULL", TK_ISNULL}, {"NOTNULL", TK_NOTNULL}, {"IS", TK_IS}, @@ -58,10 +56,6 @@ static SKeyword keywordTable[] = { {"GLOB", TK_GLOB}, {"BETWEEN", TK_BETWEEN}, {"IN", TK_IN}, - {"GT", TK_GT}, - {"GE", TK_GE}, - {"LT", TK_LT}, - {"LE", TK_LE}, {"BITAND", TK_BITAND}, {"BITOR", TK_BITOR}, {"LSHIFT", TK_LSHIFT}, diff --git a/tests/compatibility/init.sh b/tests/compatibility/init.sh new file mode 100755 index 0000000000000000000000000000000000000000..78002179871ba818fe79794abd40965140a46cd5 --- /dev/null +++ b/tests/compatibility/init.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +function usage() { + echo "$0" + echo -e "\t -w work dir" + echo -e "\t -o old version package" + echo -e "\t -n new version package" + echo -e "\t -c client package" + echo -e "\t -h help" +} + +while getopts "w:o:n:c:h" opt; do + case $opt in + w) + WORK_DIR=$OPTARG + ;; + o) + TAOS_PKG1=$OPTARG + ;; + n) + TAOS_PKG2=$OPTARG + ;; + c) + CLIENT_PKG=$OPTARG + ;; + h) + usage + exit 0 + ;; + \?) 
+ echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + +if [ -z "$WORK_DIR" ]; then + usage + exit 1 +fi +if [ -z "$TAOS_PKG1" ]; then + usage + exit 1 +fi +if [ -z "$TAOS_PKG2" ]; then + usage + exit 1 +fi +if [ ! -z "$CLIENT_PKG" ]; then + if [ ! -f "$CLIENT_PKG" ]; then + echo "$CLIENT_PKG not found" + exit 1 + fi +fi + +script_dir=`dirname $0` +cd $script_dir + +source settings.sh + +code_dir=$WORK_DIR/TDinternal +container_workdir1=$WORK_DIR/compatibility/$container_name1 +container_workdir2=$WORK_DIR/compatibility/$container_name2 +container_workdir3=$WORK_DIR/compatibility/$container_name3 +container_workdir4=$WORK_DIR/compatibility/$container_name4 + + +mkdir -p $container_workdir1 +mkdir -p $container_workdir2 +mkdir -p $container_workdir3 +mkdir -p $container_workdir4 + +docker rm -f $container_name1 >/dev/null 2>&1 +docker rm -f $container_name2 >/dev/null 2>&1 +docker rm -f $container_name3 >/dev/null 2>&1 +docker rm -f $container_name4 >/dev/null 2>&1 + +net_name=mynet +docker network create --driver bridge --subnet 172.31.30.0/24 --gateway 172.31.30.1 $net_name + +./init_container.sh -d $code_dir -w $container_workdir1 -p $TAOS_PKG1 -q $TAOS_PKG2 -n $net_name -c $container_name1 & +./init_container.sh -d $code_dir -w $container_workdir2 -p $TAOS_PKG1 -q $TAOS_PKG2 -n $net_name -c $container_name2 & +./init_container.sh -d $code_dir -w $container_workdir3 -p $TAOS_PKG1 -q $TAOS_PKG2 -n $net_name -c $container_name3 & +./init_container.sh -d $code_dir -w $container_workdir4 -p $TAOS_PKG1 -q $TAOS_PKG2 -n $net_name -c $container_name4 & + +RET=0 +pids=`jobs -p` +for pid in $pids; do + wait $pid + status=$? + if [ $status -ne 0 ]; then + echo "init container $pid status is $status!" + RET=$status + fi +done + +if [ $RET -eq 0 ]; then + if [ -z "$CLIENT_PKG" ]; then + docker exec $container_name4 pip3 install /home/TDinternal/community/src/connector/python + RET=$? 
+ else + pkg_name=`basename $CLIENT_PKG` + pkg_dir=`echo "$pkg_name"|sed "s/-Linux-x64.tar.gz//"` + docker cp $CLIENT_PKG $container_name4:/home/ + docker exec $container_name4 sh -c "cd /home;tar xzf $pkg_name;if [ -d /home/$pkg_dir/connector/python/linux/python3 ]; then pip3 install /home/$pkg_dir/connector/python/linux/python3; else pip3 install /home/$pkg_dir/connector/python; fi" + RET=$? + fi +fi + +if [ $RET -eq 0 ]; then + echo "containers created" +else + echo "containers create failed" +fi + +exit $RET + diff --git a/tests/compatibility/init_container.sh b/tests/compatibility/init_container.sh new file mode 100755 index 0000000000000000000000000000000000000000..ce1f70fee2d0f3e5d215b1f2a579807f9a1ce55a --- /dev/null +++ b/tests/compatibility/init_container.sh @@ -0,0 +1,105 @@ +#!/bin/bash + +function usage() { + echo "$0" + echo -e "\t -w work dir" + echo -e "\t -d TDinternal code dir" + echo -e "\t -p old package" + echo -e "\t -q new package" + echo -e "\t -c container name" + echo -e "\t -h help" +} + +while getopts "w:p:q:n:c:d:h" opt; do + case $opt in + w) + WORK_DIR=$OPTARG + ;; + d) + CODE_DIR=$OPTARG + ;; + p) + TAOS_PKG1=$OPTARG + ;; + q) + TAOS_PKG2=$OPTARG + ;; + c) + CONTAINER_NAME=$OPTARG + ;; + n) + NET_NAME=$OPTARG + ;; + h) + usage + exit 0 + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + +if [ -z "$WORK_DIR" ]; then + usage + exit 1 +fi +if [ -z "$TAOS_PKG1" ]; then + usage + exit 1 +fi +if [ -z "$TAOS_PKG2" ]; then + usage + exit 1 +fi +if [ -z "$CONTAINER_NAME" ]; then + usage + exit 1 +fi +if [ -z "$NET_NAME" ]; then + usage + exit 1 +fi +if [ -z "$CODE_DIR" ]; then + usage + exit 1 +fi +if [ ! -f "$TAOS_PKG1" ]; then + echo "$TAOS_PKG1 not found" + exit 1 +fi +if [ ! 
-f "$TAOS_PKG2" ]; then + echo "$TAOS_PKG2 not found" + exit 1 +fi + +pkg_name1=`basename $TAOS_PKG1` +pkg_dir1=`echo "$pkg_name1"|sed "s/-Linux-x64.tar.gz//"` +pkg_name2=`basename $TAOS_PKG2` +pkg_dir2=`echo "$pkg_name2"|sed "s/-Linux-x64.tar.gz//"` + +RET=0 +docker run -d --name $CONTAINER_NAME \ + --hostname $CONTAINER_NAME \ + --net $NET_NAME --ulimit core=-1 -it \ + -v $TAOS_PKG1:/home/tdengine1.tar.gz:ro \ + -v $TAOS_PKG2:/home/tdengine2.tar.gz:ro \ + -v $WORK_DIR/coredump:/home/coredump \ + -v $CODE_DIR:/home/TDinternal \ + taos_test:v1.0 bash +RET=$? +if [ $RET -ne 0 ]; then + echo "docker run failed with $RET" + exit $RET +fi + +docker exec $CONTAINER_NAME sh -c "cd /home;tar xzf tdengine1.tar.gz;tar xzf tdengine2.tar.gz;cd $pkg_dir1;./install.sh -v server -e no" +RET=$? +if [ $RET -ne 0 ]; then + echo "docker exec install.sh failed with $RET" + exit $RET +fi +exit 0 + diff --git a/tests/compatibility/run_test.sh b/tests/compatibility/run_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..7f29b75886054f8552ad37d4728e9d943123452d --- /dev/null +++ b/tests/compatibility/run_test.sh @@ -0,0 +1,190 @@ +#!/bin/bash + +function usage() { + echo "$0" + echo -e "\t -w work dir" + echo -e "\t -o old package" + echo -e "\t -n new package" + echo -e "\t -c client package" + echo -e "\t -h help" +} + +while getopts "w:o:n:c:h" opt; do + case $opt in + w) + WORK_DIR=$OPTARG + ;; + o) + OLD_PACKAGE=$OPTARG + ;; + n) + NEW_PACKAGE=$OPTARG + ;; + c) + CLIENT_PACKAGE_PARAM="-c $OPTARG" + ;; + h) + usage + exit 0 + ;; + \?) 
+ echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + +if [ -z "$WORK_DIR" ]; then + usage + exit 1 +fi +if [ -z "$OLD_PACKAGE" ]; then + usage + exit 1 +fi +if [ -z "$NEW_PACKAGE" ]; then + usage + exit 1 +fi + +script_dir=`dirname $0` +cd $script_dir + +pkg_name=`basename $NEW_PACKAGE` +new_version=`echo "$pkg_name"|sed "s/TDengine-enterprise-server-//"|sed "s/-Linux-x64.tar.gz//"` +pkg_name=`basename $OLD_PACKAGE` +old_version=`echo "$pkg_name"|sed "s/TDengine-enterprise-server-//"|sed "s/-Linux-x64.tar.gz//"` + +source settings.sh + +containers="$container_name1 $container_name2 $container_name3" + +# initialize all containers and install with old version package +./init.sh -w $WORK_DIR -o $OLD_PACKAGE -n $NEW_PACKAGE $CLIENT_PACKAGE_PARAM + +# upgrade with new version package +function upgrade() { + local container_name=$1 + local new_pkg_name=`basename $NEW_PACKAGE` + local new_pkg_dir=`echo "$new_pkg_name"|sed "s/-Linux-x64.tar.gz//"` + local ret=0 + echo "upgrade ${container_name}" + docker exec $container_name service taosd stop + ret=$? + if [ $ret -ne 0 ]; then + echo "docker exec $container_name service taosd stop, exit: $ret" + return $ret + fi + docker exec $container_name sh -c "cd /home/$new_pkg_dir;./install.sh -v server -e no" + ret=$? + if [ $ret -ne 0 ]; then + echo "docker exec $container_name install.sh, exit: $ret" + return $ret + fi + docker exec $container_name service taosd start + ret=$? + if [ $ret -ne 0 ]; then + echo "docker exec $container_name service taosd start, exit: $ret" + return $ret + fi + return 0 +} + +function checkStatus() { + local container_name=$1 + local check_version=$2 + echo "python3 manualTest/TD-5114/checkClusterStatus.py $container_name $check_version" + docker exec $container_name4 sh -c "cd /home/TDinternal/community/tests/pytest;python3 manualTest/TD-5114/checkClusterStatus.py $container_name $check_version" + return $? 
+} + +# config container /etc/taos/taos.cfg +taos_cfg=/etc/taos/taos.cfg +for container in $containers; do + docker exec $container sed -i "s/^.*firstEp.*$/firstEp $container_name1:6030/" $taos_cfg + docker exec $container sed -i "s/^.*fqdn.*$/fqdn $container/" $taos_cfg + docker exec $container sed -i "s/^.*numOfMnodes.*$/numOfMnodes 3/" $taos_cfg +done + +# start taosd +docker exec $container_name1 service taosd start +docker exec $container_name4 taos -h $container_name1 -s "CREATE DNODE \"$container_name2:6030\";" +docker exec $container_name4 taos -h $container_name1 -s "CREATE DNODE \"$container_name3:6030\";" + +# start taosd +docker exec $container_name2 service taosd start +docker exec $container_name3 service taosd start + +sleep 10 + +# show nodes +docker exec $container_name4 taos -h $container_name1 -s "SHOW DNODES;" +docker exec $container_name4 taos -h $container_name1 -s "SHOW MNODES;" + +# check cluster status +for container in $containers; do + checkStatus $container $old_version + RET=$? + if [ $RET -ne 0 ]; then + echo "check cluster status $container error: $RET" + exit $RET + fi + echo "check cluster status $container ret: $RET" +done + +sleep 1 + +# upgrade +upgrade ${container_name3} +RET=$? +if [ $RET -ne 0 ]; then + echo "upgrade ${container_name3} error: $RET" + exit $RET +fi +sleep 10 +# check cluster status +checkStatus ${container_name3} $old_version +RET=$? +if [ $RET -ne 0 ]; then + echo "check cluster status ${container_name3} error: $RET" + exit $RET +fi +echo "check cluster status ${container_name3} ret: $RET" + +# upgrade +upgrade ${container_name2} +RET=$? +if [ $RET -ne 0 ]; then + echo "upgrade ${container_name2} error: $RET" + exit $RET +fi +sleep 10 +# check cluster status +checkStatus ${container_name2} $old_version +RET=$? 
+if [ $RET -ne 0 ]; then + echo "check cluster status ${container_name2} error: $RET" + exit $RET +fi +echo "check cluster status ${container_name2} ret: $RET" + +# upgrade +upgrade ${container_name1} +RET=$? +if [ $RET -ne 0 ]; then + echo "upgrade ${container_name1} error: $RET" + exit $RET +fi +sleep 10 +# check cluster status +checkStatus ${container_name3} $new_version +RET=$? +if [ $RET -ne 0 ]; then + echo "check cluster status ${container_name3} error: $RET" + exit $RET +fi +echo "check cluster status ${container_name3} ret: $RET" + +exit $RET + diff --git a/tests/compatibility/settings.sh b/tests/compatibility/settings.sh new file mode 100644 index 0000000000000000000000000000000000000000..d11e9d19c24db1b0625eb6e511c1fe84d14e229d --- /dev/null +++ b/tests/compatibility/settings.sh @@ -0,0 +1,5 @@ + +container_name1=compat_container1 +container_name2=compat_container2 +container_name3=compat_container3 +container_name4=compat_container4 diff --git a/tests/develop-test/2-query/function_state.py b/tests/develop-test/2-query/function_state.py new file mode 100644 index 0000000000000000000000000000000000000000..5a236aa6ac8abb1cb462750d18cd3ff8fdbb8da8 --- /dev/null +++ b/tests/develop-test/2-query/function_state.py @@ -0,0 +1,247 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-11210] function stateCount stateDuration + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists statef") + tdSql.execute("create database if not exists statef PRECISION 'ns'") + tdSql.execute('use statef') + + tdSql.execute('create table sstatef (ts timestamp, dbig bigint, dsmall smallint, dbool bool, dtiny tinyint unsigned, dfloat float, ddouble double, dnchar nchar(4093), dbinary binary(64), dtime timestamp) tags (tbinary nchar(4093), tint int)') + tdSql.execute('create table statef1 using sstatef tags ("t1", 1)') + tdSql.execute('create table statef2 using sstatef tags ("t2", 2)') + + tdSql.execute('insert into statef1 values("2021-10-17 00:31:31", 1, -3276, true, 253, 3.32333, 4.984392323, "你好", "sddd", 333) ("2022-01-24 00:31:32", 1, -32767, false, 254, NULL, 4.982392323, "你好吗", "sdf",2323)') + tdSql.execute('insert into statef2 values("2021-10-15 00:31:33", 1, NULL, true, 23, 3.4, 4.982392323, "你好吗", "sdf", 333) ("2021-12-24 00:31:34", 2, 32767, NULL, NULL, NULL, 4.982392323, NULL, "sddd", NULL) ("2022-01-01 08:00:05", 19, 3276, true, 2, 3.323222, 4.92323, "试试", "sddd", 1645434434000)') + tdSql.execute('insert into statef2 values("2021-10-17 00:31:31", NULL, 32767, true, 123, 3.323232333, 4.2, NULL, NULL, NULL) ("2022-01-01 08:00:06", NULL, NULL, NULL, 35, 3.323232333, NULL, "试试", NULL, 1645434434000) ("2022-01-01 08:00:07", 
9, 54, true, 25, 3.32333, NULL, "试试", NULL, 1645434434001)') + + # error + tdSql.error("select stateCount(ts,LE,4.923230000) from statef2") + tdSql.error("select stateCount(dbool,LE,4.923230000) from statef2") + tdSql.error("select stateCount(dnchar,LE,4.923230000) from statef2") + tdSql.error("select stateCount(dbinary,LE,4.923230000) from statef2") + tdSql.error("select stateCount(dtime,LE,4.923230000) from statef2") + tdSql.error("select stateCount(tint,LE,4.923230000) from statef2") + tdSql.error("select stateCount(tbinary,LE,4.923230000) from statef2") + tdSql.error("select stateCount(tbinary,ew,4.923230000) from statef2") + tdSql.error("select stateCount(tbinary,23,4.923230000) from statef2") + tdSql.query("select stateCount(dtiny,le,1e3) from statef2") + tdSql.error("select stateCount(dtiny,le,1e3) from statef") + tdSql.error("select stateDuration(dtiny,le,1e3) from statef") + tdSql.query("select stateDuration(dtiny,le,1e3) from statef2") + tdSql.error("select stateCount(dtiny,le,'1e3') from statef2") + tdSql.error("select stateCount(dtiny,le,le) from statef2") + tdSql.error("select stateDuration(dtiny,le,le) from statef2") + tdSql.error("select stateCount(dtiny,le,2,1s) from statef2") + tdSql.error("select stateDuration(dtiny,le,2,1) from statef2") + tdSql.error("select stateDuration(dtiny,le,2,'1s') from statef2") + tdSql.error("select stateDuration(dtiny,le,2,2s) from statef2") + + tdSql.error("select stateCount(dtiny,le,1e3),top(dtiny,1) from statef2") + tdSql.error("select stateCount(dtiny,le,1e3),first(dbig) from statef2") + tdSql.error("select stateCount(dtiny,le,1e3),ceil(dsmall) from statef2") + + #interval + tdSql.error('select stateCount(dtiny,ne,9.0) from statef2 interval(1s)') + tdSql.error('select stateDuration(dtiny,ne,9.0,1s) from statef2 interval(1s)') + #state_window + tdSql.error('select stateCount(dtiny,ne,9.0) from statef2 state_window(dbool)') + tdSql.error('select stateDuration(dtiny,ne,9.0,1s) from statef2 state_window(dbool)') + 
#session + tdSql.error('select stateCount(dtiny,ne,9.0) from statef2 session(ts,1w)') + tdSql.error('select stateDuration(dtiny,ne,9.0,1s) from statef2 session(ts,1w)') + + tdSql.error('select stateDuration(dfloat,Ge,3.32323) from (select dfloat from statef2)') + tdSql.error('select stateCount(dfloat,Ge,3.32323) from (select dfloat from statef2)') + + ## test normal table + tdSql.query('select stateCount(dtiny,GT,10) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 0, "2021-10-15 00:31:33") + tdSql.checkData(0, 1, 23) + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 0, "2021-10-17 00:31:31") + tdSql.checkData(1, 1, 123) + tdSql.checkData(1, 2, 2) + tdSql.checkData(2, 0, "2021-12-24 00:31:34") + tdSql.checkData(2, 1, None) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 0, "2022-01-01 08:00:05") + tdSql.checkData(3, 1, 2) + tdSql.checkData(3, 2, -1) + tdSql.checkData(4, 0, "2022-01-01 08:00:06") + tdSql.checkData(4, 1, 35) + tdSql.checkData(4, 2, 1) + tdSql.checkData(5, 0, "2022-01-01 08:00:07") + tdSql.checkData(5, 1, 25) + tdSql.checkData(5, 2, 2) + + tdSql.query('select dtiny,ts,stateCount(dtiny,GT,10),*,tbinary from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 1, "2021-10-15 00:31:33") + tdSql.checkData(1, 2, 123) + tdSql.checkData(2, 6, 2) + tdSql.checkData(3, 15, "t2") + + tdSql.query('select stateCount(dtiny,LT,10) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, -1) + tdSql.checkData(1, 2, -1) + tdSql.checkData(3, 2, 1) + tdSql.checkData(4, 2, -1) + + tdSql.query('select stateCount(ddouble,LE,4.923230000) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, -1) + tdSql.checkData(1, 2, 1) + tdSql.checkData(2, 2, -1) + tdSql.checkData(3, 2, 1) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, None) + + tdSql.query('select stateCount(dfloat,Ge,3.32323) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, -1) + tdSql.checkData(4, 2, 
1) + tdSql.checkData(5, 2, 2) + + tdSql.query('select stateCount(dsmall,eq,3276.0) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, None) + tdSql.checkData(1, 2, -1) + tdSql.checkData(2, 2, -1) + tdSql.checkData(3, 2, 1) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, -1) + + tdSql.query('select stateCount(dbig,ne,9.0) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, None) + tdSql.checkData(2, 2, 2) + tdSql.checkData(3, 2, 3) + tdSql.checkData(4, 2, None) + tdSql.checkData(5, 2, -1) + + tdSql.query('select stateDuration(dtiny,ne,9.0) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, 0) + tdSql.checkData(1, 2, 172798) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, 6766112) + tdSql.checkData(4, 2, 6766113) + tdSql.checkData(5, 2, 6766114) + + tdSql.query('select stateDuration(dtiny,ne,9.0,1h) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, 0) + tdSql.checkData(1, 2, 47) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, 1879) + tdSql.checkData(4, 2, 1879) + tdSql.checkData(5, 2, 1879) + + tdSql.query('select stateDuration(dtiny,ne,9.0,1m) from statef2') + tdSql.checkRows(6) + tdSql.checkData(0, 2, 0) + tdSql.checkData(1, 2, 2879) + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 2, 112768) + tdSql.checkData(4, 2, 112768) + tdSql.checkData(5, 2, 112768) + + ## test super table + tdSql.query('select stateDuration(dtiny,ne,9.0,1s) from sstatef group by tbname') + tdSql.checkRows(8) + tdSql.checkData(0, 2, 0) + tdSql.checkData(1, 2, 8553601) + + #where + tdSql.query('select stateCount(dfloat,Ge,3.32323) from statef2 where dfloat >3.32323') + tdSql.checkRows(4) + tdSql.checkData(0, 2, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(2, 2, 3) + tdSql.checkData(3, 2, 4) + + tdSql.query('select stateDuration(dfloat,Ge,3.32323) from statef2 where dfloat <3.4') + tdSql.checkRows(4) + tdSql.checkData(0, 2, 0) + tdSql.checkData(1, 2, -1) + tdSql.checkData(2, 2, 0) + tdSql.checkData(3, 2, 1) + 
+ tdSql.query('select stateDuration(dfloat,Ge,3.32323,1m) from statef2 where dfloat <3.4') + tdSql.checkRows(4) + tdSql.checkData(3, 2, 0) + + #slimit/soffset + tdSql.query('select stateDuration(dtiny,ne,9.0,1s) from sstatef group by tbname slimit 2 soffset 1') + tdSql.checkRows(6) + + #limit/offset + tdSql.query('select stateCount(dfloat,Ge,3.32323) from statef2 limit 1,2') + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2021-10-17 00:31:31") + tdSql.checkData(0, 2, 2) + tdSql.checkData(1, 2, None) + + #having + tdSql.query('select stateDuration(dtiny,ne,9.0,1s) from sstatef group by tbname having stateDuration(dtiny,ne,9.0,1s) > 0') + + #subquery + tdSql.error('select stateDuration(dfloat,Ge,3.32323) from (select ts,dfloat from statef2)') + + #union + tdSql.query('select stateCount(dfloat,Ge,3.32323) from statef1 union all select stateCount(dfloat,Ge,3.32323) from statef2') + tdSql.checkRows(8) + tdSql.checkData(0, 2, 1) + tdSql.checkData(3, 2, 2) + + #join + tdSql.execute('create table sstatef1 (ts timestamp, dbig bigint, dsmall smallint, dbool bool, dtiny tinyint unsigned, dfloat float, ddouble double, dnchar nchar(4093), dbinary binary(64), dtime timestamp) tags (tbinary nchar(4093), tint int)') + tdSql.execute('create table statef11 using sstatef1 tags ("t1", 1)') + + tdSql.execute('insert into statef11 values("2021-10-17 00:31:31", 1, -3276, true, 253, 3.32333, 4.984392323, "你好", "sddd", 333) ("2022-01-24 00:31:32", 1, -32767, false, 254, NULL, 4.982392323, "你好吗", "sdf",2323)') + + tdSql.error('select stateCount(sstatef.dsmall,eq,3276.0) from sstatef, sstatef1 where sstatef.ts=sstatef1.ts and sstatef.tint=sstatef1.tint') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/develop-test/2-query/function_tail.py b/tests/develop-test/2-query/function_tail.py index 
a5be3efb906f14c6e7a96cc444c6c90915e2c82a..5898ac0b3746b85375f6c11461b9c5c3394b0fa4 100644 --- a/tests/develop-test/2-query/function_tail.py +++ b/tests/develop-test/2-query/function_tail.py @@ -321,6 +321,20 @@ class TDTestCase: tdSql.checkData(1, 0, "2022-01-01 08:00:07") tdSql.checkData(1, 1, "试试") + tdSql.query('select tail(dbig, 3) from (select * from stail) order by ts') + tdSql.checkRows(3) + tdSql.checkData(0, 0, "2022-01-01 08:00:06") + tdSql.checkData(0, 1, None) + tdSql.checkData(1, 0, "2022-01-01 08:00:07") + tdSql.checkData(1, 1, 9) + + tdSql.query('select tail(dbig, 3) from (select * from stail) order by ts desc') + tdSql.checkRows(3) + tdSql.checkData(0, 0, "2022-01-24 00:31:32") + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 0, "2022-01-01 08:00:07") + tdSql.checkData(1, 1, 9) + #union tdSql.query('select tail(dtiny,2) from tail1 union all select tail(dtiny,2) from tail2') tdSql.checkRows(4) diff --git a/tests/develop-test/2-query/function_unique.py b/tests/develop-test/2-query/function_unique.py index f6c05413f2ff95c0221002a48ed3f59672492a50..b5298a88fbd366235e272b14c2b3cd8d21187488 100644 --- a/tests/develop-test/2-query/function_unique.py +++ b/tests/develop-test/2-query/function_unique.py @@ -256,6 +256,20 @@ class TDTestCase: tdSql.query('select unique(num) from (select * from unique where voltage > 1)') tdSql.checkRows(2) + tdSql.query('select unique(num) from (select * from unique) order by ts') + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2021-10-15 00:00:01") + tdSql.checkData(0, 1, 2) + tdSql.checkData(1, 0, "2021-12-25 01:31:31") + tdSql.checkData(1, 1, 4) + + tdSql.query('select unique(num) from (select * from unique) order by ts desc') + tdSql.checkRows(2) + tdSql.checkData(0, 0, "2021-12-25 01:31:31") + tdSql.checkData(0, 1, 4) + tdSql.checkData(1, 0, "2021-10-15 00:00:01") + tdSql.checkData(1, 1, 2) + #union tdSql.query('select unique(voltage) from d002 union all select unique(voltage) from d003') tdSql.checkRows(5) diff --git 
a/tests/parallel_test/arm.cases.task b/tests/parallel_test/arm.cases.task new file mode 100644 index 0000000000000000000000000000000000000000..b6441b6324f23c3b94bce5c7083776c82b3f8325 --- /dev/null +++ b/tests/parallel_test/arm.cases.task @@ -0,0 +1,19 @@ +4,,pytest,python3 test.py -f insert/double.py +4,,pytest,python3 test.py -f insert/date.py +4,,pytest,python3 test.py -f insert/bug3654.py +4,,pytest,python3 test.py -f insert/bool.py +4,,pytest,python3 test.py -f insert/bigint.py +4,,pytest,python3 test.py -f insert/basic.py +4,,pytest,python3 test.py -f insert/alterTableAndInsert.py +4,,pytest,python3 test.py -f import_merge/importHeadPartOverlap.py +4,,pytest,python3 test.py -f functions/function_stddev_td2555.py +4,,pytest,python3 test.py -f dbmgmt/nanoSecondCheck.py +4,,pytest,python3 bug2265.py +4,,pytest,python3 test.py -f alter/alterTabAddTagWithNULL.py +4,,pytest,python3 test.py -f alter/alter_debugFlag.py +4,,pytest,python3 test.py -f alter/alter_create_exception.py +3,,pytest,python3 test.py -f tag_lite/binary.py +3,,pytest,python3 test.py -f query/filterAllIntTypes.py +3,,develop-test,python3 ./test.py -f 2-query/ts_hidden_column.py +#3,,script,./test.sh -f general/compute/scalar_triangle.sim +3,,script,./test.sh -f general/compute/scalar_str_concat_len.sim diff --git a/tests/parallel_test/arm.json b/tests/parallel_test/arm.json new file mode 100644 index 0000000000000000000000000000000000000000..585c59647035cd5a4485fb86c07aa60986c657e5 --- /dev/null +++ b/tests/parallel_test/arm.json @@ -0,0 +1,12 @@ +[{ + "host":"192.168.1.207", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":2 +}, +{ + "host":"192.168.1.204", + "username":"root", + "workdir":"/var/data/jenkins/workspace", + "thread":2 +}] diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 8f31f7ebeecb1170e3fb149dee733c02d44b7251..7a83fddaa6343c7fd050d5241634bc70c0b2c789 100644 --- a/tests/parallel_test/cases.task +++ 
b/tests/parallel_test/cases.task @@ -791,4 +791,5 @@ 3,,script,./test.sh -f general/compute/scalar_str_concat_len.sim 3,,develop-test,python3 ./test.py -f 2-query/function_tail.py 2,,develop-test,python3 ./test.py -f 2-query/function_unique.py +1,,develop-test,python3 ./test.py -f 2-query/function_state.py 1,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/demo.py diff --git a/tests/parallel_test/compat.json b/tests/parallel_test/compat.json new file mode 100644 index 0000000000000000000000000000000000000000..b932c2b3aeba82268a79eb7dd8edd5e222f3810b --- /dev/null +++ b/tests/parallel_test/compat.json @@ -0,0 +1,14 @@ +[ + { + "host": "192.168.0.38", + "username": "root", + "workdir": "/var/data/jenkins/workspace", + "thread": 10 + }, + { + "host": "192.168.0.39", + "username": "root", + "workdir": "/var/data/jenkins/workspace", + "thread": 10 + } +] diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh new file mode 100755 index 0000000000000000000000000000000000000000..0873f6030ba1a9cb651add3eb06341edfeb9e606 --- /dev/null +++ b/tests/parallel_test/container_build.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +function usage() { + echo "$0" + echo -e "\t -w work dir" + echo -e "\t -c community version" + echo -e "\t -t make thread count" + echo -e "\t -h help" +} + +while getopts "w:t:ch" opt; do + case $opt in + w) + WORKDIR=$OPTARG + ;; + c) + COMMUNITY=community + ;; + t) + THREAD_COUNT=$OPTARG + ;; + h) + usage + exit 0 + ;; + \?) + echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + +if [ -z "$WORKDIR" ]; then + usage + exit 1 +fi +if [ -z "$THREAD_COUNT" ]; then + THREAD_COUNT=1 +fi + +ulimit -c unlimited + +INTERNAL_REPDIR=$WORKDIR/TDinternal + +docker run \ + -v $INTERNAL_REPDIR:/home \ + --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd /home/$COMMUNITY;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true;make -j $THREAD_COUNT" + +ret=$? 
+exit $ret diff --git a/tests/parallel_test/run.sh b/tests/parallel_test/run.sh index 026bfb020d9a77d5cd1b05e9030cfce69a7ba4c7..a9263929f4a2d5abd6048d2d93114ebecd45c9a9 100755 --- a/tests/parallel_test/run.sh +++ b/tests/parallel_test/run.sh @@ -6,10 +6,11 @@ function usage() { echo -e "\t -t task file" echo -e "\t -b branch" echo -e "\t -l log dir" + echo -e "\t -o default timeout value" echo -e "\t -h help" } -while getopts "m:t:b:l:h" opt; do +while getopts "m:t:b:l:o:h" opt; do case $opt in m) config_file=$OPTARG @@ -23,6 +24,9 @@ while getopts "m:t:b:l:h" opt; do l) log_dir=$OPTARG ;; + o) + timeout_param="-o $OPTARG" + ;; h) usage exit 0 @@ -201,18 +205,21 @@ function run_thread() { if [ -z "$case_file" ]; then continue fi - case_file="$exec_dir/${case_file}.${index}.${thread_no}" + case_file="$exec_dir/${case_file}.${index}.${thread_no}.${count}" count=$(( count + 1 )) local case_path=`dirname "$case_file"` if [ ! -z "$case_path" ]; then mkdir -p $log_dir/$case_path fi - cmd="${runcase_script} ${script} -w ${workdirs[index]} -c \"${case_cmd}\" -t ${thread_no} -d ${exec_dir}" + cmd="${runcase_script} ${script} -w ${workdirs[index]} -c \"${case_cmd}\" -t ${thread_no} -d ${exec_dir} ${timeout_param}" # echo "$thread_no $count $cmd" local ret=0 local redo_count=1 start_time=`date +%s` while [ ${redo_count} -lt 6 ]; do + if [ -f $log_dir/$case_file.log ]; then + cp $log_dir/$case_file.log $log_dir/$case_file.${redo_count}.redolog + fi echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$log_dir/$case_file.log echo -e "\e[33m >>>>> \e[0m ${case_cmd}" date >>$log_dir/$case_file.log @@ -220,6 +227,7 @@ function run_thread() { # ret=${PIPESTATUS[0]} $cmd >>$log_dir/$case_file.log 2>&1 ret=$? 
+ echo "${hosts[index]} `date` ret:${ret}" >>$log_dir/$case_file.log if [ $ret -eq 0 ]; then break fi diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index 5b7802ac2b346547e4d2cd171e93c1d5937a5360..50b8b5fcfe9a6b8afb3f6d2adfa1ff129c294559 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -1,18 +1,16 @@ #!/bin/bash CONTAINER_TESTDIR=/home/community -# CONTAINER_TESTDIR=/root/tang/repository/TDengine - -# export PATH=$PATH:$CONTAINER_TESTDIR/debug/build/bin function usage() { echo "$0" echo -e "\t -d execution dir" echo -e "\t -c command" + echo -e "\t -o default timeout value" echo -e "\t -h help" } -while getopts "d:c:h" opt; do +while getopts "d:c:o:h" opt; do case $opt in d) exec_dir=$OPTARG @@ -20,6 +18,9 @@ while getopts "d:c:h" opt; do c) cmd=$OPTARG ;; + o) + TIMEOUT_CMD="timeout $OPTARG" + ;; h) usage exit 0 @@ -41,29 +42,15 @@ if [ -z "$cmd" ]; then exit 0 fi -go env -w GOPROXY=https://goproxy.cn -echo "StrictHostKeyChecking no" >>/etc/ssh/ssh_config +export PATH=$PATH:/home/debug/build/bin ln -s /home/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null -npm config -g set unsafe-perm -npm config -g set registry https://registry.npm.taobao.org mkdir -p /home/sim/tsim mkdir -p /var/lib/taos/subscribe -rm -rf ${CONTAINER_TESTDIR}/src/connector/nodejs/node_modules -rm -rf ${CONTAINER_TESTDIR}/tests/examples/nodejs/node_modules -rm -rf ${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/node_modules -# ln -s /home/node_modules ${CONTAINER_TESTDIR}/src/connector/nodejs/ -# ln -s /home/node_modules ${CONTAINER_TESTDIR}/tests/examples/nodejs/ -# ln -s /home/node_modules ${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/ -# echo "$cmd"|grep -q "nodejs" -# if [ $? -eq 0 ]; then -# cd $CONTAINER_TESTDIR/src/connector/nodejs -# npm install node-gyp-build@4.3.0 --ignore-scripts -# fi cd $CONTAINER_TESTDIR/tests/$exec_dir ulimit -c unlimited -$cmd +$TIMEOUT_CMD $cmd RET=$? 
if [ $RET -ne 0 ]; then diff --git a/tests/parallel_test/run_container.sh b/tests/parallel_test/run_container.sh index 35ef3ad5a72b8c44b4de3db3159830c54cd1a6a3..5c6751148a2a168934c9d155695bbf51ec01f77b 100755 --- a/tests/parallel_test/run_container.sh +++ b/tests/parallel_test/run_container.sh @@ -6,10 +6,11 @@ function usage() { echo -e "\t -d execution dir" echo -e "\t -c command" echo -e "\t -t thread number" + echo -e "\t -o default timeout value" echo -e "\t -h help" } -while getopts "w:d:c:t:h" opt; do +while getopts "w:d:c:t:o:h" opt; do case $opt in w) WORKDIR=$OPTARG @@ -23,6 +24,9 @@ while getopts "w:d:c:t:h" opt; do t) thread_no=$OPTARG ;; + o) + timeout_param="-o $OPTARG" + ;; h) usage exit 0 @@ -71,12 +75,6 @@ if [ ! -d "${TMP_DIR}/thread_volume/$thread_no/$exec_dir" ]; then fi MOUNT_DIR="$TMP_DIR/thread_volume/$thread_no/$exec_dir:$CONTAINER_TESTDIR/tests/$exec_dir" echo "$thread_no -> ${exec_dir}:$cmd" -echo "$cmd"|grep -q "nodejs" -if [ $? -eq 0 ]; then - MOUNT_NODE_MOD="-v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/src/connector/nodejs/node_modules \ --v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/tests/examples/nodejs/node_modules \ --v $TMP_DIR/thread_volume/$thread_no/node_modules:${CONTAINER_TESTDIR}/tests/connectorTest/nodejsTest/nanosupport/node_modules" -fi if [ -f "$REPDIR/src/plugins/taosadapter/example/config/taosadapter.toml" ]; then TAOSADAPTER_TOML="-v $REPDIR/src/plugins/taosadapter/example/config/taosadapter.toml:/etc/taos/taosadapter.toml:ro" fi @@ -99,9 +97,7 @@ docker run \ -v $REPDIR/README.md:$CONTAINER_TESTDIR/README.md:ro \ -v $REPDIR/src/connector/python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \ -e LD_LIBRARY_PATH=/home/debug/build/lib:/home/debug/build/lib64 \ - -e 
PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/debug/build/bin:/usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin \ - -e JAVA_HOME=/usr/local/jdk1.8.0_144 \ - --rm --ulimit core=-1 taos_test:v1.0 $CONTAINER_TESTDIR/tests/parallel_test/run_case.sh -d "$exec_dir" -c "$cmd" + --rm --ulimit core=-1 taos_test:v1.0 $CONTAINER_TESTDIR/tests/parallel_test/run_case.sh -d "$exec_dir" -c "$cmd" $timeout_param ret=$? exit $ret diff --git a/tests/pytest/manualTest/TD-5114/checkClusterStatus.py b/tests/pytest/manualTest/TD-5114/checkClusterStatus.py new file mode 100644 index 0000000000000000000000000000000000000000..c6bff305a5b3317d03a793c9634b1ea19b3b7217 --- /dev/null +++ b/tests/pytest/manualTest/TD-5114/checkClusterStatus.py @@ -0,0 +1,113 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading +import subprocess +import datetime +from random import choice + +class TwoClients: + def initConnection(self): + self.host = sys.argv[1] + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos/" + self.port =6030 + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + + # new taos client + conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config ) + cur1 = conn1.cursor() + print(cur1) + tdSql.init(cur1, True) + + # insert data with python connector , if you want to use this case ,cancel note. 
+ + # check cluster status + tdSql.query("show dnodes") + print(tdSql.queryRows) + for i in range(tdSql.queryRows): + for j in range(1,tdSql.queryRows+1): + if (tdSql.queryResult[i][1] == "compat_container%d:6030" %j): + tdSql.checkData(i,4,"ready") + + tdSql.query("show mnodes") + tdSql.checkRows(3) + roles = "master slave" + for i in range(tdSql.queryRows): + if (tdSql.queryResult[i][2] in roles ): + ep = tdSql.queryResult[i][1] + role = tdSql.queryResult[i][2] + print(" the role of %s is %s " %(ep,role)) + else: + print("cluster is not ready") + + version = sys.argv[2] + tdSql.query("show variables") + for i in range(tdSql.queryRows): + if (tdSql.queryResult[i][0] == "version" ): + tdSql.checkData(i,1,"%s" % version) + + + + # for x in range(10): + dataType= [ "tinyint", "smallint", "int", "bigint", "float", "double", "bool", " binary(20)", "nchar(20)", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned"] + tdSql.execute("drop database if exists db1") + tdSql.execute("create database db1 keep 3650 replica 2 ") + tdSql.execute("use db1") + tdSql.execute('''create table test(ts timestamp, col0 tinyint, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col10 tinyint unsigned, col11 smallint unsigned, col12 int unsigned, col13 bigint unsigned) tags(loc nchar(3000), tag1 int)''') + print(datetime.datetime.now()) + rowNum1= 20 + for i in range(rowNum1): + tdSql.execute("alter table test add column col%d %s ;" %( i+14, choice(dataType)) ) + rowNum2= 20 + for i in range(rowNum2): + tdSql.execute("alter table test drop column col%d ;" %( i+14) ) + self.rowNum3 = 50 + self.rowNum4 = 100 + self.ts = 1537146000000 + for j in range(self.rowNum4): + tdSql.execute("create table test%d using test tags('beijing%d', 10)" % (j,j) ) + for i in range(self.rowNum3): + tdSql.execute("insert into test%d values(%d, %d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % 
(j, self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + print(datetime.datetime.now()) + # check data correct + tdSql.execute("show databases") + tdSql.execute("use db1") + tdSql.query("select count (tbname) from test") + tdSql.checkData(0, 0, 100) + tdSql.query("select count (*) from test") + tdSql.checkData(0, 0, 5000) + + + # delete useless file + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf manualTest/TD-5114/%s.sql" % testcaseFilename ) + +clients = TwoClients() +clients.initConnection() +# clients.getBuildPath() +clients.run() \ No newline at end of file diff --git a/tests/pytest/table/create_db_from_normal_db.py b/tests/pytest/table/create_db_from_normal_db.py index 8b5182c3b16ca31b2bbf966df294e2c4e4c12ff3..ad21ebfac27c099e3b3f3013b47e52d8bf93af15 100644 --- a/tests/pytest/table/create_db_from_normal_db.py +++ b/tests/pytest/table/create_db_from_normal_db.py @@ -34,7 +34,26 @@ class TDTestCase: tdSql.execute("drop table if exists db.state2;") tdSql.execute("create table db.state2 (ts timestamp, c1 int) tags (t binary(20));") tdSql.query("create table db.test2 using db.state2 tags('tt');") - tdSql.error("create table db.test22 using db.test2 tags('tt');") + tdSql.error("create table db.test22 using db.test2 tags('tt');") + + # test case for TS-1289 + tdSql.execute("create database test") + tdSql.execute("use test") + tdSql.execute("create table `metrics` (`ts` TIMESTAMP,`value` DOUBLE) TAGS (`labels` JSON)") + tdSql.execute('''CREATE TABLE `t_eb22c740776471c56ed97eff4951eb41` USING `metrics` TAGS ('{"__name__":"node_exporter:memory:used:percent","datacenter":"cvte + ","hostname":"p-tdengine-s-002","instance":"10.21.46.53:9100","ipaddress":"10.21.46.53","job":"node","product":"Prometheus","productline":"INFRA + "}');''') + + tdSql.query("show create table t_eb22c740776471c56ed97eff4951eb41") + sql = tdSql.getData(0, 1) + 
tdSql.execute("drop table t_eb22c740776471c56ed97eff4951eb41") + tdSql.query("show tables") + tdSql.checkRows(0) + + tdSql.execute(sql) + tdSql.query("show tables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 't_eb22c740776471c56ed97eff4951eb41') def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py index 6258024de8729d799690515a7133c5d9aa04330e..c405d4c5c67611ff87bee69e87c79820237419cb 100644 --- a/tests/pytest/tools/taosdumpTest2.py +++ b/tests/pytest/tools/taosdumpTest2.py @@ -11,15 +11,19 @@ # -*- coding: utf-8 -*- +from logging.config import dictConfig import sys import os from util.log import * from util.cases import * from util.sql import * from util.dnodes import * +import string +import random -class TDTestCase: +class TDTestCase: + def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) @@ -44,6 +48,13 @@ class TDTestCase: break return buildPath + def generateString(self, length): + chars = string.ascii_uppercase + string.ascii_lowercase + v = "" + for i in range(length): + v += random.choice(chars) + return v + def run(self): tdSql.prepare() @@ -70,14 +81,14 @@ class TDTestCase: os.system("rm /tmp/*.sql") os.system("rm /tmp/*.avro*") os.system( - "%staosdump --databases db -o /tmp -B 16384" % + "%staosdump --databases db -o /tmp " % binPath) tdSql.execute("drop database db") tdSql.query("show databases") tdSql.checkRows(0) - os.system("%staosdump -i /tmp" % binPath) + os.system("%staosdump -i /tmp -y" % binPath) tdSql.query("show databases") tdSql.checkRows(1) @@ -89,7 +100,33 @@ class TDTestCase: tdSql.checkData(0, 0, 'st') tdSql.query("select count(*) from t1") - tdSql.checkData(0, 0, self.numberOfRecords) + tdSql.checkData(0, 0, self.numberOfRecords) + + # test case for TS-1225 + tdSql.execute("create database test") + tdSql.execute("use test") + tdSql.execute("create table stb(ts timestamp, c1 binary(16374), c2 binary(16374), c3 
binary(16374)) tags(t1 nchar(256))") + tdSql.execute("insert into t1 using stb tags('t1') values(now, '%s', '%s', '%s')" % (self.generateString(16374), self.generateString(16374), self.generateString(16374))) + + os.system("rm /tmp/*.sql") + os.system("rm /tmp/*.avro*") + os.system("%staosdump -D test -o /tmp -y" % binPath) + + tdSql.execute("drop database test") + tdSql.query("show databases") + tdSql.checkRows(1) + + os.system("%staosdump -i /tmp -y" % binPath) + + tdSql.execute("use test") + tdSql.error("show vnodes '' ") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'stb') + + tdSql.query("select * from stb") + tdSql.checkRows(1) + os.system("rm -rf dump_result.txt") def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdumpTestBenchmark.py b/tests/pytest/tools/taosdumpTestBenchmark.py index 63af4b62b5a84444dd97b3889b7e1115aeaabb7c..97dcf3e54bc972bceec4b250df690436fa3bbbc3 100644 --- a/tests/pytest/tools/taosdumpTestBenchmark.py +++ b/tests/pytest/tools/taosdumpTestBenchmark.py @@ -52,15 +52,15 @@ class TDTestCase: return buildPath def insert_data(self, tbname, ts_start, count): - pre_insert = "insert into %s values"%tbname + pre_insert = "insert into %s values" % tbname sql = pre_insert - tdLog.debug("doing insert table %s rows=%d ..."%(tbname, count)) + tdLog.debug("doing insert table %s rows=%d ..." 
% (tbname, count)) for i in range(count): - sql += " (%d,%d)"%(ts_start + i*1000, i) - if i >0 and i%30000 == 0: + sql += " (%d,%d)" % (ts_start + i * 1000, i) + if i > 0 and i % 30000 == 0: tdSql.execute(sql) sql = pre_insert - # end sql + # end sql if sql != pre_insert: tdSql.execute(sql) @@ -72,14 +72,14 @@ class TDTestCase: os.makedirs("./taosdumptest") else: os.system("rm -rf ./taosdumptest") - os.makedirs("./taosdumptest") + os.makedirs("./taosdumptest") for i in range(2): - if not os.path.exists("./taosdumptest/tmp%d"%i): - os.makedirs("./taosdumptest/tmp%d"%i) + if not os.path.exists("./taosdumptest/tmp%d" % i): + os.makedirs("./taosdumptest/tmp%d" % i) else: - os.system("rm -rf ./taosdumptest/tmp%d"%i) - os.makedirs("./taosdumptest/tmp%d"%i) + os.system("rm -rf ./taosdumptest/tmp%d" % i) + os.makedirs("./taosdumptest/tmp%d" % i) buildPath = self.getBuildPath() if (buildPath == ""): @@ -93,129 +93,179 @@ class TDTestCase: tdSql.execute("drop database if exists dp2") tdSql.execute("create database if not exists dp1") tdSql.execute("use dp1") - tdSql.execute('''create table st0(ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, - c7 bool, c8 binary(20), c9 nchar(20), c11 tinyint unsigned, c12 smallint unsigned, c13 int unsigned, c14 bigint unsigned, c15 timestamp ) - tags(t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(20), t9 nchar(20), t11 tinyint unsigned, + tdSql.execute( + '''create table st0(ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, + c7 bool, c8 binary(20), c9 nchar(20), c11 tinyint unsigned, c12 smallint unsigned, c13 int unsigned, c14 bigint unsigned, c15 timestamp ) + tags(t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(20), t9 nchar(20), t11 tinyint unsigned, t12 smallint unsigned, t13 int unsigned, t14 bigint unsigned, t15 timestamp)''') - tdSql.execute('''create table st1(ts timestamp, c1 tinyint, c2 smallint, 
c3 int, c4 bigint, c5 float, c6 double, + tdSql.execute( + '''create table st1(ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 bool, c8 binary(20), c9 nchar(20), c11 tinyint unsigned, c12 smallint unsigned, c13 int unsigned, c14 bigint unsigned, c15 timestamp ) tags(jtag json)''') - intData = [] + intData = [] floatData = [] rowNum = 10 tabNum = 10 ts = 1537146000000 for j in range(tabNum): - tdSql.execute("create table st0_%d using st0 tags( %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d);" - % (j, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, ts)) + tdSql.execute( + "create table st0_%d using st0 tags( %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d);" % + (j, + j + + 1, + j + + 1, + j + + 1, + j + + 1, + j + + 0.1, + j + + 0.1, + j % + 2, + j + + 1, + j + + 1, + j + + 1, + j + + 1, + j + + 1, + j + + 1, + ts)) for i in range(rowNum): - tdSql.execute("insert into st0_%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d)" - % (j, ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, ts)) - intData.append(i + 1) - floatData.append(i + 0.1) + tdSql.execute( + "insert into st0_%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d)" % + (j, ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % + 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, ts)) + intData.append(i + 1) + floatData.append(i + 0.1) rowNum = 20 tabNum = 20 for j in range(tabNum): - tdSql.execute("create table st1_%d using st1 tags('{\"nv\":null,\"tea\":true,\"\":false,\" \":123%d,\"tea\":false}');" % (j, j + 1)) + tdSql.execute( + "create table st1_%d using st1 tags('{\"nv\":null,\"tea\":true,\"\":false,\" \":123%d,\"tea\":false}');" % + (j, j + 1)) for i in range(rowNum): - tdSql.execute("insert into st1_%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', 
'涛思数据%d', %d, %d, %d, %d, %d)" - % (j, self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, self.ts)) - intData.append(i + 1) - floatData.append(i + 0.1) + tdSql.execute( + "insert into st1_%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d, %d)" % + (j, self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % + 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, self.ts)) + intData.append(i + 1) + floatData.append(i + 0.1) # os.system("%staosBenchmark -f tools/taosdump-insert-dp1.json -y " % binPath) - - # create db1 , three stables:stb0,include ctables stb0_0 \ stb0_1,stb1 include ctables stb1_0 and stb1_1 - # \stb3,include ctables stb3_0 and stb3_1 + # create db1 , three stables:stb0,include ctables stb0_0 \ stb0_1,stb1 include ctables stb1_0 and stb1_1 + # \stb3,include ctables stb3_0 and stb3_1 # create general three tables gt0 gt1 gt2 tdSql.execute("create database if not exists dp2") tdSql.execute("use dp2") - tdSql.execute("create stable st0(ts timestamp, c01 int, c02 nchar(10)) tags(t1 int)") - tdSql.execute("create table st0_0 using st0 tags(0) st0_1 using st0 tags(1) ") - tdSql.execute("insert into st0_0 values(1614218412000,8600,'R')(1614218422000,8600,'E')") - tdSql.execute("insert into st0_1 values(1614218413000,8601,'A')(1614218423000,8601,'D')") - tdSql.execute("create stable st1(ts timestamp, c11 float, c12 nchar(10)) tags(t1 int)") - tdSql.execute("create table st1_0 using st1 tags(0) st1_1 using st1 tags(1) ") - tdSql.execute("insert into st1_0 values(1614218412000,8610.1,'R')(1614218422000,8610.1,'E')") - tdSql.execute("insert into st1_1 values(1614218413000,8611.2,'A')(1614218423000,8611.1,'D')") - tdSql.execute("create stable st2(ts timestamp, c21 float, c22 nchar(10)) tags(t1 int)") - tdSql.execute("create table st20 using st2 tags(0) st21 using st2 tags(1) ") - tdSql.execute("insert into st20 values(1614218412000,8620.3,'R')(1614218422000,8620.3,'E')") - 
tdSql.execute("insert into st21 values(1614218413000,8621.4,'A')(1614218423000,8621.4,'D')") - tdSql.execute("create table if not exists gt0 (ts timestamp, c00 int, c01 float) ") - tdSql.execute("create table if not exists gt1 (ts timestamp, c10 int, c11 double) ") - tdSql.execute("create table if not exists gt2 (ts timestamp, c20 int, c21 float) ") + tdSql.execute( + "create stable st0(ts timestamp, c01 int, c02 nchar(10)) tags(t1 int)") + tdSql.execute( + "create table st0_0 using st0 tags(0) st0_1 using st0 tags(1) ") + tdSql.execute( + "insert into st0_0 values(1614218412000,8600,'R')(1614218422000,8600,'E')") + tdSql.execute( + "insert into st0_1 values(1614218413000,8601,'A')(1614218423000,8601,'D')") + tdSql.execute( + "create stable st1(ts timestamp, c11 float, c12 nchar(10)) tags(t1 int)") + tdSql.execute( + "create table st1_0 using st1 tags(0) st1_1 using st1 tags(1) ") + tdSql.execute( + "insert into st1_0 values(1614218412000,8610.1,'R')(1614218422000,8610.1,'E')") + tdSql.execute( + "insert into st1_1 values(1614218413000,8611.2,'A')(1614218423000,8611.1,'D')") + tdSql.execute( + "create stable st2(ts timestamp, c21 float, c22 nchar(10)) tags(t1 int)") + tdSql.execute( + "create table st20 using st2 tags(0) st21 using st2 tags(1) ") + tdSql.execute( + "insert into st20 values(1614218412000,8620.3,'R')(1614218422000,8620.3,'E')") + tdSql.execute( + "insert into st21 values(1614218413000,8621.4,'A')(1614218423000,8621.4,'D')") + tdSql.execute( + "create table if not exists gt0 (ts timestamp, c00 int, c01 float) ") + tdSql.execute( + "create table if not exists gt1 (ts timestamp, c10 int, c11 double) ") + tdSql.execute( + "create table if not exists gt2 (ts timestamp, c20 int, c21 float) ") tdSql.execute("insert into gt0 values(1614218412700,8637,78.86155)") - tdSql.execute("insert into gt1 values(1614218413800,8638,78.862020199)") + tdSql.execute( + "insert into gt1 values(1614218413800,8638,78.862020199)") tdSql.execute("insert into gt2 
values(1614218413900,8639,78.863)") # self.insert_data("t", self.ts, 300*10000); # os.system("%staosBenchmark -f tools/taosdump-insert-dp2.json -y " % binPath) - - - # # taosdump data # os.system("%staosdump -o ./taosdumptest/tmp1 taosdump -h -ptaosdata -P 6030 -u root -o taosdumptest \ # -D dp1,dp3 -N -c /home/chr/TDinternal/community/sim/dnode1/cfg/taos.cfg -s -d deflate" % binPath) - os.system("%staosdump -o ./taosdumptest/tmp0 -D dp2,dp1 -T 8 -B 100000" % binPath) - os.system("%staosdump -o ./taosdumptest/tmp1 dp2 st0 st1_0 gt0 -T 8 -B 1000" % binPath) - - - #check taosdumptest/tmp0 + os.system( + "%staosdump -o ./taosdumptest/tmp0 -D dp2,dp1 -T 8" % + binPath) + os.system( + "%staosdump -o ./taosdumptest/tmp1 dp2 st0 st1_0 gt0 -T 8" % + binPath) + + # check taosdumptest/tmp0 tdSql.execute("drop database dp1") tdSql.execute("drop database dp2") os.system("%staosdump -i ./taosdumptest/tmp0 -T 8 " % binPath) tdSql.execute("reset query cache") - + tdSql.execute("use dp1") tdSql.query("show stables") tdSql.checkRows(3) for i in range(3): - for j in range(3): + for j in range(3): if j < 2: - if tdSql.queryResult[i][0] == 'st%d'%j: - tdSql.checkData(i, 4, (j+1)*10) + if tdSql.queryResult[i][0] == 'st%d' % j: + tdSql.checkData(i, 4, (j + 1) * 10) else: - if tdSql.queryResult[i][0] == 'st%d'%j: - tdSql.checkData(i, 4, 100002) + if tdSql.queryResult[i][0] == 'st%d' % j: + tdSql.checkData(i, 4, 100002) tdSql.query("select count(*) from st0") - tdSql.checkData(0, 0, 100) + tdSql.checkData(0, 0, 100) tdSql.query("select count(*) from st1") - tdSql.checkData(0, 0, 400) + tdSql.checkData(0, 0, 400) tdSql.query("select count(*) from st2") - tdSql.checkData(0, 0, 1000020) - + tdSql.checkData(0, 0, 1000020) tdSql.execute("use dp2") tdSql.query("show stables") tdSql.checkRows(3) for i in range(3): - for j in range(3): + for j in range(3): if j < 2: - if tdSql.queryResult[i][0] == 'st%d'%j: + if tdSql.queryResult[i][0] == 'st%d' % j: # print(i,"stb%d"%j) tdSql.checkData(i, 4, 2) 
else: - if tdSql.queryResult[i][0] == 'st%d'%j: + if tdSql.queryResult[i][0] == 'st%d' % j: tdSql.checkData(i, 4, 100002) tdSql.query("select count(*) from st0") - tdSql.checkData(0, 0, 4) + tdSql.checkData(0, 0, 4) tdSql.query("select count(*) from st1") - tdSql.checkData(0, 0, 4) + tdSql.checkData(0, 0, 4) tdSql.query("select count(*) from st2") - tdSql.checkData(0, 0, 1000024) + tdSql.checkData(0, 0, 1000024) tdSql.query("select ts from gt0") - tdSql.checkData(0,0,'2021-02-25 10:00:12.700') + tdSql.checkData(0, 0, '2021-02-25 10:00:12.700') tdSql.query("select c10 from gt1") tdSql.checkData(0, 0, 8638) tdSql.query("select c20 from gt2") tdSql.checkData(0, 0, 8639) - #check taosdumptest/tmp1 + # check taosdumptest/tmp1 tdSql.execute("drop database dp1") tdSql.execute("drop database dp2") - os.system("%staosdump -i ./taosdumptest/tmp1 -T 8 " % binPath) + os.system("%staosdump -i ./taosdumptest/tmp1 -T 8 " % binPath) tdSql.execute("reset query cache") tdSql.execute("use dp2") tdSql.query("show stables") @@ -223,14 +273,13 @@ class TDTestCase: tdSql.query("show tables") tdSql.checkRows(4) tdSql.query("select count(*) from st1_0") - tdSql.checkData(0,0,2) + tdSql.checkData(0, 0, 2) tdSql.query("select ts from gt0") - tdSql.checkData(0,0,'2021-02-25 10:00:12.700') + tdSql.checkData(0, 0, '2021-02-25 10:00:12.700') tdSql.error("use dp1") tdSql.error("select count(*) from st2_0") tdSql.error("select count(*) from gt2") - # #check taosdumptest/tmp2 # tdSql.execute("drop database dp1") # tdSql.execute("drop database dp2") @@ -250,10 +299,9 @@ class TDTestCase: # tdSql.error("select count(*) from gt1") # tdSql.error("use dp2") - # #check taosdumptest/tmp3 # tdSql.execute("drop database dp1") - # os.system("%staosdump -i ./taosdumptest/tmp3 -T 8 " % binPath) + # os.system("%staosdump -i ./taosdumptest/tmp3 -T 8 " % binPath) # tdSql.execute("use dp2") # tdSql.query("show stables") # tdSql.checkRows(2) @@ -269,7 +317,7 @@ class TDTestCase: # #check taosdumptest/tmp4 # 
tdSql.execute("drop database dp2") - # os.system("%staosdump -i ./taosdumptest/tmp4 -T 8 " % binPath) + # os.system("%staosdump -i ./taosdumptest/tmp4 -T 8 " % binPath) # tdSql.execute("use dp2") # tdSql.query("show stables") # tdSql.checkRows(2) @@ -290,10 +338,9 @@ class TDTestCase: # tdSql.error("select count(*) from st1_1") # tdSql.error("select count(*) from gt3") - # #check taosdumptest/tmp5 # tdSql.execute("drop database dp2") - # os.system("%staosdump -i ./taosdumptest/tmp5 -T 8 " % binPath) + # os.system("%staosdump -i ./taosdumptest/tmp5 -T 8 " % binPath) # tdSql.execute("use dp2") # tdSql.query("show stables") # tdSql.checkRows(3) @@ -330,7 +377,7 @@ class TDTestCase: # tdSql.execute("drop database dp1") # tdSql.execute("drop database dp2") # tdSql.execute("drop database dp3") - # os.system("%staosdump -i ./taosdumptest/tmp6 -T 8 " % binPath) + # os.system("%staosdump -i ./taosdumptest/tmp6 -T 8 " % binPath) # tdSql.execute("use dp3") # tdSql.query("show databases") # tdSql.checkRows(1) @@ -340,14 +387,14 @@ class TDTestCase: # tdSql.query("show tables") # tdSql.checkRows(1) # tdSql.query("select count(*) from st0_0") - # tdSql.checkData(0, 0, 2) + # tdSql.checkData(0, 0, 2) # tdSql.query("select * from st0 order by ts") # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') # tdSql.checkData(0,1,8600) # # check taosdumptest/tmp7 # tdSql.execute("drop database dp3") - # os.system("%staosdump -i ./taosdumptest/tmp7 -T 8 " % binPath) + # os.system("%staosdump -i ./taosdumptest/tmp7 -T 8 " % binPath) # tdSql.execute("use dp3") # tdSql.query("show databases") # tdSql.checkRows(1) @@ -364,14 +411,14 @@ class TDTestCase: # # check taosdumptest/tmp8 # tdSql.execute("drop database dp3") - # os.system("%staosdump -i ./taosdumptest/tmp8 -T 8 " % binPath) + # os.system("%staosdump -i ./taosdumptest/tmp8 -T 8 " % binPath) # tdSql.execute("use dp3") # tdSql.query("show stables") # tdSql.checkRows(1) # tdSql.query("show tables") # tdSql.checkRows(1) # 
tdSql.query("select count(*) from st0_0") - # tdSql.checkRows(0) + # tdSql.checkRows(0) # # tdSql.query("select * from st0 order by ts") # # tdSql.checkData(0,0,'2021-02-25 10:00:12.000000001') # # tdSql.checkData(0,1,8600)