diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index 463e59d27fcfd944bfef751d427a85bdea8e5045..a48ea49f59ce3fbccf5370de2276692f8e367fe3 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -95,7 +95,7 @@ TDengine 是一个高效的存储、查询、分析时序大数据的平台,
- [Grafana](/connections#grafana):获取并可视化保存在 TDengine 的数据
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过 IDEA 数据库管理工具可视化使用 TDengine
- [TDengineGUI](https://github.com/skye0207/TDengineGUI):基于 Electron 开发的跨平台 TDengine 图形化管理工具
-- [DataX](https://www.taosdata.com/blog/2021/10/26/3156.html):支持 TDeninge 和其他数据库之间进行数据迁移的工具
+- [DataX](https://www.taosdata.com/blog/2021/10/26/3156.html):支持 TDengine 和其他数据库之间进行数据迁移的工具
## [TDengine 集群的安装、管理](/cluster)
diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
index 8d555c4778187394b8849113d68afff6d1158a4d..72972dc4f7550c84797caf8be6b24d07c9ee77b0 100644
--- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
@@ -1,6 +1,6 @@
# 如何使用 taosBenchmark 进行性能测试
-自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活控制表的列数、数据类型、乱序比例以及并发线程数量。
+自从 TDengine 2019年 7 月开源以来,凭借创新的数据建模设计、快捷的安装方式、易用的编程接口和强大的数据写入查询性能博得了大量时序数据开发者的青睐。其中写入和查询性能往往令刚接触 TDengine 的用户称叹不已。为了便于用户在最短时间内就可以体验到 TDengine 的高性能特点,我们专门开发了一个应用程序 taosBenchmark (曾命名为 taosdemo)用于对 TDengine 进行写入和查询的性能测试,用户可以通过 taosBenchmark 轻松模拟大量设备产生海量数据的场景,并且可以通过 taosBenchmark 参数灵活按照实际场景定制表的个数(对应设备数)、表的列数(对应每个设备采样点)、数据类型、乱序数据比例、顺序或轮询插入方式、以及并发线程数量。
运行 taosBenchmark 很简单,通过下载 [TDengine 安装包](https://www.taosdata.com/cn/all-downloads/)或者自行下载 [TDengine 代码](https://github.com/taosdata/TDengine)编译都可以在安装目录或者编译结果目录中找到并运行。
@@ -153,7 +153,7 @@ insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms
```
$ taosBenchmark --help
--f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
+-f, --file=FILE The JSON configuration file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
-u, --user=USER The user name to use when connecting to the server.
-p, --password The password to use when connecting to the server.
-c, --config-dir=CONFIG_DIR Configuration directory.
@@ -163,7 +163,7 @@ $ taosBenchmark --help
-d, --database=DATABASE Destination database. By default is 'test'.
-a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3.
-m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'.
--s, --sql-file=FILE The select sql file.
+-s, --sql-file=FILE The select SQL file.
-N, --normal-table Use normal table flag.
-o, --output=FILE Direct output to the named file. By default use './output.txt'.
-q, --query-mode=MODE Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC.
@@ -339,6 +339,7 @@ select first(current) took 0.024105 second(s)
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
+ "use_sample_ts": "no",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
@@ -347,7 +348,7 @@ select first(current) took 0.024105 second(s)
}
```
-例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 csv 文件,来实现导入已有数据的功能。
+例如:我们可以通过 "thread_count" 和 "thread_count_create_tbl" 来为建表和插入数据指定不同数量的线程。可以通过 "child_table_exists"、"childtable_limit" 和 "childtable_offset" 的组合来使用多个 taosBenchmark 进程(甚至可以在不同的电脑上)对同一个超级表的不同范围子表进行同时写入。也可以通过 "data_source" 和 "sample_file" 来指定数据来源为 CSV 文件,来实现导入已有数据的功能。CSV 为半角逗号分隔的数据文件,每行数据列数需要和表的数据列数(如果是标签数据,是和标签数)相同。
## 使用 taosBenchmark 进行查询和订阅测试
@@ -401,16 +402,16 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅
"query_times": 每种查询类型的查询次数
"query_mode": 查询数据接口,"taosc":调用TDengine的c接口;“restful”:使用 RESTful 接口。可选项。缺省是“taosc”。
"specified_table_query": { 指定表的查询
-"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。
-"concurrent": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。
-"sqls": 可以添加多个sql语句,最多支持100条。
+"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。
+"concurrent": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。
+"sqls": 可以添加多个 SQL 语句,最多支持 100 条。
"sql": 查询语句。必选项。
"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。
"super_table_query": { 对超级表中所有子表的查询
"stblname": 超级表名称。必选项。
-"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。
-"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。
-"sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
+"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。
+"threads": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的 sqls。
+"sql": "SELECT COUNT(*) FROM xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。
```
@@ -463,7 +464,7 @@ taosBenchmark 不仅仅可以进行数据写入,也可以执行查询和订阅
"restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。(请注意执行用户需要对 dataDir 目录有读写权限)
"keepProgress": 保留订阅信息进度。yes表示保留订阅信息,no表示不保留。该值为yes,restart为no时,才能继续之前的订阅。
"resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。
-"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
+"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条 SQL 语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
```
## 结语
@@ -478,7 +479,7 @@ taosBenchmark支持两种配置参数的模式,一种是命令行参数,一
一、命令行参数
--f:指定taosBenchmark所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。
+-f:指定 taosBenchmark 所需参数的 JSON 配置文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是 NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。
-u: 用户名。可选项,缺省是“root“。
@@ -512,7 +513,7 @@ taosBenchmark支持两种配置参数的模式,一种是命令行参数,一
-T:并发线程数。可选项,缺省是10。
--i:两次sql插入的休眠时间间隔,缺省是0。
+-i:两次 SQL 插入的休眠时间间隔,缺省是0。
-S:两次插入间隔时间戳步长,缺省是1。
@@ -601,7 +602,7 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
- "use_sample_ts": "no",
+ "use_sample_ts": "no",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
@@ -632,9 +633,9 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"insert_interval": 两次发送请求的间隔时间。可选项,缺省是0,代表无人工设置的时间间隔,单位为ms。。
-"interlace_rows": 设置轮询插入每个单表数据的条目数,如果interlace_rows*childtable_count*supertable_num小于num_of_records_per_req时,则请求插入的数目以interlace_rows*childtable_count*supertable_num为准。可选项,缺省是0。
+"interlace_rows": 设置轮询插入每个单表数据的条目数,如果 interlace_rows * childtable_count * supertable_num 小于 num_of_records_per_req 时,则请求插入的数目以 interlace_rows * childtable_count * supertable_num 为准。可选项,缺省是 0。
-"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的sql不能大于maxsqllen,如果过大,则取taosd限制的1M长度(1048576)。0代表不插入数据,建议配置大于0。
+"num_of_records_per_req": 每条请求数据内容包含的插入数据记录数目,该数据组成的 SQL 不能大于 maxSqlLen,如果过大,则取 taosd 限制的1M长度(1048576)。0 代表不插入数据,建议配置大于 0。
"databases": [{
@@ -680,7 +681,7 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"auto_create_table": 子表的创建方式,“yes”:自动建表;"no":提前建表。可选项,缺省是“no”。当 child_table_exists 为 “yes” 时此参数将自动置为 no 。
-"batch_create_tbl_num": 一个sql批量创建子表的数目。
+"batch_create_tbl_num": 一个 SQL 批量创建子表的数目。
"data_source": 插入数据来源,"rand":实例随机生成;“sample”:从样例文件中读取。可选项。缺省是“rand”。
@@ -706,31 +707,31 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"start_timestamp": 子表中记录时间戳的起始值,支持"2020-10-01 00:00:00.000"和“now”两种格式,可选项,缺省是“now”。
-"sample_format": 当插入数据源选择“sample”时,sample文件的格式,"csv":csv格式,每列的值与子表的columns保持一致,但不包含第1列的时间戳。可选项,缺省是”csv”。目前仅仅支持csv格式的sample文件。
+"sample_format": 当插入数据源选择“sample”时,sample文件的格式,"csv":CSV 格式,每列的值与子表的 columns 保持一致,但不包含第1列的时间戳。可选项,缺省是”csv”。目前仅仅支持 CSV 格式的 sample 文件。
"sample_file":sample文件,包含路径和文件名。当插入数据源选择“sample”时,该项为必选项。
-"use_sample_ts":sample文件是否包含第一列时间戳,可选项: "yes" 和 "no", 默认 "no"。(注意:若为yes,则disorder_ratio 和 disorder_range失效)
+"use_sample_ts":sample 文件是否包含第一列时间戳,可选项: "yes" 和 "no", 默认 "no"。(注意:若为 yes,则 disorder_ratio 和 disorder_range 失效)
-"tags_file": 子表tags值文件,只能是csv文件格式,且必须与超级表的tags保持一致。当该项为非空时,表示子表的tags值从文件中获取;为空时,实例随机生成。可选项,缺省是空。
+"tags_file": 子表 tags 值文件,只能是 CSV 文件格式,且必须与超级表的tags保持一致。当该项为非空时,表示子表的tags值从文件中获取;为空时,实例随机生成。可选项,缺省是空。
-"columns": [{ 超级表的column列表,最大支持1024列(指所有普通列+超级列总和)。默认的第一列为时间类型,程序自动添加,不需要手工添加。
+"columns": [{ 超级表的 column 列表,最大支持 4096 列(指所有普通列+超级列总和)。默认的第一列为时间类型,程序自动添加,不需要手工添加。
"type": 该列的数据类型 ,必选项。
-"len": 该列的长度,只有type是BINARY或NCHAR时有效,可选项,缺省值是8。
+"len": 该列的长度,只有 type 是 BINARY 或 NCHAR 时有效,可选项,缺省值是 8。
-"count":该类型的连续列个数,可选项,缺省是1。
+"count":该类型的连续列个数,可选项,缺省是 1。
}],
-"tags": [{ 超级表的tags列表,type不能是timestamp类型, 最大支持128个。
+"tags": [{ 超级表的 tags 列表,type不能是 TIMESTAMP 类型, 最大支持 128 个。
"type": 该列的数据类型 ,必选项。
-"len": 该列的长度,只有type是BINARY或NCHAR时有效,可选项,缺省值是8。
+"len": 该列的长度,只有 type 是 BINARY 或 NCHAR 时有效,可选项,缺省值是 8。
-"count":该类型的连续列个数,可选项,缺省是1。
+"count":该类型的连续列个数,可选项,缺省是 1。
}]
@@ -798,11 +799,11 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"specified_table_query": { 指定表的查询
-"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。
+"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。
-"concurrent": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。
+"concurrent": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。
-"sqls": 可以添加多个sql语句,最多支持100条。
+"sqls": 可以添加多个 SQL 语句,最多支持100条。
"sql": 查询语句。必选项。
@@ -812,15 +813,15 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"stblname": 超级表名称。必选项。
-"query_interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。
+"query_interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是0。
-"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。
+"threads": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的 sqls。
"sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。
-注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
+注意:每条 SQL 语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。
@@ -882,11 +883,11 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"confirm_parameter_prompt": 执行过程中提示是否确认,为no时,执行过程无需手工输入enter。可选项,缺省是no。
-注意:这里的订阅查询sql目前只支持select * ,其余不支持。
+注意:这里的订阅查询 SQL 目前只支持 SELECT * ,其余不支持。
"specified_table_query": 指定表的订阅。
-"concurrent": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。
+"concurrent": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。
"mode": 订阅模式。目前支持同步和异步订阅,缺省是sync。
@@ -906,11 +907,11 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"stblname": 超级表名称。必选项。
-"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程都执行所有的sqls。
+"threads": 并发执行 sqls 的线程数,可选项,缺省是1。每个线程都执行所有的 sqls。
"mode": 订阅模式。
-"interval": 执行sqls的间隔,单位是秒。可选项,缺省是0。
+"interval": 执行 sqls 的间隔,单位是秒。可选项,缺省是 0。
"restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。
@@ -918,6 +919,6 @@ taosBenchmark支持3种功能的测试,包括插入、查询、订阅。但一
"resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。
-"sql": " select count(*) from xxxx "。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
+"sql": " SELECT COUNT(*) FROM xxxx "。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
-"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
+"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条 SQL 语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md
index 7e28d6de1a25c7f516472097caef28d05a97854f..9337da0e39e0851e434bf21aebd0a06b39ef3716 100644
--- a/documentation20/cn/05.insert/docs.md
+++ b/documentation20/cn/05.insert/docs.md
@@ -35,7 +35,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
**前言**
在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
- 目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
+ 目前,TDengine 所有官方支持的连接器均支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
**无模式写入行协议**
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index ac9d93f983e8338eb048a77f2ab9688bac4b288f..b3ff66765f39636a110bff38c1a11a934982fa52 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -2,7 +2,7 @@
TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用。
-
+
目前 TDengine 的连接器可支持的平台广泛,包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。对照矩阵如下:
@@ -13,7 +13,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java
| **JDBC** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ |
| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
-| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
+| **Node.js** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
@@ -22,7 +22,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java
注意:
* 在没有安装 TDengine 服务端软件的系统中使用连接器(除 RESTful 外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux 系统中文件名为 libtaos.so,Windows 系统中为 taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。
-* 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的 API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。
+* 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `taos_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的 API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。
* 升级 TDengine 到 2.0.8.0 版本的用户,必须更新 JDBC。连接 TDengine 必须升级 taos-jdbcdriver 到 2.0.12 及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
* 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。
@@ -434,9 +434,9 @@ typedef struct TAOS_MULTI_BIND {
**说明**
协议类型是枚举类型,包含以下三种格式:
- TSDB_SML_LINE_PROTOCOL:InfluxDB行协议(Line Protocol)
- TSDB_SML_TELNET_PROTOCOL: OpenTSDB文本行协议
- TSDB_SML_JSON_PROTOCOL: OpenTSDB Json协议格式
+ TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol)
+ TSDB_SML_TELNET_PROTOCOL: OpenTSDB 文本行协议
+ TSDB_SML_JSON_PROTOCOL: OpenTSDB JSON 协议格式
时间戳分辨率的定义,定义在 taos.h 文件中,具体内容如下:
TSDB_SML_TIMESTAMP_NOT_CONFIGURED = 0,
@@ -451,7 +451,7 @@ typedef struct TAOS_MULTI_BIND {
对于 OpenTSDB 的文本协议,时间戳的解析遵循其官方解析规则 — 按照时间戳包含的字符的数量来确认时间精度。
**支持版本**
- 该功能接口从2.3.0.0版本开始支持。
+ 该功能接口从 2.3.0.0 版本开始支持。
```c
#include
diff --git a/documentation20/cn/14.devops/03.immigrate/docs.md b/documentation20/cn/14.devops/03.immigrate/docs.md
index 1b003f5d0c9adab5a2da3ce22133eacaf5132df4..bff4d4a7137a3bcb2b0ec0dec891c7a584d9df0a 100644
--- a/documentation20/cn/14.devops/03.immigrate/docs.md
+++ b/documentation20/cn/14.devops/03.immigrate/docs.md
@@ -174,7 +174,7 @@ TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其
为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。
-DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于DataX的TDeninge数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。
+DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。
在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。
diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md
index 2929ee5e33eab7531e3777869a4d9dafea40e7b4..9870f0f25e4b73458357e35fa10d6a8e3575c5fa 100644
--- a/documentation20/en/00.index/docs.md
+++ b/documentation20/en/00.index/docs.md
@@ -38,6 +38,8 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Data Writing via Schemaless](/insert#schemaless): write one or multiple records with automatic table creation and adaptive table structure maintenance
- [Data Writing via Prometheus](/insert#prometheus): Configure Prometheus to write data directly without any code
- [Data Writing via Telegraf](/insert#telegraf): Configure Telegraf to write collected data directly without any code
+- [Data Writing via collectd](/insert#collectd): Configure collectd to write collected data directly without any code
+- [Data Writing via StatsD](/insert#statsd): Configure StatsD to write collected data directly without any code
- [Data Writing via EMQX](/insert#emq): Configure EMQX to write MQTT data directly without any code
- [Data Writing via HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
@@ -95,7 +97,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
- [TDengineGUI](https://github.com/skye0207/TDengineGUI): a TDengine management tool with Graphical User Interface
-- [DataX](https://github.com/taosdata/datax): a data immigaration tool with TDeninge supported
+- [DataX](https://github.com/taosdata/datax): a data migration tool that supports TDengine
## [Installation and Management of TDengine Cluster](/cluster)
diff --git a/documentation20/en/02.getting-started/02.taosdemo/docs.md b/documentation20/en/02.getting-started/02.taosdemo/docs.md
index ff017c01559d9137792a25dec8ffd052ad643cda..be2a0529d26ec8b620391b9e0fd1a5ba416c047b 100644
--- a/documentation20/en/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/en/02.getting-started/02.taosdemo/docs.md
@@ -1,4 +1,4 @@
-Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. Then user can easily simulate the scenario of a large number of devices generating a very large amount of data. User can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark customized parameters.
+Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users to experience the high performance and functions of TDengine in the shortest time, we developed an application called `taosBenchmark` (was named `taosdemo`) for insertion and querying performance testing of TDengine. Then users can easily simulate the scenario of a large number of devices generating a very large amount of data. Users can easily manipulate the number of tables, columns, data types, disorder ratio, and number of concurrent threads with taosBenchmark customized parameters.
Running taosBenchmark is very simple. Just download the [TDengine installation package](https://www.taosdata.com/cn/all-downloads/) or compiling the [TDengine code](https://github.com/taosdata/TDengine). It can be found and run in the installation directory or in the compiled results directory.
@@ -160,7 +160,7 @@ The complete list of taosBenchmark command-line arguments can be displayed via t
```
$ taosBenchmark --help
--f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
+-f, --file=FILE The JSON configuration file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
-u, --user=USER The user name to use when connecting to the server.
-p, --password The password to use when connecting to the server.
-c, --config-dir=CONFIG_DIR Configuration directory.
@@ -170,7 +170,7 @@ $ taosBenchmark --help
-d, --database=DATABASE Destination database. By default is 'test'.
-a, --replica=REPLICA Set the replica parameters of the database, By default use 1, min: 1, max: 3.
-m, --table-prefix=TABLEPREFIX Table prefix name. By default use 'd'.
--s, --sql-file=FILE The select sql file.
+-s, --sql-file=FILE The select SQL file.
-N, --normal-table Use normal table flag.
-o, --output=FILE Direct output to the named file. By default use './output.txt'.
-q, --query-mode=MODE Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC.
@@ -346,6 +346,7 @@ In addition to the command line approach, taosBenchmark also supports take a JSO
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
+ "use_sample_ts": "no",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
@@ -354,7 +355,9 @@ In addition to the command line approach, taosBenchmark also supports take a JSO
}
```
-For example, we can specify different number of threads for table creation and data insertion with "thread_count" and "thread_count_create_tbl". You can use a combination of "child_table_exists", "childtable_limit" and "childtable_offset" to use multiple taosBenchmark processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a csv file with "data_source" and "sample_file".
+For example, we can specify different numbers of threads for table creation and data insertion with `thread_count` and `thread_count_create_tbl`. You can use a combination of `child_table_exists`, `childtable_limit` and `childtable_offset` to use multiple taosBenchmark processes (even on different computers) to write to different ranges of child tables of the same super table at the same time. You can also import existing data by specifying the data source as a CSV file with `data_source` and `sample_file`. The argument `use_sample_ts` indicates whether the first column (the timestamp in TDengine) also uses the data of the specified CSV file.
+
+A CSV file is a plain text format that uses commas as separators between columns. The number of columns must be the same as the number of columns or tags of the table you intend to insert into.
# Use taosBenchmark for query and subscription testing
@@ -410,7 +413,7 @@ The following parameters are specific to the query in the JSON file.
"specified_table_query": { query for the specified table
"query_interval": interval to execute sqls, in seconds. Optional, default is 0.
"concurrent": the number of threads to execute sqls concurrently, optional, default is 1. Each thread executes all sqls.
-"sqls": multiple sql statements can be added, support up to 100 statements.
+"sqls": multiple SQL statements can be added, support up to 100 statements.
"sql": query statement. Mandatory.
"result": the name of the file where the query result will be written. Optional, default is null, means the query result will not be written to the file.
"super_table_query": { query for all sub-tables in the super table
@@ -470,7 +473,7 @@ The following are the meanings of the parameters specific to the subscription fu
"restart": subscription restart." yes": restart the subscription if it already exists, "no": continue the previous subscription. (Please note that the executing user needs to have read/write access to the dataDir directory)
"keepProgress": keep the progress of the subscription information. yes means keep the subscription information, no means don't keep it. The value is yes and restart is no to continue the previous subscriptions.
"resubAfterConsume": Used in conjunction with keepProgress to call unsubscribe after the subscription has been consumed the appropriate number of times and to subscribe again.
-"result": the name of the file to which the query result is written. Optional, default is null, means the query result will not be written to the file. Note: The file to save the result after each sql statement cannot be renamed, and the file name will be appended with the thread number when generating the result file.
+"result": the name of the file to which the query result is written. Optional, default is null, means the query result will not be written to the file. Note: The file to save the result after each SQL statement cannot be renamed, and the file name will be appended with the thread number when generating the result file.
```
# Conclusion
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index db538f45f709442d4fbf1691d8be80e15595ee41..5608a46489a9af9814bb016071f3eb3cf9ea6e98 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -35,7 +35,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
[Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
sudo apt-get update
-apt-get policy tdengine
+apt-cache policy tdengine
sudo apt-get install tdengine
```
diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md
index 76be97d4f2cfefa4eaecdbc8c1f7a125e5d3cd54..942a1c26d66a0c25717e4b2bd58461abc705bf6c 100644
--- a/documentation20/en/05.insert/docs.md
+++ b/documentation20/en/05.insert/docs.md
@@ -4,7 +4,7 @@ TDengine supports multiple ways to write data, including SQL, Prometheus, Telegr
## Data Writing via SQL
-Applications insert data by executing SQL insert statements through C/C++, Java, Go, C#, Python, Node.js Connectors, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
+Applications insert data by executing SQL insert statements through C/C++, Java, Go, C#, Python, Node.js connectors, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
@@ -36,7 +36,7 @@ For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosd
**Introduction**
In many IoT applications, data collection is often used in intelligent control, business analysis and device monitoring etc. As fast application upgrade and iteration, or hardware adjustment, data collection metrics can change rapidly over time. To provide solutions to such use cases, from version 2.2.0.0, TDengine supports writing data via Schemaless. When using Schemaless, action of pre-creating table before inserting data is no longer needed anymore. Tables, data columns and tags can be created automatically. Schemaless can also add additional data columns to tables if necessary, to make sure data can be properly stored into TDengine.
- TDengine C/C++ Connector provides Schemaless API. Please see [Schemaless data writing API](https://www.taosdata.com/en/documentation/connector#schemaless) for detailed data writing format.
+ All of TDengine's official connectors now provide Schemaless APIs. Please see [Schemaless data writing API](https://www.taosdata.com/en/documentation/connector#schemaless) for detailed data writing format.
Super table and corresponding child tables created via Schemaless are identical to the ones created via SQL, so inserting data into these tables via SQL is also supported. Note that child table names are generated via Schemaless are following special rules through tags mapping. Therefore, child table names are usually not meaningful in terms of readability.
**Schemaless writing protocols**
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index c74dc7fb0bd2252a9d517b5bde8fdd794a5c158e..fccd0c70361f804189b8f44072ba78c9f8626bbc 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -22,7 +22,7 @@ Note: ● stands for that has been verified by official tests; ○ stands for th
Note:
- To access the TDengine database through connectors (except RESTful) in the system without TDengine server software, it is necessary to install the corresponding version of the client installation package to make the application driver (the file name is libtaos.so in Linux system and taos.dll in Windows system) installed in the system, otherwise, the error that the corresponding library file cannot be found will occur.
-- All APIs that execute SQL statements, such as `tao_query()`, `taos_query_a()`, `taos_subscribe()` in C/C++ Connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
+- All APIs that execute SQL statements, such as `taos_query()`, `taos_query_a()`, `taos_subscribe()` in C/C++ connector, and APIs corresponding to them in other languages, can only execute one SQL statement at a time. If the actual parameters contain multiple statements, their behavior is undefined.
- Users upgrading to TDengine 2.0. 8.0 must update the JDBC connection. TDengine must upgrade taos-jdbcdriver to 2.0.12 and above.
- No matter which programming language connector is selected, TDengine version 2.0 and above recommends that each thread of database application establish an independent connection or establish a connection pool based on threads to avoid mutual interference between threads of "USE statement" state variables in the connection (but query and write operations of the connection are thread-safe).
@@ -347,6 +347,108 @@ Gets the result set of the statement. The result set is used in the same way as
Execution completed, release all resources.
+- `char * taos_stmt_errstr(TAOS_STMT *stmt)`
+
+Gets the error message when any stmt API call returns an error.
+
+
+### Schemaless writing API
+
+In addition to writing data using SQL or using the parameter binding API, writing can also be done using Schemaless, which eliminates the need to create a super table/data sub-table data structure in advance and writes data directly, while the TDengine system automatically creates and maintains the required table structure based on the written data content. The use of Schemaless is described in the Schemaless Writing section, and the C/C++ API used with it is described here.
+
+- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
+
+ **Function Description**
+
+ This interface writes the text data of the line protocol to TDengine.
+
+ **Parameter Description**
+
+ taos: database connection, the database connection established by taos_connect function.
+
+ lines: text data. A pattern-free text string that meets the parsing format requirements.
+
+ numLines: the number of lines of the text data, cannot be 0.
+
+ protocol: the protocol type of the lines, used to identify the format of the text data.
+
+ precision: precision string of the timestamp in the text data.
+
+ **Return Value**
+
+ TAOS_RES structure, the application can get the error message by using taos_errstr and also get the error code by using taos_errno.
+
+ In some cases, the returned TAOS_RES is NULL, in which case taos_errno can still be called to safely get the error code information.
+
+ The returned TAOS_RES needs to be freed by the caller, otherwise a memory leak will occur.
+
+ **Description**
+
+ The protocol type is enumerated and contains the following three formats.
+
+ TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol)
+
+ TSDB_SML_TELNET_PROTOCOL: OpenTSDB Text Line Protocol
+
+ TSDB_SML_JSON_PROTOCOL: OpenTSDB JSON protocol format
+
+ The timestamp resolution is defined in the taos.h file, as follows
+
+ TSDB_SML_TIMESTAMP_NOT_CONFIGURED = 0,
+
+ TSDB_SML_TIMESTAMP_HOURS,
+
+ TSDB_SML_TIMESTAMP_MINUTES,
+
+ TSDB_SML_TIMESTAMP_SECONDS,
+
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS,
+
+ TSDB_SML_TIMESTAMP_MICRO_SECONDS,
+
+ TSDB_SML_TIMESTAMP_NANO_SECONDS
+
+ Note that the timestamp resolution parameter only takes effect when the protocol type is TSDB_SML_LINE_PROTOCOL.
+
+ For OpenTSDB text protocols, the timestamp resolution follows the official resolution rules - the time precision is determined by the number of characters contained in the timestamp.
+
+ **Supported versions**
+
+ This functional interface is supported since version 2.3.0.0.
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <taos.h>
+
+int main() {
+  const char* host = "127.0.0.1";
+  const char* user = "root";
+  const char* passwd = "taosdata";
+
+  // connect to server
+  TAOS* taos = taos_connect(host, user, passwd, "test", 0);
+
+  // prepare the line string
+  char* lines1[] = {
+      "stg,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+      "stg,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833641000000"
+  };
+
+  // schema-less insert
+  TAOS_RES* res = taos_schemaless_insert(taos, lines1, 2, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+  if (taos_errno(res) != 0) {
+    printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res));
+  }
+
+  // the returned TAOS_RES must be freed by the caller to avoid a memory leak
+  taos_free_result(res);
+
+  // close the connection
+  taos_close(taos);
+  return 0;
+}
+```
+
### Continuous query interface
TDengine provides time-driven real-time stream computing APIs. You can perform various real-time aggregation calculation operations on tables (data streams) of one or more databases at regular intervals. The operation is simple, only APIs for opening and closing streams. The details are as follows:
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index b6c8b3de3f9b1b0d84f9b1ea5eecc496cf58ba0c..f648baa9744cf00545e3f96b736661dce6e958e0 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -385,7 +385,7 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) {
if (i == num_fields - 1) {
if (fields[i].type == TSDB_DATA_TYPE_JSON) {
- sprintf(result + strlen(result) - 1, "%s'", ")");
+ sprintf(result + strlen(result) - 1, "'%s", ")");
} else {
sprintf(result + strlen(result) - 1, "%s", ")");
}
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 5aec7b686f84314c8efdea09d377a5a6c948ac30..4442338a7bbc8789f883f7a427687130c50a548f 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -1113,7 +1113,7 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSql
static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "invalid query expression";
const char* msg2 = "top/bottom query does not support order by value in time window query";
- const char* msg3 = "unique function does not supportted in time window query";
+ const char* msg3 = "unique/state function does not supportted in time window query";
/*
* invalid sql:
@@ -1125,7 +1125,8 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
if (pExpr->base.functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pExpr->base.functionId == TSDB_FUNC_UNIQUE) {
+ if (pExpr->base.functionId == TSDB_FUNC_UNIQUE || pExpr->base.functionId == TSDB_FUNC_STATE_COUNT ||
+ pExpr->base.functionId == TSDB_FUNC_STATE_DURATION) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -2717,6 +2718,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg28 = "the second paramter of diff should be 0 or 1";
const char* msg29 = "key timestamp column cannot be used to unique/mode/tail function";
const char* msg30 = "offset is out of range [0, 100]";
+ const char* msg31 = "state function can not be used in subquery";
switch (functionId) {
case TSDB_FUNC_COUNT: {
@@ -2815,7 +2817,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_STDDEV:
case TSDB_FUNC_LEASTSQR:
case TSDB_FUNC_ELAPSED:
- case TSDB_FUNC_MODE: {
+ case TSDB_FUNC_MODE:
+ case TSDB_FUNC_STATE_COUNT:
+ case TSDB_FUNC_STATE_DURATION:{
// 1. valid the number of parameters
int32_t numOfParams =
(pItem->pNode->Expr.paramList == NULL) ? 0 : (int32_t)taosArrayGetSize(pItem->pNode->Expr.paramList);
@@ -2823,10 +2827,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// no parameters or more than one parameter for function
if (pItem->pNode->Expr.paramList == NULL ||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED &&
- functionId != TSDB_FUNC_DIFF && numOfParams != 1) ||
+ functionId != TSDB_FUNC_DIFF && functionId != TSDB_FUNC_STATE_COUNT && functionId != TSDB_FUNC_STATE_DURATION && numOfParams != 1) ||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) ||
(functionId == TSDB_FUNC_ELAPSED && numOfParams != 1 && numOfParams != 2) ||
- (functionId == TSDB_FUNC_DIFF && numOfParams != 1 && numOfParams != 2)) {
+ (functionId == TSDB_FUNC_DIFF && numOfParams != 1 && numOfParams != 2) ||
+ (functionId == TSDB_FUNC_STATE_COUNT && numOfParams != 3) ||
+ (functionId == TSDB_FUNC_STATE_DURATION && numOfParams != 3 && numOfParams != 4)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -2865,6 +2871,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
+ if ((functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) &&
+ pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg31);
+ }
+
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
// functions can not be applied to tags
@@ -2907,6 +2918,24 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
+ if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) {
+ SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
+ TSDB_KEYSIZE, 0, TSDB_KEYSIZE, false);
+ tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName));
+
+ SColumnList ids = createColumnList(1, 0, 0);
+ insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
+ aAggs[TSDB_FUNC_TS].name, pExpr);
+
+ pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_PRJ, &index, pSchema->type,
+ pSchema->bytes, getNewResColId(pCmd), 0, false);
+ tstrncpy(pExpr->base.aliasName, pParamElem->pNode->columnName.z, pParamElem->pNode->columnName.n+1);
+ ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ insertResultField(pQueryInfo, colIndex + 1, &ids, pExpr->base.resBytes, (int32_t)pExpr->base.resType,
+ pExpr->base.aliasName, pExpr);
+ }
+
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd),
intermediateResSize, false);
@@ -2985,6 +3014,41 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
+ } else if (functionId == TSDB_FUNC_STATE_COUNT || functionId == TSDB_FUNC_STATE_DURATION) {
+ if (pParamElem[1].pNode->tokenId != TK_ID || !isValidStateOper(pParamElem[1].pNode->columnName.z, pParamElem[1].pNode->columnName.n)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+ tscExprAddParams(&pExpr->base, pParamElem[1].pNode->columnName.z, TSDB_DATA_TYPE_BINARY, pParamElem[1].pNode->columnName.n);
+
+ if (pParamElem[2].pNode->tokenId != TK_INTEGER && pParamElem[2].pNode->tokenId != TK_FLOAT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+ tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pParamElem[2].pNode->value);
+
+ if (functionId == TSDB_FUNC_STATE_DURATION){
+ if (numOfParams == 4) {
+ // unit must be 1s 1m 1h
+ if (pParamElem[3].pNode->tokenId != TK_TIMESTAMP || (pParamElem[3].pNode->value.i64 != MILLISECOND_PER_SECOND * 1000000L &&
+ pParamElem[3].pNode->value.i64 != MILLISECOND_PER_MINUTE * 1000000L && pParamElem[3].pNode->value.i64 != MILLISECOND_PER_HOUR * 1000000L)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+ if (info.precision == TSDB_TIME_PRECISION_MILLI) {
+ pParamElem[3].pNode->value.i64 /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
+ } else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
+ pParamElem[3].pNode->value.i64 /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
+ }
+
+ tVariantAssign(&pExpr->base.param[pExpr->base.numOfParams++], &pParamElem[3].pNode->value);
+ }else{
+ int64_t tmp = TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_NANO);
+ if (info.precision == TSDB_TIME_PRECISION_MILLI) {
+ tmp /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
+ } else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
+ tmp /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
+ }
+ tscExprAddParams(&pExpr->base, (char *)&tmp, TSDB_DATA_TYPE_BIGINT, sizeof(tmp));
+ }
+ }
}
SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
@@ -6760,7 +6824,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg8 = "only column in groupby clause allowed as order column";
const char* msg10 = "not support distinct mixed with order by";
const char* msg11 = "not support order with udf";
- const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg";
+ const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg/stateCount/stateDuration";
const char* msg13 = "order by json tag, key is too long";
const char* msg14 = "order by json tag, must be json->'key'";
@@ -7570,7 +7634,8 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
int32_t f = pExpr->base.functionId;
if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) ||
f == TSDB_FUNC_DIFF || f == TSDB_FUNC_SCALAR_EXPR || f == TSDB_FUNC_DERIVATIVE ||
- f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG)
+ f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_STATE_COUNT ||
+ f == TSDB_FUNC_STATE_DURATION)
{
isProjectionFunction = true;
break;
@@ -8207,7 +8272,8 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
int16_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
- functionId == TSDB_FUNC_SCALAR_EXPR || functionId == TSDB_FUNC_TS_DUMMY) {
+ functionId == TSDB_FUNC_SCALAR_EXPR || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_STATE_COUNT ||
+ functionId == TSDB_FUNC_STATE_DURATION) {
continue;
}
@@ -8414,12 +8480,11 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "functions/columns not allowed in group by query";
- const char* msg2 = "projection query on columns not allowed";
const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";
- const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed only support group by tbname";
- const char* msg7 = "unique function does not supportted in state window query";
+ const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed/stateCount/stateDuration only support group by tbname";
+ const char* msg7 = "unique/state function does not supportted in state window query";
// only retrieve tags, group by is not supportted
if (tscQueryTags(pQueryInfo)) {
@@ -8452,31 +8517,11 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
- /*
- * group by normal columns.
- * Check if the column projection is identical to the group by column or not
- */
- if (f == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- bool qualified = false;
- for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
- SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
- if (pColIndex->colId == pExpr->base.colInfo.colId) {
- qualified = true;
- break;
- }
- }
-
- if (!qualified) {
- return invalidOperationMsg(msg, msg2);
- }
- }
-
if (f < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * f - 1);
if (pUdfInfo->funcType == TSDB_UDF_TYPE_SCALAR) {
return invalidOperationMsg(msg, msg1);
}
-
continue;
}
@@ -8486,7 +8531,8 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA ||
- f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED)) {
+ f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED ||
+ f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION)) {
for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
if (j == 0) {
@@ -8499,7 +8545,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
}
- if (pQueryInfo->stateWindow && f == TSDB_FUNC_UNIQUE){
+ if (pQueryInfo->stateWindow && (f == TSDB_FUNC_UNIQUE || f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION)){
return invalidOperationMsg(msg, msg7);
}
@@ -8509,11 +8555,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
f != TSDB_FUNC_UNIQUE && f != TSDB_FUNC_TAIL) {
return invalidOperationMsg(msg, msg1);
}
-
-
- if (f == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidOperationMsg(msg, msg1);
- }
}
if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) {
@@ -8550,7 +8591,8 @@ int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
- if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED) {
+ if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED ||
+ f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION) {
for (int32_t j = 0; j < upNum; ++j) {
SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, j);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pUp, 0);
@@ -9547,12 +9589,23 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
+ pLeft->functionId = isValidFunction(pLeft->Expr.operand.z, pLeft->Expr.operand.n);
+ if (pLeft->functionId < 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
if (pLeft->Expr.paramList) {
size_t size = taosArrayGetSize(pLeft->Expr.paramList);
for (int32_t i = 0; i < size; i++) {
tSqlExprItem* pParamItem = taosArrayGet(pLeft->Expr.paramList, i);
-
tSqlExpr* pExpr1 = pParamItem->pNode;
+
+ if (pLeft->functionId == TSDB_FUNC_STATE_COUNT || pLeft->functionId == TSDB_FUNC_STATE_DURATION){
+ if (i == 1 && pExpr1->tokenId == TK_ID) continue;
+ if (pLeft->functionId == TSDB_FUNC_STATE_DURATION && i == 3 && pExpr1->tokenId == TK_TIMESTAMP)
+ continue;
+ }
+
if (pExpr1->tokenId != TK_ALL &&
pExpr1->tokenId != TK_ID &&
pExpr1->tokenId != TK_STRING &&
@@ -9582,11 +9635,6 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNode
}
}
- pLeft->functionId = isValidFunction(pLeft->Expr.operand.z, pLeft->Expr.operand.n);
- if (pLeft->functionId < 0) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
-
return handleExprInHavingClause(pCmd, pQueryInfo, pSelectNodeList, pExpr, parentOptr);
}
@@ -10110,7 +10158,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg5 = "only tag query not compatible with normal column filter";
const char* msg6 = "not support stddev/percentile in the outer query yet";
- const char* msg7 = "derivative/twa/rate/irate/diff/tail requires timestamp column exists in subquery";
+ const char* msg7 = "derivative/twa/rate/irate/diff/tail/stateCount/stateDuration requires timestamp column exists in subquery";
const char* msg8 = "condition missing for join query";
const char* msg9 = "not support 3 level select";
@@ -10195,7 +10243,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
int32_t f = pExpr->base.functionId;
if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE ||
- f == TSDB_FUNC_RATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_TAIL) {
+ f == TSDB_FUNC_RATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_TAIL ||
+ f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 2e4f32533f845c76ee0286cddc0dc45583d170c4..c821c25987042d0c26c4aa302a142544a08b943c 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -1184,7 +1184,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
tlv->len = htonl(sizeof(int16_t) * 2);
*(int16_t*)tlv->value = htons(pTableMeta->sversion);
*(int16_t*)(tlv->value+sizeof(int16_t)) = htons(pTableMeta->tversion);
- pMsg += sizeof(*tlv) + ntohl(tlv->len);
+ pMsg += sizeof(*tlv) + sizeof(int16_t) * 2;
tlv = (STLV *)pMsg;
tlv->type = htons(TLV_TYPE_END_MARK);
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index ce3487cf5b412f87f9bfe5d75f8ed2516b6a77c4..91e2d0c388d0b6dd50ca999d35be7712f8bb18dd 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -328,7 +328,8 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG &&
f != TSDB_FUNC_TS && f != TSDB_FUNC_SCALAR_EXPR && f != TSDB_FUNC_DIFF &&
- f != TSDB_FUNC_DERIVATIVE && !TSDB_FUNC_IS_SCALAR(f)) {
+ f != TSDB_FUNC_DERIVATIVE && !TSDB_FUNC_IS_SCALAR(f) &&
+ f != TSDB_FUNC_STATE_COUNT && f != TSDB_FUNC_STATE_DURATION) {
return false;
}
}
@@ -347,8 +348,8 @@ bool tscIsDiffDerivLikeQuery(SQueryInfo* pQueryInfo) {
continue;
}
- if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE ||
- f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG) {
+ if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG ||
+ f == TSDB_FUNC_STATE_COUNT || f == TSDB_FUNC_STATE_DURATION) {
return true;
}
}
@@ -356,7 +357,6 @@ bool tscIsDiffDerivLikeQuery(SQueryInfo* pQueryInfo) {
return false;
}
-
bool tscHasColumnFilter(SQueryInfo* pQueryInfo) {
// filter on primary timestamp column
if (pQueryInfo->window.skey != INT64_MIN || pQueryInfo->window.ekey != INT64_MAX) {
@@ -5094,7 +5094,6 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;
pQueryAttr->range = pQueryInfo->range;
-
if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor
pQueryAttr->window = pQueryInfo->window;
} else {
diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c
index fc8872ea70d7bb4e1b0496bc678ed2a5bacf13c2..2774d9e2fabd11a3fcb45d650dc696518d504501 100644
--- a/src/common/src/texpr.c
+++ b/src/common/src/texpr.c
@@ -1364,7 +1364,7 @@ int32_t exprValidateTimeNode(char *msgbuf, tExprNode *pExpr) {
}
char fraction[32] = {0};
NUM_TO_STRING(child->resultType, &child->pVal->i64, sizeof(fraction), fraction);
- int32_t tsDigits = strlen(fraction);
+ int32_t tsDigits = (int32_t)strlen(fraction);
if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS &&
tsDigits != TSDB_TIME_PRECISION_MILLI_DIGITS &&
tsDigits != TSDB_TIME_PRECISION_MICRO_DIGITS &&
@@ -1444,7 +1444,7 @@ int32_t exprValidateTimeNode(char *msgbuf, tExprNode *pExpr) {
if (child0->pVal->nType == TSDB_DATA_TYPE_BIGINT) {
char fraction[32] = {0};
NUM_TO_STRING(child0->resultType, &child0->pVal->i64, sizeof(fraction), fraction);
- int32_t tsDigits = strlen(fraction);
+ int32_t tsDigits = (int32_t)strlen(fraction);
if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS &&
tsDigits != TSDB_TIME_PRECISION_MILLI_DIGITS &&
tsDigits != TSDB_TIME_PRECISION_MICRO_DIGITS &&
@@ -1525,7 +1525,7 @@ int32_t exprValidateTimeNode(char *msgbuf, tExprNode *pExpr) {
if (child[i]->pVal->nType == TSDB_DATA_TYPE_BIGINT) {
char fraction[32] = {0};
NUM_TO_STRING(child[i]->resultType, &child[i]->pVal->i64, sizeof(fraction), fraction);
- int32_t tsDigits = strlen(fraction);
+ int32_t tsDigits = (int32_t)strlen(fraction);
if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS &&
tsDigits != TSDB_TIME_PRECISION_MILLI_DIGITS &&
tsDigits != TSDB_TIME_PRECISION_MICRO_DIGITS &&
@@ -2234,7 +2234,7 @@ void convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, i
if (type == TSDB_DATA_TYPE_BINARY) {
newColData = calloc(1, charLen + 1);
memcpy(newColData, varDataVal(inputData), charLen);
- taosParseTime(newColData, timeVal, charLen, timePrec, 0);
+ taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, 0);
tfree(newColData);
} else if (type == TSDB_DATA_TYPE_NCHAR) {
newColData = calloc(1, charLen / TSDB_NCHAR_SIZE + 1);
@@ -2245,7 +2245,7 @@ void convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec, i
return;
}
newColData[len] = 0;
- taosParseTime(newColData, timeVal, len + 1, timePrec, 0);
+ taosParseTime(newColData, timeVal, len + 1, (int32_t)timePrec, 0);
tfree(newColData);
} else {
uError("input type should be binary/nchar string");
@@ -2304,7 +2304,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn
char fraction[20] = {0};
bool hasFraction = false;
NUM_TO_STRING(pInputs[0].type, inputData[0], sizeof(fraction), fraction);
- int32_t tsDigits = strlen(fraction);
+ int32_t tsDigits = (int32_t)strlen(fraction);
char buf[64] = {0};
int64_t timeVal;
@@ -2328,7 +2328,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn
int32_t len = (int32_t)strlen(buf);
if (hasFraction) {
- int32_t fracLen = strlen(fraction) + 1;
+ int32_t fracLen = (int32_t)strlen(fraction) + 1;
char *tzInfo = strchr(buf, '+');
if (tzInfo) {
memmove(tzInfo + fracLen, tzInfo, strlen(tzInfo));
@@ -2399,7 +2399,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn
char buf[20] = {0};
NUM_TO_STRING(TSDB_DATA_TYPE_BIGINT, &timeVal, sizeof(buf), buf);
- int32_t tsDigits = strlen(buf);
+ int32_t tsDigits = (int32_t)strlen(buf);
timeUnit = timeUnit * 1000 / factor;
switch (timeUnit) {
case 0: { /* 1u */
@@ -2572,7 +2572,7 @@ void vectorTimeFunc(int16_t functionId, tExprOperandInfo *pInputs, int32_t numIn
}
char buf[20] = {0};
NUM_TO_STRING(TSDB_DATA_TYPE_BIGINT, &timeVal[j], sizeof(buf), buf);
- int32_t tsDigits = strlen(buf);
+ int32_t tsDigits = (int32_t)strlen(buf);
if (tsDigits <= TSDB_TIME_PRECISION_SEC_DIGITS) {
timeVal[j] = timeVal[j] * 1000000000;
} else if (tsDigits == TSDB_TIME_PRECISION_MILLI_DIGITS) {
diff --git a/src/connector/C#/examples/Main.cs b/src/connector/C#/examples/Main.cs
index 9d2ab85a87a541fbd891cf318f454d5d8ba001fd..dbf29fc17675e9f18633ab2e997dce3138a33800 100644
--- a/src/connector/C#/examples/Main.cs
+++ b/src/connector/C#/examples/Main.cs
@@ -14,12 +14,15 @@ namespace AsyncQueryExample
IntPtr conn = UtilsTools.TDConnection();
AsyncQuerySample asyncQuery = new AsyncQuerySample();
- asyncQuery.RunQueryAsync(conn,"query_async");
-
- SubscribeSample subscribeSample = new SubscribeSample();
+ asyncQuery.RunQueryAsync(conn, "query_async");
+
+ SubscribeSample subscribeSample = new SubscribeSample();
subscribeSample.RunSubscribeWithCallback(conn, "subscribe_with_callback");
subscribeSample.RunSubscribeWithoutCallback(conn, "subscribe_without_callback");
+ StreamSample streamSample = new StreamSample();
+ streamSample.RunStreamOption1(conn, "stream_sample_option1");
+
UtilsTools.CloseConnection(conn);
}
}
diff --git a/src/connector/C#/examples/StreamSample.cs b/src/connector/C#/examples/StreamSample.cs
new file mode 100644
index 0000000000000000000000000000000000000000..e90a82c4e3679f41004fa8295783e72fdf6fe643
--- /dev/null
+++ b/src/connector/C#/examples/StreamSample.cs
@@ -0,0 +1,107 @@
+using System;
+using TDengineDriver;
+using Sample.UtilsTools;
+using System.Runtime.InteropServices;
+using System.Threading;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Example
+{
+ public class StreamSample
+ {
+
+ public void RunStreamOption1(IntPtr conn, string table)
+ {
+
+ PrepareData(conn, table);
+ StreamOpenCallback streamOpenCallback = new StreamOpenCallback(StreamCallback);
+ IntPtr stream = TDengine.OpenStream(conn, $"select count(*) from {table} interval(1m) sliding(30s)", streamOpenCallback, 0, IntPtr.Zero, null);
+ if (stream == IntPtr.Zero)
+ {
+ throw new Exception("OPenStream failed");
+ }
+ else
+ {
+ Thread.Sleep(100000);
+ AddNewData(conn, table, 5,true);
+ Thread.Sleep(100000);
+
+ TDengine.CloseStream(stream);
+ Console.WriteLine("stream done");
+
+ }
+ }
+
+
+ public void StreamCallback(IntPtr param, IntPtr taosRes, IntPtr taosRow)
+ {
+
+ if (taosRes == IntPtr.Zero || taosRow == IntPtr.Zero)
+ {
+ return;
+ }
+ else
+ {
+ var rowData = new List