diff --git a/Jenkinsfile b/Jenkinsfile index 6109e4811a34fb159e6b17c3bf7c5af3c884ea3d..4a584fbb35973cc1da82c235f0f47bebe3c50ca2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -234,11 +234,19 @@ pipeline { cd ${WKC}/tests/examples/nodejs npm install td2.0-connector > /dev/null 2>&1 node nodejsChecker.js host=localhost + node test1970.js + cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport + npm install td2.0-connector > /dev/null 2>&1 + node nanosecondTest.js + ''' sh ''' cd ${WKC}/tests/examples/C#/taosdemo mcs -out:taosdemo *.cs > /dev/null 2>&1 - echo '' |./taosdemo + echo '' |./taosdemo -c /etc/taos + cd ${WKC}/tests/connectorTest/C#Test/nanosupport + mcs -out:nano *.cs > /dev/null 2>&1 + echo '' |./nano ''' sh ''' cd ${WKC}/tests/gotest @@ -256,13 +264,11 @@ pipeline { steps { pre_test() - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - timeout(time: 60, unit: 'MINUTES'){ - sh ''' - cd ${WKC}/tests/pytest - ./crash_gen.sh -a -p -t 4 -s 2000 - ''' - } + timeout(time: 60, unit: 'MINUTES'){ + sh ''' + cd ${WKC}/tests/pytest + ./crash_gen.sh -a -p -t 4 -s 2000 + ''' } timeout(time: 60, unit: 'MINUTES'){ // sh ''' @@ -453,4 +459,4 @@ pipeline { ) } } -} \ No newline at end of file +} diff --git a/cmake/define.inc b/cmake/define.inc index 9ee09c86b066afb9edb56d0cd97f671b0bb4df6a..337a143e1f129d433f12d6772e9ed9c43d57c423 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -133,8 +133,10 @@ IF (TD_LINUX) IF (TD_MEMORY_SANITIZER) SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG") + MESSAGE(STATUS "memory sanitizer detected as true") ELSE () SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG") + MESSAGE(STATUS "memory sanitizer detected as false") ENDIF () SET(RELEASE_FLAGS "-O3 -Wno-error") diff --git a/cmake/version.inc b/cmake/version.inc index 261e3e8162f1bae6498f6edc09f23cb243e1f5c2..dfeb26454f9b6278132c3a92640a6aa8611456da 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -86,7 +86,7 @@ ENDIF () MESSAGE(STATUS "============= compile version parameter information start ============= ") MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER}) MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE}) -MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT}) +MESSAGE(STATUS "community commit id:" ${TD_VER_GIT}) MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL}) MESSAGE(STATUS "build date:" ${TD_VER_DATE}) MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE}) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index 18bdc15d30430516c3ae6c847fc448477003dd66..eb136715522e34be4a70fe569ca653faf538f4ee 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -44,13 +44,15 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 * [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理 * [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件 +* [UDF](/taos-sql/udf):用户定义函数的创建和管理方法 * [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码 ## [高效写入数据](/insert) -* [SQL写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录 -* [Prometheus写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入 -* [Telegraf写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入 +* [SQL 写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录 +* [Schemaless 写入](/insert#schemaless):免于预先建表,将数据直接写入时自动维护元数据结构 +* [Prometheus 写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入 +* 
[Telegraf 写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入 * [EMQ X Broker](/insert#emq):配置EMQ X,不用任何代码,就可将MQTT数据直接写入 * [HiveMQ Broker](/insert#hivemq):配置HiveMQ,不用任何代码,就可将MQTT数据直接写入 diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index dd7c20fe186270f1130c3916512a25df3d131169..a37afa9212911f4e48efe5e923607f3f2e05422a 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -208,7 +208,7 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); | **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● | -注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 +注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。 请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。 diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md index 8adafc73c21bc915a4564ccf530441bf33a16bda..3e9877b4465eac2ca05d99c88a620a0c6bf89689 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -250,7 +250,7 @@ vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和 创建DB时,系统并不会马上分配资源。但当创建一张表时,系统将看是否有已经分配的vnode, 且该vnode是否有空余的表空间,如果有,立即在该有空位的vnode创建表。如果没有,系统将从集群中,根据当前的负载情况,在一个dnode上创建一新的vnode, 然后创建表。如果DB有多个副本,系统不是只创建一个vnode,而是一个vgroup(虚拟数据节点组)。系统对vnode的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。 -每张表的meda data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。 +每张表的meta data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。 ### 数据分区 diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index 45a4537d9b2c3a579e07978d5822df24bdb895f6..586997373726c835c0fcdb6d80820b534f21d758 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -43,7 +43,7 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG 每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。 -一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。 +一张超级表最多容许 1024 列,如果一个采集点采集的物理量个数超过 1024,需要建多张超级表来处理。一个系统可以有多个 DB,一个 DB 里可以有一到多个超级表。(从 2.1.7.0 版本开始,列数限制由 1024 列放宽到了 4096 列。) ## 创建表 diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index f055b0c25ba4811336084d6a2a58d6752b9db1e5..556d51759cb126f3b49b032b6efeb7e9924f864c 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -2,7 +2,7 @@ TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 -## SQL写入 +## SQL 写入 应用通过C/C++、JDBC、GO、C#或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: ```mysql @@ -27,11 +27,74 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。 -## Prometheus直接写入 +## Schemaless 写入 + +在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 
2.2.0.0 版本开始，提供 Schemaless 写入方式，可以免于预先创建超级表/数据子表，而是随着数据写入，自动创建与数据对应的存储结构。并且在必要时，Schemaless 将自动增加必要的数据列，保证用户写入的数据可以被正确存储。目前，TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口，详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless) 章节。这里对 Schemaless 的数据表达格式进行描述。
+
+### Schemaless 数据行协议
+
+Schemaless 采用一个字符串来表达最终存储的一个数据行（可以向 Schemaless 写入 API 中一次传入多个字符串来实现多个数据行的批量写入），其格式约定如下：
+```json
+measurement,tag_set field_set timestamp
+```
+
+其中,
+* measurement 将作为数据表名。它与 tag_set 之间使用一个英文逗号来分隔。
+* tag_set 将作为标签数据，其格式形如 `<tag_key>=<tag_value>,<tag_key>=<tag_value>`，也即可以使用英文逗号来分隔多个标签数据。它与 field_set 之间使用一个半角空格来分隔。
+* field_set 将作为普通列数据，其格式形如 `<field_key>=<field_value>,<field_key>=<field_value>`，同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。
+* timestamp 即本行数据对应的主键时间戳。
+
+在 Schemaless 的数据行协议中，tag_set、field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说：
+* 如果两边有英文双引号，表示 BINARY(32) 类型。例如 `"abc"`。
+* 如果两边有英文双引号而且带有 L 前缀，表示 NCHAR(32) 类型。例如 `L"报错信息"`。
+* 对空格、等号（=）、逗号（,）、双引号（"），前面需要使用反斜杠（\）进行转义。（都指的是英文半角符号）
+* 数值类型将通过后缀来区分数据类型：
+  - 没有后缀，为 FLOAT 类型；
+  - 后缀为 f32，为 FLOAT 类型；
+  - 后缀为 f64，为 DOUBLE 类型；
+  - 后缀为 i8，表示为 TINYINT (INT8) 类型；
+  - 后缀为 i16，表示为 SMALLINT (INT16) 类型；
+  - 后缀为 i32，表示为 INT (INT32) 类型；
+  - 后缀为 i64，表示为 BIGINT (INT64) 类型；
+* t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
+
+timestamp 位置的时间戳通过后缀来声明时间精度，具体如下：
+* 不带任何后缀的长整数会被当作微秒来处理；
+* 当后缀为 s 时，表示秒时间戳；
+* 当后缀为 ms 时，表示毫秒时间戳；
+* 当后缀为 us 时，表示微秒时间戳；
+* 当后缀为 ns 时，表示纳秒时间戳；
+* 当时间戳为 0 时，表示采用客户端的当前时间（因此，同一批提交的数据中，时间戳 0 会被解释为同一个时间点，于是就有可能导致时间戳重复）。
+
+例如，如下 Schemaless 数据行表示：向名为 st 的超级表下的 t1 标签为 3（BIGINT 类型）、t2 标签为 4（DOUBLE 类型）、t3 标签为 "t3"（BINARY 类型）的数据子表，写入 c1 列为 3（BIGINT 类型）、c2 列为 false（BOOL 类型）、c3 列为 "passit"（NCHAR 类型）、c4 列为 4（DOUBLE 类型）、主键时间戳为 1626006833639000000（纳秒精度）的一行数据。
+```json
+st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns
+```
+
+需要注意的是，如果描述数据类型后缀时使用了错误的大小写，或者为数据指定的数据类型有误，均可能引发报错提示而导致数据写入失败。
+
+### Schemaless 的处理逻辑
+
+Schemaless 按照如下原则来处理行数据：
+1. 当 tag_set 中有 ID 字段时，该字段的值将作为数据子表的表名。
+2. 没有 ID 字段时，将使用 `measurement + tag_value1 + tag_value2 + ...` 的 md5 值来作为子表名。
+3. 如果指定的超级表名不存在，则 Schemaless 会创建这个超级表。
+4. 如果指定的数据子表不存在，则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
+5. 如果数据行中指定的标签列或普通列不存在，则 Schemaless 会在超级表中增加对应的标签列或普通列（只增不减）。
+6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值，那么这些列的值在这一行中会被置为 NULL。
+7. 对 BINARY 或 NCHAR 列，如果数据行中所提供值的长度超出了列类型的限制，那么 Schemaless 会增加该列允许存储的字符长度上限（只增不减），以保证数据的完整保存。
+8. 如果指定的数据子表已经存在，而且本次指定的标签列取值跟已保存的值不一样，那么最新的数据行中的值会覆盖旧的标签列取值。
+9.
整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 + +**注意:**Schemaless 所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。 + +关于 Schemaless 的字符串编码处理、时区设置等,均会沿用 TAOSC 客户端的设置。 + +## Prometheus 直接写入 [Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需对Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用Bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 -### 从源代码编译blm_prometheus +### 从源代码编译 blm_prometheus 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 @@ -46,11 +109,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。 -### 安装Prometheus +### 安装 Prometheus 通过Prometheus的官网下载安装。具体请见:[下载地址](https://prometheus.io/download/)。 -### 配置Prometheus +### 配置 Prometheus 参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置: @@ -60,7 +123,8 @@ go build 启动Prometheus后,可以通过taos客户端查询确认数据是否成功写入。 -### 启动blm_prometheus程序 +### 启动 blm_prometheus 程序 + blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。 ```bash --tdengine-name @@ -94,7 +158,8 @@ remote_write: - url: "http://10.1.2.3:8088/receive" ``` -### 查询prometheus写入数据 +### 查询 prometheus 写入数据 + prometheus产生的数据格式如下: ```json { @@ -105,10 +170,10 @@ prometheus产生的数据格式如下: instance="192.168.99.116:8443", job="kubernetes-apiservers", le="125000", - resource="persistentvolumes", s - cope="cluster", + resource="persistentvolumes", + scope="cluster", verb="LIST", - version=“v1" + version="v1" } } ``` @@ -118,11 +183,11 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf直接写入 +## Telegraf 直接写入 [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 -### 从源代码编译blm_telegraf +### 从源代码编译 blm_telegraf 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: @@ -139,11 +204,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。 -### 安装Telegraf +### 安装 Telegraf 目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。 -### 配置Telegraf +### 配置 Telegraf 修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。 @@ -160,7 +225,8 @@ go build 关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 -### 启动blm_telegraf程序 +### 启动 blm_telegraf 程序 + blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。 ```bash @@ -196,7 +262,7 @@ blm_telegraf对telegraf提供服务的端口号。 url = "http://10.1.2.3:8089/telegraf" ``` -### 查询telegraf写入数据 +### 查询 telegraf 写入数据 telegraf产生的数据格式如下: ```json diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index edd81a49cd334859fd9581a601822ee782bad45a..def8d4a905eaa6ab63256673aad04bd159a5478d 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -46,7 +46,7 @@ TDengine 
的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 -注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.1.8.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如: +注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.2.0.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如: ```sql INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); ``` diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index ea3f9a4d55181e319848e1a5e14abb1409a3a5e9..68b8d6f052384615ca0643663cca6ddab3b7e1d3 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -17,7 +17,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 | **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | -其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。 +其中 ● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。 注意: @@ -64,7 +64,10 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 编辑taos.cfg文件(默认路径/etc/taos/taos.cfg),将firstEP修改为TDengine服务器的End Point,例如:h1.taos.com:6030 -**提示: 如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。** +**提示: ** + +1. **如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。** +2. **为防止与服务器端连接时出现“unable to resolve FQDN”错误,建议确认客户端的hosts文件已经配置正确的FQDN值。** **Windows x64/x86** @@ -96,7 +99,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 **提示:** 1. **如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:`192.168.1.99 h1.taos.com` ** -2.**卸载:运行unins000.exe可卸载TDengine应用驱动。** +2. **卸载:运行unins000.exe可卸载TDengine应用驱动。** ### 安装验证 @@ -309,7 +312,7 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 ### 参数绑定 API -除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。 +除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。文档中有时也会把此功能称为“原生接口写入”。 从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下: 1. 调用 `taos_stmt_init` 创建参数绑定对象; @@ -400,6 +403,25 @@ typedef struct TAOS_MULTI_BIND { (2.1.3.0 版本新增) 用于在其他 stmt API 返回错误(返回错误码或空指针)时获取错误信息。 + +### Schemaless 方式写入接口 + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](https://www.taosdata.com/cn/documentation/insert#schemaless) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `int taos_insert_lines(TAOS* taos, char* lines[], int numLines)` + + (2.2.0.0 版本新增) + 以 Schemaless 格式写入多行数据。其中: + * taos:调用 taos_connect 返回的数据库连接。 + * lines:由 char 字符串指针组成的数组,指向本次想要写入数据库的多行数据。 + * numLines:lines 数据的总行数。 + + 返回值为 0 表示写入成功,非零值表示出错。具体错误代码请参见 [taoserror.h](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) 文件。 + + 说明: + 1. 此接口是一个同步阻塞式接口,使用时机与 `taos_query()` 一致。 + 2. 
在调用此接口之前,必须先调用 `taos_select_db()` 来确定目前是在向哪个 DB 来写入。 + ### 连续查询接口 TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下: @@ -654,7 +676,7 @@ conn.close() 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 -注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.1.8.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。) +注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。) ### 安装 @@ -695,7 +717,7 @@ http://:/rest/sql/[db_name] - fqnd: 集群中的任一台主机 FQDN 或 IP 地址 - port: 配置文件中 httpPort 配置项,缺省为 6041 -- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.1.8.0 版本开始支持) +- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持) 例如:http://h1.taos.com:6041/rest/sql/test 是指向地址为 h1.taos.com:6041 的 url,并将默认使用的数据库库名设置为 test。 @@ -754,7 +776,7 @@ curl -u username:password -d '' :/rest/sql/[db_name] - data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。 - rows: 表明总共多少行数据。 -column_meta 中的列类型说明: +column_meta 中的列类型说明: * 1:BOOL * 2:TINYINT * 3:SMALLINT @@ -984,15 +1006,18 @@ go build ### Go连接器的使用 -TDengine提供了GO驱动程序包`taosSql`.`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine。 +TDengine提供了GO驱动程序包`taosSql`。`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine。 ```go import ( "database/sql" - _ "github.com/taosdata/driver-go/taosSql" + _ "github.com/taosdata/driver-go/v2/taosSql" ) ``` + **提示**:下划线与双引号之间必须有一个空格。 +`taosSql` 的 v2 版本进行了重构,分离出内置数据库操作接口 `database/sql/driver` 到目录 `taosSql`;订阅、stmt等其他功能放到目录 `af`。 + ### 常用API - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index f39138d61de35a09f5988014e95f284cb0f9e26f..1f6f84dd1a3e66da5a64d07358d97e6f89bdc8c0 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -79,13 +79,13 @@ Query OK, 1 row(s) in set (0.006385s) taos> ``` -上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEP。 +上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEp。 ## 启动后续数据节点 将后续的数据节点添加到现有集群,具体有以下几步: -1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) +1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEp参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) 2. 
在第一个数据节点,使用CLI程序taos,登录进TDengine系统,执行命令: @@ -110,7 +110,7 @@ taos> **提示:** -- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEP。 +- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。 - firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。 - 接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。 - 两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。 @@ -119,9 +119,14 @@ taos> 上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。 + +**提示:** + +- 以下所有执行命令的操作需要先登陆进TDengine系统,必要时请使用root权限。 + ### 添加数据节点 -执行CLI程序taos,使用root账号登录进系统,执行: +执行CLI程序taos,执行: ``` CREATE DNODE "fqdn:port"; @@ -131,7 +136,7 @@ CREATE DNODE "fqdn:port"; ### 删除数据节点 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql DROP DNODE "fqdn:port | dnodeID"; @@ -153,7 +158,7 @@ DROP DNODE "fqdn:port | dnodeID"; 手动将某个vnode迁移到指定的dnode。 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql ALTER DNODE BALANCE "VNODE:-DNODE:"; @@ -169,7 +174,7 @@ ALTER DNODE BALANCE "VNODE:-DNODE:"; ### 查看数据节点 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql SHOW DNODES; ``` @@ -180,8 +185,9 @@ SHOW DNODES; 为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql +USE SOME_DATABASE; SHOW VGROUPS; ``` diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 29e49aa9023446bbf9dd60749f5aa0ccfdadaf70..fe9417a861a5284b25525f16c87ab8d07d43181e 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -568,6 +568,35 @@ COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会 需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。 + +## 浮点数有损压缩 + +在车联网等物联网智能应用场景中,经常会采集和存储海量的浮点数类型数据,如果能更高效地对此类数据进行压缩,那么不但能够节省数据存储的硬件资源,也能够因降低磁盘 I/O 数据量而提升系统性能表现。 + +从 2.1.6.0 版本开始,TDengine 提供一种名为 TSZ 的新型数据压缩算法,无论设置为有损压缩还是无损压缩,都能够显著提升浮点数类型数据的压缩率表现。目前该功能以可选模块的方式进行发布,可以通过添加特定的编译参数来启用该功能(也即常规安装包中暂未包含该功能)。 + +**需要注意的是,该功能一旦启用,效果是全局的,也即会对系统中所有的 FLOAT、DOUBLE 类型的数据生效。同时,在启用了浮点数有损压缩功能后写入的数据,也无法被未启用该功能的版本载入,并有可能因此而导致数据库服务报错退出。** + +### 创建支持 TSZ 压缩算法的 TDengine 版本 + +TSZ 模块保存在单独的代码仓库 https://github.com/taosdata/TSZ 中。可以通过以下步骤创建包含此模块的 TDengine 版本: +1. TDengine 中的插件目前只支持通过 SSH 的方式拉取和编译,所以需要自己先配置好通过 SSH 拉取 GitHub 代码的环境。 +2. `git clone git@github.com:taosdata/TDengine -b your_branchname --recurse-submodules` 通过 `--recurse-submodules` 使依赖模块的源代码可以被一并下载。 +3. `mkdir debug && cd debug` 进入单独的编译目录。 +4. `cmake .. -DTSZ_ENABLED=true` 其中参数 `-DTSZ_ENABLED=true` 表示在编译过程中加入对 TSZ 插件功能的支持。如果成功激活对 TSZ 模块的编译,那么 CMAKE 过程中也会显示 `build with TSZ enabled` 字样。 +5. 
编译成功后,包含 TSZ 浮点压缩功能的插件便已经编译进了 TDengine 中了,可以通过调整 taos.cfg 中的配置参数来使用此功能了。 + +### 通过配置文件来启用 TSZ 压缩算法 + +如果要启用 TSZ 压缩算法,除了在 TDengine 的编译过程需要声明启用 TSZ 模块之外,还需要在 taos.cfg 配置文件中对以下参数进行设置: +* lossyColumns:配置要进行有损压缩的浮点数数据类型。参数值类型为字符串,含义为:空 - 关闭有损压缩;float - 只对 FLOAT 类型进行有损压缩;double - 只对 DOUBLE 类型进行有损压缩;float|double:对 FLOAT 和 DOUBLE 类型都进行有损压缩。默认值是“空”,也即关闭有损压缩。 +* fPrecision:设置 float 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 FLOAT,最小值为 0.0,最大值为 100,000.0。缺省值为 0.00000001(1E-8)。 +* dPrecision:设置 double 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 DOUBLE,最小值为 0.0,最大值为 100,000.0。缺省值为 0.0000000000000001(1E-16)。 +* maxRange:表示数据的最大浮动范围。一般无需调整,在数据具有特定特征时可以配合 range 参数来实现极高的数据压缩率。默认值为 500。 +* range:表示数据大体浮动范围。一般无需调整,在数据具有特定特征时可以配合 maxRange 参数来实现极高的数据压缩率。默认值为 100。 + +**注意:**对 cfg 配置文件中参数值的任何调整,都需要重新启动 taosd 才能生效。并且以上选项为全局配置选项,配置后对所有数据库中所有表的 FLOAT 及 DOUBLE 类型的字段生效。 + ## 文件目录结构 安装TDengine后,默认会在操作系统中生成下列目录或文件: @@ -652,7 +681,7 @@ rmtaos - 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符 - 表的列名:不能包含特殊字符,不能超过 64 个字符 - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” -- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳 +- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) - 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) - 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte - 数据库副本数:不能超过 3 @@ -665,7 +694,7 @@ rmtaos - 库的个数:仅受节点个数限制 - 单个库上虚拟节点个数:不能超过 64 个 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 -- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) 目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下: @@ -800,7 +829,7 @@ taos -n sync -P 6042 -h `taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP` -从 2.1.8.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: +从 2.2.0.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: -n:设为“speed”时,表示对网络速度进行诊断。 -h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 @@ -813,7 +842,7 @@ taos -n sync -P 6042 -h `taos -n fqdn -h ` -从 2.1.8.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下: +从 2.2.0.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下: -n:设为“fqdn”时,表示对 FQDN 解析进行诊断。 -h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 diff --git a/documentation20/cn/12.taos-sql/02.udf/docs.md b/documentation20/cn/12.taos-sql/02.udf/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..6e59fef88e3df79c6f223b44fc84e1dc45516b91 --- /dev/null +++ b/documentation20/cn/12.taos-sql/02.udf/docs.md @@ -0,0 +1,136 @@ +# UDF(用户定义函数) + +在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF 功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。 + +从 2.2.0.0 版本开始,TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。 + +## 用 C/C++ 语言来定义 UDF + +TDengine 提供 3 个 UDF 的源代码示例,分别为: +* [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) +* [abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) +* [sum_double.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sum_double.c) + +### 无需中间变量的标量函数 + +[add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现。其功能为:对传入的一个数据列(可能因 WHERE 子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。 + +这一具体的处理逻辑在函数 `void add_one(char* data, 
short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` 中定义。这类用于实现 UDF 的基础计算逻辑的函数,我们称为 udfNormalFunc,也就是对行数据块的标量计算函数。需要注意的是,udfNormalFunc 的参数项是固定的,用于按照约束完成与引擎之间的数据交换。 + +- udfNormalFunc 中各参数的具体含义是: + * data:存有输入的数据。 + * itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](https://www.taosdata.com/cn/documentation/connector#column_meta)。例如 4 用于表示 INT 型。 + * iBytes:输入数据中每个值会占用的字节数。 + * numOfRows:输入数据的总行数。 + * ts:主键时间戳在输入中的列数据。 + * dataOutput:输出数据的缓冲区。 + * interBuf:系统使用的中间临时缓冲区,通常用户逻辑无需对 interBuf 进行处理。 + * tsOutput:主键时间戳在输出时的列数据。 + * numOfOutput:输出数据的个数。 + * oType:输出数据的类型。取值含义与 itype 参数一致。 + * oBytes:输出数据中每个值会占用的字节数。 + * buf:计算过程的中间变量缓冲区。 + +其中 buf 参数需要用到一个自定义结构体 SUdfInit。在这个例子中,因为 add_one 的计算过程无需用到中间变量缓存,所以可以把 SUdfInit 定义成一个空结构体。 + +### 无需中间变量的聚合函数 + +[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。 + +其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`),再将每个数据块的计算结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成每个子表的聚合结果。如果查询指令涉及超级表,那么最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把子表的计算结果聚合为超级表的计算结果。 + +值得注意的是,udfNormalFunc、udfMergeFunc、udfFinalizeFunc 之间,函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名。udfMergeFunc 的函数名后缀 `_merge`、udfFinalizeFunc 的函数名后缀 `_finalize`,是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。 + +- udfMergeFunc 用于对计算中间结果进行聚合。本例中 udfMergeFunc 对应的实现函数为 `void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`,其中各参数的具体含义是: + * data:udfNormalFunc 的输出组合在一起的数据,也就成为了 udfMergeFunc 的输入。 + * numOfRows:data 中数据的行数。 + * dataOutput:输出数据的缓冲区。 + * numOfOutput:输出数据的个数。 + * buf:计算过程的中间变量缓冲区。 + +- udfFinalizeFunc 用于对计算结果进行最终聚合。本例中 udfFinalizeFunc 对应的实现函数为 `void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`,其中各参数的具体含义是: + * dataOutput:输出数据的缓冲区。对 udfFinalizeFunc 来说,其输入数据也来自于这里。 + * interBuf:系统使用的中间临时缓冲区,与 udfNormalFunc 中的同名参数含义一致。 + * numOfOutput:输出数据的个数。 + * buf:计算过程的中间变量缓冲区。 + +同样因为 abs_max 的计算过程无需用到中间变量缓存,所以同样是可以把 SUdfInit 定义成一个空结构体。 + +### 使用中间变量的聚合函数 + +[sum_double.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sum_double.c) 也是一个聚合函数,功能是对一组数据输出求和结果的倍数。 + +出于功能演示的目的,在这个用户定义函数的实现方法中,用到了中间变量缓冲区 buf。因此,在这个源代码文件中,SUdfInit 就不再是一个空的结构体,而是定义了缓冲区的具体存储内容。 + +也正是因为用到了中间变量缓冲区,因此就需要对这一缓冲区进行初始化和资源释放。具体来说,也即对应 udfInitFunc(本例中,其实际的函数名是 `sum_double_init`)和 udfDestroyFunc(本例中,其实际的函数名是 `sum_double_destroy`)。其函数名命名规则同样是采取以 udfNormalFunc 的实际函数名为前缀,以 `_init` 和 `_destroy` 为后缀。系统会在初始化和资源释放时调用对应名称的函数。 + +- udfInitFunc 用于初始化中间变量缓冲区中的变量和内容。本例中 udfInitFunc 对应的实现函数为 `int sum_double_init(SUdfInit* buf)`,其中各参数的具体含义是: + * buf:计算过程的中间变量缓冲区。 + +- udfDestroyFunc 用于释放中间变量缓冲区中的变量和内容。本例中 udfDestroyFunc 对应的实现函数为 `void sum_double_destroy(SUdfInit* buf)`,其中各参数的具体含义是: + * buf:计算过程的中间变量缓冲区。 + +注意,UDF 的实现过程中需要小心处理对中间变量缓冲区的使用,如果使用不当则有可能导致内存泄露或对资源的过度占用,甚至导致系统服务进程崩溃等。 + +### UDF 实现方式的规则总结 + +根据所要实现的 UDF 类型不同,用户所要实现的功能函数内容也会有所区别: +* 无需中间变量的标量函数:结构体 SUdfInit 可以为空,需实现 udfNormalFunc。 +* 无需中间变量的聚合函数:结构体 SUdfInit 可以为空,需实现 udfNormalFunc、udfMergeFunc、udfFinalizeFunc。 +* 使用中间变量的标量函数:结构体 SUdfInit 需要具体定义,并需实现 udfNormalFunc、udfInitFunc、udfDestroyFunc。 +* 使用中间变量的聚合函数:结构体 SUdfInit 需要具体定义,并需实现 udfNormalFunc、udfInitFunc、udfDestroyFunc、udfMergeFunc、udfFinalizeFunc。 + +## 编译 UDF + +用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为 .so 链接库,之后才能载入 TDengine 系统。 + +例如,按照上一章节描述的规则准备好了用户定义函数的源代码 add_one.c,那么可以执行如下指令编译得到动态链接库文件: +```bash +gcc 
-g -O0 -fPIC -shared add_one.c -o add_one.so +``` + +这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。 + +## 在系统中管理和使用 UDF + +### 创建 UDF + +用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 + +在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。 + +- 创建标量函数:`CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize(B);` + * X:标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + * Y:包含 UDF 函数实现的动态链接库的库文件路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件); + * Z:此函数计算结果的数据类型,使用数字表示,含义与上文中 udfNormalFunc 的 itype 参数一致; + * B:系统使用的中间临时缓冲区大小,单位是字节,最小 0,最大 512,通常可以设置为 128。 + +- 创建聚合函数:`CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize(B);` + * X:标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + * Y:包含 UDF 函数实现的动态链接库的库文件路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件); + * Z:此函数计算结果的数据类型,使用数字表示,含义与上文中 udfNormalFunc 的 itype 参数一致; + * B:系统使用的中间临时缓冲区大小,单位是字节,最小 0,最大 512,通常可以设置为 128。 + +### 管理 UDF + +- 删除指定名称的用户定义函数:`DROP FUNCTION ids(X);` + * X:此参数的含义与 CREATE 指令中的 X 参数一致。 +- 显示系统中当前可用的所有 UDF:`SHOW FUNCTIONS;` + +### 调用 UDF + +在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: +```sql +SELECT X(c) FROM table/stable; +``` + +表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 + +## UDF 的一些使用限制 + +在当前版本下,使用 UDF 存在如下这些限制: +1. UDF 不能与系统内建的 SQL 函数混合使用; +2. UDF 只支持以单个数据列作为输入; +3. UDF 只要创建成功,就会被持久化存储到 MNode 节点中; +4. 无法通过 RESTful 接口来创建 UDF; +5. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。 diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 9981f1b7f83b416ec2fec6594b8cfa350a3e6348..d4ae0be7202c4a452024d58839dacef818cd4c64 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -233,7 +233,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ``` 说明: - 1) 列的最大个数为1024,最小个数为2; + 1) 列的最大个数为1024,最小个数为2;(从 2.1.7.0 版本开始,改为最多允许 4096 列) 2) 列名最大长度为64。 @@ -713,24 +713,79 @@ Query OK, 1 row(s) in set (0.001091s) | <= | smaller than or equal to | **`timestamp`** and all numeric types | | = | equal to | all types | | <> | not equal to | all types | +| is [not] null | is null or is not null | all types | | between and | within a certain range | **`timestamp`** and all numeric types | | in | match any value in a set | all types except first column `timestamp` | | like | match a wildcard string | **`binary`** **`nchar`** | -| % | match with any char sequences | **`binary`** **`nchar`** | -| _ | match with a single char | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. like 算子使用通配符字符串进行匹配检查。 * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。 - * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.1.8.0 版本开始支持) + * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持) * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) 3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 + * 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。 4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 + * 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。 5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 6. 
从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 + +### JOIN 子句 + +从 2.2.0.0 版本开始,TDengine 对内连接(INNER JOIN)中的自然连接(Natural join)操作实现了完整的支持。也即支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间”进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。 + +在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如: +```sql +SELECT * +FROM temp_tb_1 t1, pressure_tb_1 t2 +WHERE t1.ts = t2.ts +``` + +在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如: +```sql +SELECT * +FROM temp_stable t1, temp_stable t2 +WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; +``` + +类似地,也可以对多个子查询的查询结果进行 JOIN 操作。 + +注意,JOIN 操作存在如下限制要求: +1. 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。 +2. 在包含 JOIN 操作的查询语句中不支持 FILL。 +3. 暂不支持参与 JOIN 操作的表之间聚合后的四则运算。 +4. 不支持只对其中一部分表做 GROUP BY。 +5. JOIN 查询的不同表的过滤条件之间不能为 OR。 + + +### 嵌套查询 + +“嵌套查询”又称为“子查询”,也即在一条 SQL 语句中,“内层查询”的计算结果可以作为“外层查询”的计算对象来使用。 + +从 2.2.0.0 版本开始,TDengine 的查询引擎开始支持在 FROM 子句中使用非关联子查询(“非关联”的意思是,子查询不会用到父查询中的参数)。也即在普通 SELECT 语句的 tb_name_list 位置,用一个独立的 SELECT 语句来代替(这一 SELECT 语句被包含在英文圆括号内),于是完整的嵌套查询 SQL 语句形如: + +```mysql +SELECT ... FROM (SELECT ... FROM ...) ...; +``` + +说明: +1. 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。 +2. 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。 +3. 目前不能在“连续查询”功能中使用子查询。 +4. 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。 +5. 目前内层查询、外层查询均不支持 UNION 操作。 +6. 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。 + * 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。 +7. 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制: + * 计算函数部分: + 1. 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。 + 2. 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。 + * 外层查询中不支持 IN 算子,但在内层中可以使用。 + * 外层查询不支持 GROUP BY。 + -### UNION ALL 操作符 +### UNION ALL 子句 ```mysql SELECT ... 
@@ -1037,7 +1092,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ```mysql SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` - 功能说明:统计表/超级表中某列的值最后写入的非NULL值。 + 功能说明:统计表/超级表中某列的值最后写入的非 NULL 值。 返回结果数据类型:同应用的字段。 @@ -1047,9 +1102,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 说明: - 1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*); + 1)如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); - 2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。 + 2)如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 + + 3)在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 示例: ```mysql @@ -1198,7 +1255,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 限制:LAST_ROW()不能与INTERVAL一起使用。 + 限制:LAST_ROW() 不能与 INTERVAL 一起使用。 + + 说明:在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 示例: ```mysql @@ -1227,9 +1286,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 说明:(从 2.0.15.0 版本开始新增此函数)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。 + 说明:(从 2.0.15.0 版本开始新增此函数) - INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。 + 1)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。 + + 2)INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。 + + 3)单个 INTERP 函数查询只能够针对一个时间点进行查询,如果需要返回等时间间隔的断面数据,可以通过 INTERP 配合 EVERY 的方式来进行查询处理(而不是使用 INTERVAL),其含义是每隔固定长度的时间进行插值。 示例: ```sql @@ -1253,6 +1316,18 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 1 row(s) in set (0.003056s) ``` + 如下所示代码表示在时间区间 `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']` 中每隔 5 毫秒 进行一次断面计算。 + + ```sql + taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a); + ts | interp(current) | + ================================================= + 2017-07-14 18:40:00.000 | 10.04179 | + 2017-07-14 18:40:00.010 | 10.16123 | + Query OK, 2 row(s) in set (0.003487s) + + ``` + ### 计算函数 - **DIFF** @@ -1427,23 +1502,19 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P - 数据库名最大长度为 32。 - 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 -- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。 +- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。 - SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。 -- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 -## TAOS SQL其他约定 +## TAOS SQL 其他约定 **GROUP BY的限制** -TAOS SQL支持对标签、TBNAME进行GROUP BY操作,也支持普通列进行GROUP BY,前提是:仅限一列且该列的唯一值小于10万个。 - -**JOIN操作的限制** - -TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持两个表之间聚合后的四则运算。 +TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进行 GROUP BY,前提是:仅限一列且该列的唯一值小于 10 万个。 -**IS NOT NULL与不为空的表达式适用范围** +**IS NOT NULL 与不为空的表达式适用范围** -IS NOT NULL支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 +IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index d89b2adeb8561586abb6201fc639415a8347254f..3d6f03b30353524d55a4a49ea69625a519fe3ebe 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -98,7 +98,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支 ## 10. 我怎么创建超过1024列的表? 
-使用2.0及其以上版本,默认支持1024列;2.0之前的版本,TDengine最大允许创建250列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。 +使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。) ## 11. 最有效的写入数据的方法是什么? diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index 0ac4a06ef49ab99f78d39c81cb301ff925445acf..258b2f718feb87a2fa8d92b17a403919ac2e8f56 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -1,6 +1,6 @@ # TDengine Documentation -TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here. +TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. In addition to this document, you should also download and read the technology white paper. For the older TDengine version 1.6 documentation, please click [here](https://www.taosdata.com/en/documentation16/). ## [TDengine Introduction](/evaluation) @@ -10,28 +10,41 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## [Getting Started](/getting-started) -* [Quickly Install](/getting-started#install): install via source code/package / Docker within seconds - -- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl -- [Command-line](/getting-started#console) : an easy way to access TDengine server -- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed -- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client -- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html):a detailed guide for TDengine deployment in Kubernetes environment +* [Quick Install](/getting-started#install): install via source code/package / Docker within seconds +* [Quick Launch](/getting-started#start): start / stop TDengine quickly with systemctl +* [Command-line](/getting-started#console) : an easy way to access TDengine server +* [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed +* [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client +* [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html):a detailed guide for TDengine deployment in Kubernetes environment ## [Overall Architecture](/architecture) -- [Data Model](/architecture#model): relational database model, but one table for one device with static tags -- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out 
and high-reliability -- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time -- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement is sent back to client, while supporting multi-replicas +- [Data Model](/architecture#model): relational database model, but one table for one data collection point with static tags +- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL architecture, high availability and horizontal scalability +- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, sharded by vnodes and partitioned by time +- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement sent back to client, while supporting data replications - [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio -- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation +- [Data Query](/architecture#query): support various SQL functions, downsampling, interpolation, and multi-table aggregation ## [Data Modeling](/model) -- [Create a Database](/model#create-db): create a database for all data collection points with similar features +- [Create a Database](/model#create-db): create a database for all data collection points with similar data characteristics - [Create a Super Table(STable)](/model#create-stable): create a STable for all data collection points with the same type -- [Create a Table](/model#create-table): use STable as the template, to create a table for each data collecting point +- [Create a Table](/model#create-table): use STable as the template to create a table for each data collecting point + +## [Efficient Data Ingestion](/insert) + +- [Data Writing via SQL](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command +- [Data Writing via Prometheus](/insert#prometheus): Configure Prometheus to write data directly without any code +- [Data Writing via Telegraf](/insert#telegraf): Configure Telegraf to write collected data directly without any code +- [Data Writing via EMQ X](/insert#emq): Configure EMQ X to write MQTT data directly without any code +- [Data Writing via HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code + +## [Efficient Data Querying](/queries) + +- [Major Features](/queries#queries): support various standard query functions, setting filter conditions, and querying per time segment +- [Multi-table Aggregation](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation +- [Downsampling](/queries#sampling): aggregate data in successive time windows, support interpolation ## [TAOS SQL](/taos-sql) @@ -40,27 +53,13 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [Table Management](/taos-sql#table): add, drop, check, alter tables - [STable Management](/taos-sql#super-table): add, drop, check, alter STables - [Tag Management](/taos-sql#tags): add, drop, alter tags -- [Inserting Records](/taos-sql#insert): support to write single/multiple items per table, multiple items across tables, and support to write historical 
data +- [Inserting Records](/taos-sql#insert): write single/multiple records a table, multiple records across tables, and historical data - [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc - [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc -- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment +- [Cutting and Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment - [Boundary Restrictions](/taos-sql#limitation): restrictions for the library, table, SQL, and others - [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes -## [Efficient Data Ingestion](/insert) - -- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command -- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code -- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code -- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code -- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code - -## [Efficient Data Querying](/queries) - -- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment -- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries -- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation - ## [Advanced Features](/advanced-features) - [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals @@ -88,12 +87,12 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## [Installation and Management of TDengine Cluster](/cluster) -- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage -- [Create Your First Node](/cluster#node-one): simple to follow the quick setup +- [Preparation](/cluster#prepare): important steps before deploying TDengine for production usage +- [Create the First Node](/cluster#node-one): just follow the steps in quick start - [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster - [Node Management](/cluster#management): add, delete, and check nodes in the cluster -- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas -- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention +- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through replicas +- [Mnode Management](/cluster#mnode): mnodes are created automatically without any manual intervention - [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes - [Offline Node Processing](/cluster#offline): any node that offline for more than a certain period will be removed from the cluster - [Arbitrator](/cluster#arbitrator): used in the 
case of an even number of replicas to prevent split-brain
@@ -108,27 +107,14 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
-- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter restrictions and reserved keywords
-
-## TDengine Technical Design
-
-- [System Module]: taosd functions and modules partitioning
-- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
-- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
-
-## Common Tools
-
-- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
-- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
-- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
+- [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords

## Performance: TDengine vs Others

-- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
-- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
-- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
-- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
-- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
+- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/09/12/710.html)
+- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/09/12/708.html)
+- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/09/12/706.html)
+- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_en.pdf)

## More on IoT Big Data

- [Characteristics of IoT Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
+- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles

## FAQ

-- [FAQ: Common questions and answers](/faq)
+- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md
index 250f465d7b1280a78e18250f95aefaeca0c95415..ecbde8c5776e3bd3735aed2bd64906f8bef1afc1 100644
---
a/documentation20/en/01.evaluation/docs.md +++ b/documentation20/en/01.evaluation/docs.md @@ -2,18 +2,18 @@ ## About TDengine -TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space. +TDengine is an innovative Big Data processing product launched by TAOS Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space. One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics: - **Performance improvement over 10 times**: An innovative data storage structure is defined, with each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general database. - **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases. - **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance. -- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB. -- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to. 
+- **Highly Available and Horizontally Scalable**: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support mission-critical applications.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.
+- **Core is Open Source:** Except for some auxiliary features, the core of TDengine is open sourced. Enterprises are no longer locked in by the database. The ecosystem is stronger, the product is more stable, and the developer community is more active.

-With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
+With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, since it makes full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.

![TDengine Technology Ecosystem](page://images/eco_system.png)

@@ -62,4 +62,4 @@ From the perspective of data sources, designers can analyze the applicability of
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
-| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
+| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talent with TDengine experience in the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
\ No newline at end of file
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index 6941e44cf54fc03f40685ca85d593e54b475b1a4..50a8c2fabb8c93a847a79a4de47c218de7ccd60a 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -2,7 +2,7 @@
## Quick Install

-TDengine software consists of 3 components: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface.
About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). +TDengine software consists of 3 parts: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS and more operating systems will come soon), but the client can be installed and run on either Windows or Linux. Applications based on any OS can connect to the server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V; more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). @@ -16,7 +16,7 @@ Please refer to the detailed operation in [Quickly experience TDengine through D ### Install from Package -It’s extremely easy to install for TDengine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs: +Three different packages for the TDengine server are provided; please choose the one that fits your needs. (Lite packages only contain the executables and the C/C++ connector, while standard packages support connectors for nearly all programming languages.) The Beta version has more features, but we suggest you install the stable version for production or testing. Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package. @@ -131,7 +131,7 @@ After starting the TDengine server, you can execute the command `taosdemo` in th $ taosdemo ``` -Using this command, a STable named `meters` will be created in the database `test` There are 10k tables under this stable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`. The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai". +Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`). The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai". It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted. @@ -201,7 +201,7 @@ Note: ● has been verified by official tests; ○ has been verified by unoffici List of platforms supported by TDengine client and connectors -At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
+At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and operating systems such as Linux/Win64/Win32. Comparison matrix as following: @@ -218,4 +218,4 @@ Comparison matrix as following: Note: ● has been verified by official tests; ○ has been verified by unofficial tests. -Please visit [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information. +Please visit the Connectors section for more detailed information. \ No newline at end of file diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index ac6c94fe40f0283d31ee85d8f1c3c09c82605e2a..e0a324acad37d3c9e23d73a1f5762651b32e924d 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -4,7 +4,7 @@ ### A Typical IoT Scenario -In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the collection devices of the same type, there are often many specific collection devices distributed in places. BigData processing system aims to collect all kinds of data, and then calculate and analyze them. For the same kind of devices, the data collected are very regular. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table: +In typical industrial IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collection devices that collect one or more different physical metrics. However, for data collection devices of the same type, there are often many specific collection devices distributed in different places. A Big Data processing system aims to collect all kinds of data, then store and analyze them. For the same kind of devices, the data collected are very structured. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
@@ -13,7 +13,6 @@ In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there a - @@ -110,46 +109,46 @@ As the data points are a series of data points over time, the data points genera 1. Metrics are always structured data; 2. There are rarely delete/update operations on collected data; -3. No need for transactions of traditional databases -4. The ratio of reading is lower but write is higher than typical Internet applications; -5. data flow is uniform and can be predicted according to the number of devices and collection frequency; -6. the user pays attention to the trend of data, not a specific value at a specific time; -7. there is always a data retention policy; -8. the data query is always executed in a given time range and a subset of space; -9. in addition to storage and query operations, various statistical and real-time calculation operations are also required; -10. data volume is huge, a system may generate over 10 billion data points in a day. +3. Unlike traditional databases, transaction processing is not required; +4. The ratio of writing over reading is much higher than in typical Internet applications; +5. Data volume is stable and can be predicted according to the number of devices and sampling rate; +6. The user pays attention to the trend of data, not a specific value at a specific time; +7. There is always a data retention policy; +8. The data query is always executed in a given time range and a subset of space; +9. In addition to storage and query operations, various statistical and real-time computing operations are also required; +10. Data volume is huge; a system may generate over 10 billion data points in a day. -In light of the characteristics mentioned above, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency. +By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency. ### Relational Database Model -Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a shallow learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, instead of NoSQL’s key-value storage. +Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. SQL-like syntax is used, instead of NoSQL’s key-value storage. -### One Table for One Collection Point +### One Table for One Data Collection Point -To utilize this time-series and other data features, TDengine requires the user to create a table for each collection point to store collected time-series data. For example, if there are over 10 millions smart meters, means 10 millions tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages: +To utilize this time-series and other data features, TDengine requires the user to create a table for each data collection point to store collected time-series data.
For example, if there are over 10 million smart meters, it means 10 million tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages: -1. Guarantee that all data from a collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude. -2. Since the data generation process of each collection device is completely independent, means each device has its unique data source, thus writes can be carried out in a lock-free manner to greatly improve the speed. +1. Guarantee that all data from a data collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one data collection point in a time range, this design will reduce the random read latency significantly, thus increasing read and query speed by orders of magnitude. +2. Since the data generation process of each data collection device is completely independent and each data collection point has its unique data source, writes can be carried out in a lock-free manner to greatly improve the performance. 3. Write latency can be significantly reduced too as the data points generated by the same device will arrive in time order, the new data point will be simply appended to a block. -If the data of multiple devices are written into a table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to continuously stored together. **The method of one table for each data collection point can ensure the optimal performance of insertion and query of a single data collection point to the greatest extent.** +If the data of multiple devices are written into one table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to be continuously stored together. **One table for each data collection point can ensure the optimal performance of insert and query of a single data collection point to the greatest extent.** -TDengine suggests using collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns. +TDengine suggests using the data collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build an index on any metrics stored. All data will be stored in columns.
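+As a hedged illustration of this design (the table name and metric columns below are only examples taken from the smart meter scenario, not a fixed schema), one table per meter could be created like this:
+
+```sql
+-- One table per data collection point; the first column must be a timestamp,
+-- which TDengine uses as the index.
+CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
+-- Writing a new data point is a simple append to this table.
+INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31);
+```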
### STable: A Collection of Data Points in the Same Type -The method of one table for each point will bring a greatly increasing number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the STable (Super Table) concept is introduced by TDengine. +The design of one table for each data collection point will require a huge number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between data collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the [STable (Super Table)](https://www.taosdata.com/en/documentation/super-table) concept is introduced by TDengine. -STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tag. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established. +STable is an abstract set for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a set of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established. In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.** -When aggregating multiple data collection points with the same data type, TDEngine will first find out the tables that meet the tag filters from the STables, and then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of aggregation calculation. +When aggregating multiple data collection points with the same data type, TDengine will first find out the tables that meet the tag filter conditions from the STables, then scan the time-series data of these tables to perform the aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation.
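+To make this concrete, here is a minimal sketch (the STable, table and tag names are illustrative; see the TAOS SQL documentation for the exact syntax):
+
+```sql
+-- Define the STable once: the metric schema plus the tag schema.
+CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
+  TAGS (location BINARY(64), groupId INT);
+-- Create one table per data collection point, using the STable as a template.
+CREATE TABLE d1001 USING meters TAGS ('beijing', 2);
+-- Aggregation over the STable: tables are first filtered by tag, and only
+-- the time-series data of the matching tables is scanned.
+SELECT AVG(voltage) FROM meters WHERE location = 'beijing' INTERVAL(1d);
+```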
## Cluster and Primary Logic Unit -The design of TDengine is based on the assumption that one single hardware or software system is unreliable and that no single computer can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed according to a distributed and high-reliability architecture since Day One of R&D, which supports scale-out, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment. +The design of TDengine is based on the assumption that one single node or software system is unreliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment. ### Primary Logic Unit @@ -160,106 +159,110 @@ Logical structure diagram of TDengine distributed architecture as following: -A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDEngine application driver (taosc) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through taosc's API. The following is a brief introduction to each logical unit. +A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine application driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. -**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)". +**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)". **Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node.
A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. -**Virtual node (vnode)**: In order to better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage, and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs, and is created and managed by the management node. +**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is used to manage between mnodes, and the data synchronization is carried out in a strong consistent way. Any data update operation can only be done on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. 
There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. +**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be done without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes has the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. +**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. 
The virtual node group is created and managed by the management node, and the management node assigns a system-unique ID, aka the VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. The VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. -**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster. +**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between the application and the cluster, and provides the native interface of the C/C++ language, which is embedded in the JDBC, C#, Python, Go and Node.js connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata, and for forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For the JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of the TDengine cluster. ### Node Communication -**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission.
+**Communication mode**: The communication among the data nodes of the TDengine system, and between the application driver and each data node, is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission. + +**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly. -**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address is variable, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the normal operation of DNS service, or configure hosts files on nodes and the nodes where applications are located. +**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal communication of the cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. -**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports.
Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. When using, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. +**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted. -**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option-h, and the configured port number can be specified through -p. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted. +**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: -**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: 1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; 2: Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3: Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connected. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. +1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened normally to obtain the EP information of the mnode, skip to the second step; +2. Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode; in this case, the node will try to redirect to the mnode node when connected).
If these two configuration parameters do not exist in taos.cfg, or are invalid, skip to the third step; +3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connecting. If not successful, it will try the next item in the mnode EP list. If all attempts fail, it will sleep for a few seconds before trying again. -**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. +**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If its own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, totally transparently and without manual intervention. The modification of configuration parameters is a decision made by the mnode itself according to resource usage. **Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.
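+As a sketch of these two steps (the host names below are hypothetical), Step 1 is a single statement executed from the TDengine CLI connected to an existing node:
+
+```sql
+-- Step 1: register the new data node by its End Point (FQDN:port).
+CREATE DNODE "h2.taosdata.com:6030";
+-- Step 2 is not SQL: on the new data node, set firstEp and secondEp in
+-- taos.cfg to the EPs of two existing nodes, e.g. firstEp h1.taosdata.com:6030.
+```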
When the mnode EP List changes, each data node quickly obtains the latest list and notifies taosc through messaging interaction among nodes. +**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. -### A Typical Messaging Process +### A Typical Data Writinfg Process -To explain the relationship between vnode, mnode, taosc and application and their respective roles, the following is an analysis of a typical data writing process. +To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. ![ typical process of TDengine](page://images/architecture/message.png)
Picture 2 typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. -2. Cache be checked by taosc that if meta data existing for the table. If so, go straight to Step 4. If not, taosc sends a get meta-data request to mnode. -3. Mnode returns the meta-data of the table to taosc. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If taosc does not receive a response from the mnode for a long time, and there are multiple mnodes, taosc will send a request to the next mnode. -4. Taosc initiates an insert request to master vnode. -5. After vnode inserts the data, it gives a reply to taosc, indicating that the insertion is successful. If taosc doesn't get a response from vnode for a long time, taosc will judge the node as offline. In this case, if there are multiple replicas of the inserted database, taosc will issue an insert request to the next vnode in vgroup. -6. Taosc notifies APP that writing is successful. +2. TAOSC checks the cache to see if the meta data for the table exists. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. +3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs; if the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. +4. TAOSC initiates an insert request to the master vnode. +5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup. +6. TAOSC notifies APP that writing is successful. -For Step 2 and 3, when taosc starts, it does not know the End Point of mnode, so it will directly initiate a request to the externally serving End Point of the configured cluster. If the dnode that received the request does not have an mnode configured, it will inform the mnode EP list in a reply message, so that taosc will re-issue a request to obtain meta-data to the EP of another new mnode. +For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will include the mnode EP list in its reply message, so that TAOSC will re-issue the meta-data request to the EP of another, new mnode. -For Step 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so assumes that the first vnodeID is the master and send a request to it. If the requested vnode is not the master, it will reply the actual master as a new target taosc makes a request to. Once the reply of successful insertion is obtained, taosc will cache the information of master node. +For Steps 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it.
If this vnode is not the master, it will reply with the actual master as a new target to which TAOSC shall send the request. Once the reply of successful insertion is obtained, TAOSC will cache the information of the master node. -The above is the process of inserting data, and the processes of querying and calculating are completely consistent. Taosc encapsulates and shields all these complicated processes, and has no perception and no special treatment for applications. +The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. -Through taosc caching mechanism, mnode needs to be accessed only when a table is operated for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), taosc will interact with mnode regularly to automatically update the cache. +Through the TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache. ## Storage Model and Data Partitioning/Sharding ### Storage Model -The data stored by TDengine include collected time-series data, metadata related to libraries and tables, tag data, etc. These data are specifically divided into three parts: +The data stored by TDengine include collected time-series data, metadata related to databases and tables, tag data, etc. These data are specifically divided into three parts: -- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario.
Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when the database update parameter is set to 1. By adopting the model with one table for each data collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with the best performance. +- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in the face of millions of tables, the tag filtering results will return in milliseconds. +- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data is not large and can be stored in memory; moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management; however, there will be no performance bottleneck. Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages: -- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes timestamp, device ID and various tags. Each record carries these duplicates, so wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite again, which is extremely expensive to operate. -- Realize extremely efficient aggregation query between multiple tables: when doing aggregation query between multiple tables, it firstly finds out the tag filtered tables, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds. +- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate. +- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds out the tables which satisfy the filtering conditions, and then finds out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds. ### Data Sharding -For large-scale data management, to achieve scale-out, it is generally necessary to adopt the a Partitioning strategy as Sharding.
TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range. +For large-scale data management, to achieve scale-out, it is generally necessary to adopt a Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range. -VNode (Virtual Data Node) is responsible for providing writing, query and calculation functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and completely transparent to the application. +VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application. For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resources and storage resources to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table there. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes. -The meda data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is convenient for efficient and parallel tag filtering operations. +The meta data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is good for efficient and parallel tag filtering operations. ### Data Partitioning -In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter “days”. This method of partitioning by time rang is also convenient to efficiently implement the data retention strategy.
As long as the data file exceeds the specified number of days (system configuration parameter ‘keep’), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the cold/hot management of big data and realize tiered-storage. +In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the DB's configuration parameter `days`. This method of partitioning by time range is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `keep`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered storage. Cold/hot data can be stored in different storage media to reduce the storage cost. -In general, **TDengine splits big data by vnode and time as two dimensions**, which is convenient for parallel and efficient management with scale-out. +In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability. ### Load Balancing -Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) for declaring the status of the entire cluster. Based on the overall state, when an mnode finds an overloaded dnode, it will migrate one or more vnodes to other dnodes. In the process, external services keep running and the data insertion, query and calculation operations are not affected. +Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds that a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. -If the mnode has not received the dnode status for a period of time, the dnode will be judged as offline. When offline lasts a certain period of time (the duration is determined by the configuration parameter ‘offlineThreshold’), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure t the replica number.
-When new data nodes are added to the cluster, with new computing and storage are added, the system will automatically start the load balancing process. +When new data nodes are added to the cluster, new computing and storage resources are added with them, so the system will automatically start the load balancing process. -The load balancing process does not require any manual intervention without application restarted. It will automatically connect new nodes with completely transparence. **Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.** +The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by the parameter “balance”, which turns automatic load balancing on or off.** ## Data Writing and Replication Process -If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies taosc to redirect. +If a database has N replicas, a virtual node group has N virtual nodes, of which only one is the Master and all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notify TAOSC to redirect it. ### Master vnode Writing Process @@ -267,8 +270,8 @@ Master Vnode uses a writing process as follows: Figure 3: TDengine Master writing process -1. Master vnode receives the application data insertion request, verifies, and to next step; -2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file; +1. Master vnode receives the application data insertion request, verifies it, and moves to the next step; +2. If the system configuration parameter `walLevel` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; 3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data; 4. Write into memory and add the record to “skip list”; 5. Master vnode returns a confirmation message to the application, indicating a successful writing. @@ -281,31 +284,31 @@ For a slave vnode, the write process as follows: ![TDengine Slave Writing Process](page://images/architecture/write_master.png)
Picture 3 TDengine Slave Writing Process
-1. Slave vnode receives a data insertion request forwarded by Master vnode. -2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file; -3. Write into memory and add the record to “skip list”; +1. Slave vnode receives a data insertion request forwarded by Master vnode; +2. If the system configuration parameter `walLevel` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; +3. Write into memory and add the record to “skip list”. -Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory is exactly the same as WAL. +Compared with the Master vnode, a slave vnode has no forwarding or reply confirmation step, which means two steps less. But writing into memory and WAL is exactly the same. ### Remote Disaster Recovery and IDC Migration -As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with not obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. +As discussed in the Master and Slave processes above, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDCs and different racks, thus implementing remote disaster recovery without other tools. -On the other hand, TDengine supports dynamic modification of the replicas number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can realize IDC room migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. +On the other hand, TDengine supports dynamic modification of the number of replicas. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is completed, the added virtual nodes can provide services. In the synchronization process, the master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
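As a minimal sketch of the dynamic replica modification just described (the database name power is illustrative; REPLICA is the corresponding TAOS SQL option):

```mysql
-- Raise the replica number; the newly added virtual nodes start
-- synchronizing immediately and serve once synchronization completes.
ALTER DATABASE power REPLICA 3;
```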
-However, this asynchronous replication method has a tiny time window of written data lost. The specific scenario is as follows: +However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows: -1. Master vnode has completed its 5-step operations, confirmed the success of writing to APP, and then went down; +1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then went down; 2. Slave vnode receives the write request, but processing fails before writing to the log in Step 2; -3. Slave vnode will become the new master, thus losing one record +3. Slave vnode will become the new master, thus losing one record. -In theory, as long as in asynchronous replication, there is no guarantee for no losing. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before. +In theory, with asynchronous replication, there is no guarantee against data loss. However, this window is extremely small: it only opens if the master and slave fail at the same time, right after the master has confirmed a successful write to the application. Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet** ### Master/slave Selection -Vnode maintains a Version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is collecting time-series data or metadata, this version number will be increased by one. +Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one. When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows: @@ -318,7 +321,7 @@ See [TDengine 2.0 Data Replication Module Design](https://www.taosdata.com/cn/do ### Synchronous Replication -For scenarios with higher data consistency requirements, asynchronous data replication is not applicable, because there is some small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. +For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter “quorum”.
If quorum is greater than one, it means that every time the Master forwards a message to the replicas, it needs to wait for “quorum-1” confirmation replies before informing the application that data has been successfully written on the slaves. If “quorum-1” confirmation replies are not received within a certain period of time, the master vnode will return an error to the application. With synchronous replication, system performance will decrease and latency will increase. Because metadata requires strong consistency, the default for data synchronization between mnodes is synchronous replication. @@ -336,17 +339,17 @@ Each vnode has its own independent memory, and it is composed of multiple memory TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after the data is written successfully to avoid unlimited log growth. -To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter “days”. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations. +To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `days`. This way, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations. -For collected data, there is generally a retention period, which is determined by the system configuration parameter “keep”. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space. +For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `keep`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space. Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, a reasonable value of days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set. -In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter “maxRows” (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed. +In each data file, the data of a table is stored by blocks.
A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter `maxRows` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, locating data during a search will take longer; if too small, the index of the data blocks becomes too large and the compression efficiency becomes low, slowing down reads. -Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter “minRows” (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file. +Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so that the system can quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records from a table does not reach the system configuration parameter `minRows` (minimum number of records per block), they will be stored in the last file first. At the next disk write, the newly written records will be merged with the records in the last file and then written into the data file. -When data is written to disk, it is decided whether to compress the data according to system configuration parameter “comp”. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio. +When data is written to disk, whether to compress the data is decided according to the system configuration parameter `comp`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression applies a general compression algorithm on top of one-stage compression, which yields a higher compression ratio.
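To tie these storage parameters together, a hedged sketch of a database created with explicit values for them (the database name and values are illustrative; `days`, `keep`, `maxRows`, `minRows` and `comp` are the parameters described in this section):

```mysql
-- keep/days = 36.5 data files per vnode, within the suggested 10-100 range
CREATE DATABASE power DAYS 10 KEEP 365 MAXROWS 4096 MINROWS 100 COMP 2;
```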
### Tiered Storage @@ -393,17 +396,15 @@ When client obtains query result, the worker thread in query execution queue of ### Aggregation by Time Axis, Downsampling, Interpolation -The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and unique function from common databases. From this point of view, it is similar to the window query of stream computing engine. +The remarkable feature that distinguishes time-series data from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important feature distinguishing it from common databases. From this point of view, it is similar to the window query of a stream computing engine. -The keyword “interval” is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated according to time windows, and the data within window range are aggregated as needed. For example: +The keyword `interval` is introduced into TDengine to split fixed-length time windows on the time axis, and the data within each window range is aggregated as needed. For example: ```mysql select count(*) from d1001 interval(1h); ``` -According to the data collected by device D1001, the number of records stored per hour is returned by a 1-hour time window. - - +For the data collected by device D1001, the number of records stored per hour is returned by a 1-hour time window. In application scenarios where query results need to be obtained continuously, if there is data missing in a given time interval, the data results in this interval will also be lost. TDengine provides a strategy to interpolate the results of timeline aggregation calculation. The results of time axis aggregation can be interpolated by using the keyword Fill. For example: @@ -411,24 +412,25 @@ In application scenarios where query results need to be obtained continuously, i select count(*) from d1001 interval(1h) fill(prev); ``` -According to the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value). +For the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value). ### Multi-table Aggregation Query -TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is completely consistent, but each table has its own static tag. The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications.
The process is shown in the following figure: +TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, and they can be added, deleted and modified at any time. Applications can apply aggregation or statistical operations to all or a subset of tables under a STable by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:
Picture 4 Diagram of multi-table aggregation query
1. Application sends a query condition to the system; -2. taosc sends the STable name to Meta Node(management node); -3. Management node sends the vnode list owned by the STable back to taosc; -4. taosc sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; -5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to taosc; -6. taosc finally aggregates the results returned by multiple data nodes and send them back to application. +2. TAOSC sends the STable name to Meta Node (management node); +3. Management node sends the vnode list owned by the STable back to TAOSC; +4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; +5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns the result to TAOSC; +6. TAOSC finally aggregates the results returned by multiple data nodes and sends them back to the application. -Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation calculation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation calculation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. +Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in the aggregation operation is first found, which greatly reduces the volume of data scanned and improves the aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. ### Precomputation In order to effectively improve the performance of query processing, based on the unchangeable nature of IoT data, statistical information of the data stored in a data block is recorded in the head of the data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, with no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of the data block stored on disk, for query processing with disk IO as the bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the index BRIN (Block Range Index) of PostgreSQL.
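A hedged illustration of when the precomputing unit pays off: an aggregation limited to max/min/sum over a time range covering whole data blocks can be answered from the block headers alone (table d1001 and the time range are illustrative):

```mysql
select max(current), min(current), sum(current) from d1001
  where ts >= '2020-08-01 00:00:00' and ts < '2020-09-01 00:00:00';
```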
+ diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md index 08d952d317e30ef91dad32333c9abb0e646b8e48..e28dd906f3b1fae76cf6657c8f946468b92788f0 100644 --- a/documentation20/en/04.model/docs.md +++ b/documentation20/en/04.model/docs.md @@ -2,17 +2,15 @@ TDengine adopts a relational data model, so we need to build the "database" and "table". Therefore, for a specific application scenario, it is necessary to consider the design of the database, STable and ordinary table. This section does not discuss detailed syntax rules, but only concepts. -Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.html) for data modeling. - ## Create a Database -Different types of data collection points often have different data characteristics, including frequency of data collecting, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine working with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time accuracy, max and min number of records in a file block, whether it is compressed or not, and number of days a data file will be overwritten. For example: +Different types of data collection points often have different data characteristics, including data sampling rate, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine works with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time resolution, max and min number of records in a file block, whether it is compressed or not, and number of days covered by a data file. For example: ```mysql CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; ``` -The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file created per 10 days, and the number of memory blocks is 4 for data updating. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management). +The above statement will create a database named “power”. The data of this database will be kept for 365 days (data will be automatically deleted 365 days later), one data file will be created per 10 days, the number of memory blocks is 6, and data updating is allowed. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
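As a hedged sketch of a few of the other options named above (values are illustrative; REPLICA, PRECISION and COMP are the corresponding CREATE DATABASE options in TAOS SQL):

```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 REPLICA 3 PRECISION 'ms' COMP 2 UPDATE 1;
```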
After the database is created, please use the SQL command USE to switch to the new database, for example: @@ -20,13 +18,12 @@ After the database created, please use SQL command USE to switch to the new data USE power; ``` -Replace the database operating in the current connection with “power”, otherwise, before operating on a specific table, you need to use "database name. table name" to specify the name of database to use. +This sets the database of the current connection to “power”; otherwise, before operating on a specific table, you need to write "database-name.table-name" to specify which database the table belongs to. **Note:** - Any table or STable belongs to a database. Before creating a table, a database must be created first. - Tables in two different databases cannot be JOINed. -- You need to specify a timestamp when creating and inserting records, or querying history records. ## Create a STable @@ -38,11 +35,11 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG **Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15. -Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure Schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), the other columns are the physical metrics collected (current, volume, phase in the example), and the data types can be int, float, string, etc. In addition, you need to provide the schema of the tag (location, groupId in the example), and the data types of the tag can be int, float, string and so on. Static attributes of collection points can often be used as tags, such as geographic location of collection points, device model, device group ID, administrator ID, etc. The schema of the tag can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details. +Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure Schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), the other columns are the physical metrics collected (current, voltage, phase in the example), and the data types can be int, float, string, etc. In addition, you need to provide the schema of the tags (location, groupId in the example), and the data types of the tags can be int, float, string and so on. Static attributes of data collection points can often be used as tags, such as geographic location of collection points, device model, device group ID, administrator ID, etc. The schema of the tags can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details. -Each type of data collection point needs an established STable, so an IoT system often has multiple STables. For the power grid, we need to build a STable for smart meters, transformers, buses, switches, etc. For IoT, a device may have multiple data collection points (for example, a fan for wind-driven generator, some collection points capture parameters such as current and voltage, and some capture environmental parameters such as temperature, humidity and wind direction).
In this case, multiple STables need to be established for corresponding types of devices. All collected physical metrics contained in one and the same STable must be collected at the same time (with a consistent timestamp). +A STable must be created for each type of data collection point, so an IoT system often has multiple STables. For the power grid, we need to build a STable for smart meters, a STable for transformers, a STable for buses, a STable for switches, etc. For IoT, a device may have multiple data collection points (for example, for a fan of a wind-driven generator, one data collection point captures metrics such as current and voltage, and another captures environmental parameters such as temperature, humidity and wind direction). In this case, multiple STables need to be established for the corresponding types of devices. All metrics contained in a STable must be collected at the same time (with the same timestamp). -A STable allows up to 1024 columns. If the number of physical metrics collected at a collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables. +A STable allows up to 1024 columns. If the number of metrics collected at a data collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables. ## Create a Table @@ -54,22 +51,23 @@ CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); Where d1001 is the table name, meters is the name of the STable, followed by the specific tag value of tag Location as "Beijing.Chaoyang", and the specific tag value of tag groupId as 2. Although the tag value needs to be specified when creating the table, it can be modified afterwards. Please refer to the [Table Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#table) for details. -**Note: ** At present, TDengine does not technically restrict the use of a STable of a database (dbA) as a template to create a sub-table of another database (dbB). This usage will be prohibited later, and it is not recommended to use this method to create a table. +**Note: ** At present, TDengine does not technically restrict the use of a STable of a database (dbA) as a template to create a sub-table of another database (dbB). This usage will be prohibited later, and it is not recommended to create a table this way. TDengine suggests using the globally unique ID of a data collection point as the table name (such as the device serial number). However, in some scenarios, there is no unique ID, and multiple IDs can be combined into a unique ID. It is not recommended to use a unique ID as tag value. -**Automatic table creating** : In some special scenarios, user is not sure whether the table of a certain data collection point exists when writing data. In this case, the non-existent table can be created by using automatic table building syntax when writing data. If the table already exists, no new table will be created. For example: +**Automatic table creation**: In some special scenarios, the user is not sure whether the table of a certain data collection point exists when writing data. In this case, the non-existent table can be created by using the automatic table creation syntax when writing data. If the table already exists, no new table will be created.
For example: ```mysql INSERT INTO d1001 USING METERS TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); ``` -The SQL statement above inserts records (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as the template to automatically create it, and the tag value "Beijing.Chaoyang", 2 is marked at the same time. +The SQL statement above inserts the record (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as the template to create it automatically, and the tag values "Beijing.Chaoyang", 2 are set at the same time. For detailed syntax of automatic table building, please refer to the "[Automatic Table Creation When Inserting Records](https://www.taosdata.com/en/documentation/taos-sql#auto_create_table)" section. ## Multi-column Model vs Single-column Model -TDengine supports multi-column model. As long as physical metrics are collected simultaneously by a data collection point (with a consistent timestamp), these metrics can be placed in a STable as different columns. However, there is also an extreme design, a single-column model, in which each collected physical metric is set up separately, so each type of physical metrics is set up separately with a STable. For example, create 3 Stables, one each for current, voltage and phase. +TDengine supports the multi-column model. As long as metrics are collected simultaneously by a data collection point (with the same timestamp), these metrics can be placed in a STable as different columns. However, there is also an extreme design, a single-column model, in which a STable is created for each metric. For the smart meter example, we would need to create 3 STables, one for current, one for voltage and one for phase. + +TDengine recommends using the multi-column model as much as possible because of its higher insertion and storage efficiency. However, for some scenarios, the types of collected metrics often change. In this case, if the multi-column model is adopted, the schema definition of the STable needs to be modified frequently and the application becomes complicated. To avoid that, the single-column model is recommended. -TDengine recommends using multi-column model as much as possible because of higher insertion and storage efficiency. However, for some scenarios, types of collected metrics often change. In this case, if multi-column model is adopted, the structure definition of STable needs to be frequently modified so make the application complicated. To avoid that, single-column model is recommended. diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md index 88746ea60867b37e5956075f88c48ebd8276dfaa..7e99cf09dbae6a09429c83810f07db6ef4dafbe7 100644 --- a/documentation20/en/05.insert/docs.md +++ b/documentation20/en/05.insert/docs.md @@ -1,22 +1,22 @@ # Efficient Data Writing -TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in a single piece or in batches, data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, nonsequential data insertion, and also historical data insertion. +TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future.
Data can be inserted one record at a time or in batches, and data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and also historical data insertion. -## SQL Writing +## Data Writing via SQL -Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: +Applications insert data by executing SQL insert statements through the C/C++, JDBC, GO, C#, or Python Connector, and users can manually enter SQL insert statements to insert data through the TAOS Shell. For example, the following insert writes a record to table d1001: ```mysql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); ``` -TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001: +TDengine supports writing multiple records in a single statement. For example, the following command writes two records to table d1001: ```mysql INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25); ``` -TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002: +TDengine also supports writing data to multiple tables in a single statement. For example, the following command writes two records to d1001 and one record to d1002: ```mysql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); @@ -26,22 +26,22 @@ For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosd **Tips:** -- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (it can be configured by parameter maxSQLLength, and the maximum can be configured to 1M). -- TDengine supports multi-thread parallel writing. To further improve writing speed, a client needs to open more than 20 threads to write parallelly. However, after the number of threads reaches a certain threshold, it cannot be increased or even become decreased, because too much frequent thread switching brings extra overhead. -- For a same table, if the timestamp of a newly inserted record already exists, (no database was created using UPDATE 1) the new record will be discarded as default, that is, the timestamp must be unique in a table. If an application automatically generates records, it is very likely that the generated timestamps will be the same, so the number of records successfully inserted will be smaller than the number of records the application try to insert. If you use UPDATE 1 option when creating a database, inserting a new record with the same timestamp will overwrite the original record. +- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record size cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (it can be configured by parameter maxSQLLength, and the maximum can be configured to 1M). +- TDengine supports multi-thread parallel writing. To further improve writing speed, a client needs to open more than 20 threads to write in parallel.
However, after the number of threads reaches a certain threshold, increasing it further no longer helps and may even reduce the writing speed, because too much thread switching brings extra overhead. +- For the same table, if the timestamp of a newly inserted record already exists, the new record will be discarded by default (database option update = 0), that is, the timestamp must be unique in a table. If an application automatically generates records, it is very likely that the generated timestamps will be the same, so the number of records successfully inserted will be smaller than the number of records the application tries to insert. If you use the UPDATE 1 option when creating a database, inserting a new record with the same timestamp will overwrite the original record. - The timestamp of written data must be greater than the current time minus the time of configuration parameter keep. If keep is configured for 3650 days, data older than 3650 days cannot be written. The timestamp for writing data cannot be greater than the current time plus configuration parameter days. If days is configured to 2, data 2 days later than the current time cannot be written. -## Direct Writing of Prometheus +## Data Writing via Prometheus As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used in the field of performance monitoring and K8S performance monitoring. TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which only needs to be simply configured in Prometheus without any code, and can directly write the data collected by Prometheus into TDengine, then automatically create databases and related table entries in TDengine according to rules. Blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using bailongma to write Prometheus and Telegraf data into TDengine. ### Compile blm_prometheus From Source -Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from github, then compile and generate an executable file using Golang language compiler. Before you start compiling, you need to complete following prepares: +Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from github, then compile and generate an executable file using the Golang compiler. Before you start compiling, you need to prepare: - A server running Linux OS - Golang version 1.10 and higher installed -- An appropriated TDengine version. Because the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side; for example, if the server version is TDengine 2.0. 0, ensure install the same version on the linux server where bailongma is located (can be on the same server as TDengine, or on a different server) +- Since the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side. For example, if the server version is TDengine 2.0.0, make sure to install the same version on the Linux server where bailongma is located (it can be on the same server as TDengine, or on a different server) The Bailongma project has a folder, blm_prometheus, which holds the prometheus writing API.
The compiling process is as follows: @@ -134,7 +134,7 @@ The format of generated data by Prometheus is as follows: } ``` -Where apiserver_request_latencies_bucket is the name of the time-series data collected by prometheus, and the tag of the time-series data is in the following {}. blm_prometheus automatically creates a STable in TDengine with the name of the time series data, and converts the tag in {} into the tag value of TDengine, with Timestamp as the timestamp and value as the value of the time-series data. Therefore, in the client of TDEngine, you can check whether this data was successfully written through the following instruction. +Where apiserver_request_latencies_bucket is the name of the time-series data collected by prometheus, and the tags of the time-series data are in the following {}. blm_prometheus automatically creates a STable in TDengine with the name of the time-series data, and converts the tags in {} into the tag values of TDengine, with Timestamp as the timestamp and value as the value of the time-series data. Therefore, in the client of TDengine, you can check whether this data was successfully written through the following instruction. ```mysql use prometheus; select * from apiserver_request_latencies_bucket; ``` @@ -144,7 +144,7 @@ -## Direct Writing of Telegraf +## Data Writing via Telegraf [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for IT operation data collection. TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which only needs to be simply configured in Telegraf without any code, and can directly write the data collected by Telegraf into TDengine, then automatically create databases and related table entries in TDengine according to rules. Blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using bailongma to write Prometheus and Telegraf data into TDengine. @@ -271,12 +271,12 @@ select * from cpu; MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by MQTT Broker and write it to TDengine. -## Direct Writing of EMQ Broker +## Data Writing via EMQ Broker [EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software. With no coding, only simple configuration of "rules" in the EMQ Dashboard, MQTT data can be directly written into TDengine. EMQ X supports storing data to TDengine by sending it to a Web service, and also provides a native TDengine driver on Enterprise Edition for direct data storage. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details. -## Direct Writing of HiveMQ Broker
+[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used by enterprises and for emerging machine-to-machine (M2M) communication and internal transmission, meeting requirements for scalability, ease of management and security. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details. \ No newline at end of file diff --git a/documentation20/en/06.queries/docs.md b/documentation20/en/06.queries/docs.md index c4f1359820a28b390e84be93e077fecb1d5ede0e..7688a941f0fb5b685f592833322906e4c4760b79 100644 --- a/documentation20/en/06.queries/docs.md +++ b/documentation20/en/06.queries/docs.md @@ -28,7 +28,7 @@ For specific query syntax, please see the [Data Query section of TAOS SQL](https ## Multi-table Aggregation Query -In an IoT scenario, there are often multiple data collection points in a same type. TDengine uses the concept of STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the statical attributes of data collection points. A given data collection point has a specific tag value. By specifying the filters of tags, TDengine provides an efficient method to aggregate and query the sub-tables of STables (data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same. +In an IoT scenario, there are often multiple data collection points of the same type. TDengine uses the concept of STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the static attributes of data collection points. A given data collection point has a specific tag value. By specifying the filters of tags, TDengine provides an efficient method to aggregate and query the sub-tables of STables (data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same. **Example 1**: In TAOS Shell, look up the average voltages collected by all smart meters in Beijing and group them by location @@ -55,7 +55,7 @@ TDengine only allows aggregation queries between tables belonging to a same STable ## Down Sampling Query, Interpolation -In a scenario of IoT, it is often necessary to aggregate the collected data by intervals through down sampling. TDengine provides a simple keyword interval, which makes query operations according to time windows extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds. +In an IoT scenario, it is often necessary to aggregate the collected data by intervals through down sampling. TDengine provides a simple keyword `interval`, which makes query operations according to time windows extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds.
```mysql taos> SELECT sum(current) FROM d1001 INTERVAL(10s); @@ -94,6 +94,6 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); Query OK, 5 row(s) in set (0.001521s) ``` -In a scenario of IoT, it is difficult to synchronize the time stamp of collected data at each point, but many analysis algorithms (such as FFT) need to align the collected data strictly at equal intervals of time. In many systems, it’s required to write their own programs to process, but the down sampling operation of TDengine can be easily solved. If there is no collected data in an interval, TDengine also provides interpolation calculation function. +In IoT scenarios, it is difficult to synchronize the timestamps of the data collected at each point, but many analysis algorithms (such as FFT) need to align the collected data strictly at equal intervals of time. Many systems require users to write their own programs for this, but the down sampling operation of TDengine can be used to solve the problem easily. If there is no collected data in an interval, TDengine also provides an interpolation calculation function. For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation). \ No newline at end of file diff --git a/documentation20/en/07.advanced-features/docs.md b/documentation20/en/07.advanced-features/docs.md index d9103c70216772a9ce24f67c719b379b106a9055..38c70862b637daf5840606535971e412d938b9e8 100644 --- a/documentation20/en/07.advanced-features/docs.md +++ b/documentation20/en/07.advanced-features/docs.md @@ -9,8 +9,8 @@ Continuous query of TDengine adopts time-driven mode, which can be defined direc The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the following ways: - Unlike the real-time feedback calculated results of stream computing, continuous query only starts calculation after the time window is closed. For example, if the time period is 1 day, the results of that day will only be generated after 23:59:59. -- If a history record is written to the time interval that has been calculated, the continuous query will not recalculate and will not push the results to the user again. For the mode of writing back to TDengine, the existing calculated results will not be updated. -- Using the mode of continuous query pushing results, the server does not cache the client's calculation status, nor does it provide Exactly-Once semantic guarantee. If the user's application side crashed, the continuous query pulled up again would only recalculate the latest complete time window from the time pulled up again. If writeback mode is used, TDengine can ensure the validity and continuity of data writeback. +- If a history record is written to the time interval that has been calculated, the continuous query will not re-calculate and will not push the new results to the user again. +- TDengine server does not cache or save the client's status, nor does it provide Exactly-Once semantic guarantee. If the application crashes, the continuous query must be started again, and the starting time must be provided by the application. ### How to use continuous query @@ -29,7 +29,7 @@ We already know that the average voltage of these meters can be counted with one select avg(voltage) from meters interval(1m) sliding(30s); ``` -Every time this statement is executed, all data will be recalculated.
If you need to execute every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as following, using a different `startTime` each time and executing it regularly: +Every time this statement is executed, all data will be re-calculated. If you need to execute every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as follows, using a different `startTime` each time and executing it regularly: ```sql select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); ``` @@ -65,7 +65,7 @@ It should be noted that now in the above example refers to the time when continuous ### Manage the Continuous Query -Users can view all continuous queries running in the system through the show streams command in the console, and can kill the corresponding continuous queries through the kill stream command. Subsequent versions will provide more finer-grained and convenient continuous query management commands. +Users can view all continuous queries running in the system through the `show streams` command in the console, and can kill the corresponding continuous queries through the `kill stream` command. Subsequent versions will provide finer-grained and more convenient continuous query management commands. ## Publisher/Subscriber @@ -101,7 +101,7 @@ Another method is to query the STable. In this way, no matter how many meters th select * from meters where ts > {last_timestamp} and current > 10; ``` -However, how to choose `last_timestamp` has become a new problem. Because, on the one hand, the time of data generation (the data timestamp) and the time of data storage are generally not the same, and sometimes the deviation is still very large; On the other hand, the time when the data of different meters arrive at TDengine will also vary. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; If the timestamp of the fastest meter is used, the data of other meters may be missed. +However, how to choose `last_timestamp` becomes a new problem. On the one hand, the time of data generation (the data timestamp) and the time of data writing are generally not the same, and sometimes the deviation is very large; on the other hand, the time when the data of different meters arrives at TDengine also varies. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; if the timestamp of the fastest meter is used, the data of other meters may be missed. The subscription function of TDengine provides a thorough solution to the above problem.
+In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html). \ No newline at end of file diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index 9cbd3952068d8eac23ffa9bcd7497ff158a21d86..fd9d129e50fa4450aed2fbebe80eddb978ef1263 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -66,7 +66,11 @@ Run install_client.sh to install. Edit the taos.cfg file (default path/etc/taos/taos.cfg) and change firstEP to End Point of the TDengine server, for example: [h1.taos.com](http://h1.taos.com/):6030. -**Tip: If no TDengine service deployed in this machine, but only the application driver is installed, only firstEP needs to be configured in taos.cfg, and FQDN does not.** +**Tips:** + +**1. If no TDengine service is deployed on this machine and only the application driver is installed, only firstEP needs to be configured in taos.cfg; FQDN does not.** + +**2. To prevent the “unable to resolve FQDN” error when connecting to the server, ensure that the hosts file of the client has the correct FQDN value.** **Windows x64/x86** @@ -128,7 +132,7 @@ taos> **Windows (x64/x86) environment:** -Under cmd, enter the c:\ tdengine directory and directly execute taos.exe, and you should be able to connect to tdengine service normally and jump to taos shell interface. For example: +Under cmd, enter the c:\TDengine directory and directly execute taos.exe, and you should be able to connect to the TDengine service normally and enter the taos shell interface. For example: ```mysql C:\TDengine>taos @@ -296,9 +300,7 @@ Asynchronous APIs have relatively high requirements for users, who can selective The asynchronous APIs of TDengine all use non-blocking calling mode. Applications can use multithreading to open multiple tables at the same time, and can query or insert to each open table at the same time. It should be pointed out that the **application client must ensure that the operation on the same table is completely serialized**, that is, when the insertion or query operation on the same table is not completed (when no result returned), the second insertion or query operation cannot be performed. - - ### Parameter binding API In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using question mark `?` to represent the parameters to be bound, as follows: @@ -411,11 +413,11 @@ See [video tutorials](https://www.taosdata.com/blog/2020/11/11/1963.html) for th Users can find the connector package for python2 and python3 in the source code src/connector/python (or tar.gz/connector/python) folder. Users can install it through `pip` command: -`pip install src/connector/python/linux/python2/` +`pip install src/connector/python/` or - `pip3 install src/connector/python/linux/python3/` + `pip3 install src/connector/python/` #### Windows @@ -823,12 +825,12 @@ https://www.taosdata.com/blog/2020/11/02/1901.html The TDengine provides the GO driver taosSql. taosSql implements the GO language's built-in interface database/sql/driver. Users can access TDengine in the application by simply importing the package as follows, see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details. 
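As a companion to the parameter binding section touched above, here is a hedged C sketch of the `taos_stmt` prepare/bind/execute cycle; the table `d1001(ts timestamp, current float)` and the literal values are assumptions for illustration only:

```c
// Sketch of the parameter binding flow; error checks omitted for brevity.
#include <stdint.h>
#include <string.h>
#include <taos.h>

void insert_with_stmt(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  const char *sql = "insert into d1001 values (?, ?)";
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  int64_t ts = 1626861392589;  // millisecond timestamp
  float   current = 10.3f;
  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));

  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(ts);
  params[0].length        = &params[0].buffer_length;

  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = &current;
  params[1].buffer_length = sizeof(current);
  params[1].length        = &params[1].buffer_length;

  taos_stmt_bind_param(stmt, params);  // one TAOS_BIND entry per `?`
  taos_stmt_add_batch(stmt);
  taos_stmt_execute(stmt);
  taos_stmt_close(stmt);
}
```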
-Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go and the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html). +Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go . ```Go import ( "database/sql" - _ "github.com/taosdata/driver-go/taosSql" + _ "github.com/taosdata/driver-go/v2/taosSql" ) ``` @@ -839,6 +841,8 @@ go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.io,direct ``` +`taosSql` v2 is a complete refactoring of v1: the built-in database operation interface `database/sql/driver` stays in the `taosSql` directory, while other advanced functions such as subscription and stmt are moved into the `af` directory. + ### Common APIs - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` @@ -937,7 +941,7 @@ After installing the TDengine client, the nodejsChecker.js program can verify wh Steps: -1. Create a new installation verification directory, for example: ~/tdengine-test, copy the nodejsChecker.js source program on github. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js). +1. Create a new installation verification directory, for example: `~/tdengine-test`, copy the nodejsChecker.js source program from GitHub. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js). 2. Execute the following command: diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md index b693d228cff808661caa4cc22afdc2721709bdc2..19544af0fa50af258f975532ad8399fcb8588b42 100644 --- a/documentation20/en/09.connections/docs.md +++ b/documentation20/en/09.connections/docs.md @@ -2,7 +2,7 @@ ## Grafana -TDengine can quickly integrate with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process does not require any code to write. The contents of the data table in TDengine can be visually showed on DashBoard. +TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process requires no code to be written. The contents of the data tables in TDengine can be visually shown on a DashBoard. ### Install Grafana diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md index 05d0a463aa1bcf5bf56e9ca4032be9ddbaa59b4d..864bc46200767468561ff940f3ac271d558c833c 100644 --- a/documentation20/en/10.cluster/docs.md +++ b/documentation20/en/10.cluster/docs.md @@ -1,8 +1,8 @@ # TDengine Cluster Management -Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node function. +Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. 
To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node TDengine. -Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter fqdn in taos.cfg. If you are accustomed to direct IP address access, you can set the parameter fqdn to the IP address of this node. +Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter `fqdn` in taos.cfg. If you want to access via IP address directly, you can set the parameter `fqdn` to the IP address of this node. The cluster management of TDengine is extremely simple. Except for manual intervention in adding and deleting nodes, all other tasks are completed automatically, thus minimizing the workload of operation. This chapter describes the operations of cluster management in detail. @@ -12,7 +12,7 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19 **Step 0:** Plan FQDN of all physical nodes in the cluster, and add the planned FQDN to /etc/hostname of each physical node respectively; modify the /etc/hosts of each physical node, and add the corresponding IP and FQDN of all cluster physical nodes. [If DNS is deployed, contact your network administrator to configure it on DNS] -**Step 1:** If the physical nodes have previous test data, installed with version 1. x, or installed with other versions of TDengine, please delete it first and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)" +**Step 1:** If the physical nodes have previous test data, or were installed with version 1.x or other versions of TDengine, please back up all data, then delete it and drop all data. 
For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)" **Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or changed before, and TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`)on the premise of ensuring that the data is useless or backed up; @@ -136,7 +136,7 @@ Execute the CLI program taos, log in to the TDengine system using the root accou DROP DNODE "fqdn:port"; ``` -Where fqdn is the FQDN of the deleted node, and port is the port number of its external server. +Where fqdn is the FQDN of the deleted node, and port is the port number. **【Note】** @@ -185,7 +185,7 @@ Because of the introduction of vnode, it is impossible to simply draw a conclusi TDengine cluster is managed by mnode (a module of taosd, management node). In order to ensure the high-availability of mnode, multiple mnode replicas can be configured. The number of replicas is determined by system configuration parameter numOfMnodes, and the effective range is 1-3. In order to ensure the strong consistency of metadata, mnode replicas are duplicated synchronously. -A cluster has multiple data node dnodes, but a dnode runs at most one mnode instance. In the case of multiple dnodes, which dnode can be used as an mnode? This is automatically specified by the system according to the resource situation on the whole. User can execute the following command in the console of TDengine through the CLI program taos: +A cluster has multiple data node dnodes, but a dnode runs at most one mnode instance. In the case of multiple dnodes, which dnode can be used as an mnode? This is automatically selected by the system based on overall resource usage. Users can execute the following command in the console of TDengine through the CLI program taos: ``` SHOW MNODES; ``` @@ -213,7 +213,7 @@ When the above three situations occur, the system will start a load computing of If a data node is offline, the TDengine cluster will automatically detect it. There are two detailed situations: -- If the data node is offline for more than a certain period of time (configuration parameter offlineThreshold in taos.cfg controls the duration), the system will automatically delete the data node, generate system alarm information and trigger the load balancing process. If the deleted data node is online again, it will not be able to join the cluster, and the system administrator will need to add it to the cluster again. +- If the data node is offline for more than a certain period of time (configuration parameter `offlineThreshold` in taos.cfg controls the duration), the system will automatically delete the data node, generate system alarm information and trigger the load balancing process. If the deleted data node is online again, it will not be able to join the cluster, and the system administrator will need to add it to the cluster again. - After offline, the system will automatically start the data recovery process if it goes online again within the duration of offlineThreshold. After the data is fully recovered, the node will start to work normally. **Note:** If each data node belonging to a virtual node group (including mnode group) is in offline or unsynced state, Master can only be elected after all data nodes in the virtual node group are online and can exchange status information, and the virtual node group can serve externally. 
For example, the whole cluster has 3 data nodes with 3 replicas. If all 3 data nodes go down and then 2 data nodes restart, it will not work. Only when all 3 data nodes restart successfully can serve externally again. @@ -229,7 +229,7 @@ The name of the executable for Arbitrator is tarbitrator. The executable has alm 1. Click [Package Download](https://www.taosdata.com/cn/all-downloads/), and in the TDengine Arbitrator Linux section, select the appropriate version to download and install. -2. The command line parameter -p of this application can specify the port number of its external service, and the default is 6042. +2. The command line parameter -p of this application can specify the port number of its service, and the default is 6042. 3. Modify the configuration file of each taosd instance, and set parameter arbitrator to the End Point corresponding to the tarbitrator in taos.cfg. (If this parameter is configured, when the number of replicas is even, the system will automatically connect the configured Arbitrator. If the number of replicas is odd, even if the Arbitrator is configured, the system will not establish a connection.) 4. The Arbitrator configured in the configuration file will appear in the return result of instruction `SHOW DNODES`; the value of the corresponding role column will be "arb". diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index 3817a41766d515d663661fd4382c883e0d8f179b..a2c2486b8e96cab95fad0f90470726d508dd63f7 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -22,8 +22,8 @@ If there is plenty of memory, the configuration of Blocks can be increased so th CPU requirements depend on the following two aspects: -- **Data insertion** TDengine single core can handle at least 10,000 insertion requests per second. Each insertion request can take multiple records, and inserting one record at a time is almost the same as inserting 10 records in computing resources consuming. Therefore, the larger the number of inserts, the higher the insertion efficiency. If an insert request has more than 200 records, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement for front-end data collection, because records need to be cached and then inserted in batches. -- **Query requirements** TDengine to provide efficient queries, but the queries in each scenario vary greatly and the query frequency too, making it difficult to give objective figures. Users need to write some query statements for their own scenes to determine. +- **Data insertion**: A single TDengine core can handle at least 10,000 insertion requests per second. Each insertion request can take multiple records, and inserting one record at a time consumes almost the same computing resources as inserting 10 records. Therefore, the larger the number of records per insert, the higher the insertion efficiency. If an insert request has more than 200 records, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement for front-end data collection, because records need to be cached and then inserted in batches. +- **Query**: TDengine provides efficient queries, but the queries in each scenario vary greatly, and so does the query frequency, making it difficult to give objective figures. Users need to write some query statements for their own scenarios to estimate. 
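The insertion sizing above assumes that records are batched into each request. A hedged C sketch of such batching follows; the table name `d1001` and its `(ts, voltage)` shape are illustrative assumptions:

```c
// Builds one INSERT carrying n records, amortizing per-request overhead.
#include <inttypes.h>
#include <stdio.h>
#include <taos.h>

void insert_batch(TAOS *taos, int64_t start_ts, int n) {
  static char sql[65536];  // static: kept off the stack; not reentrant (sketch only)
  int len = snprintf(sql, sizeof(sql), "insert into d1001 values");
  for (int i = 0; i < n; i++) {  // e.g. n >= 200 records per request
    len += snprintf(sql + len, sizeof(sql) - len,
                    " (%" PRId64 ", %d)", start_ts + i, 219 + (i % 3));
  }
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```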
Therefore, only for data insertion, CPU can be estimated, but the computing resources consumed by query cannot be that clear. In the actual operation, it is not recommended to make CPU utilization rate over 50%. After that, new nodes need to be added to bring more computing resources. @@ -78,7 +78,7 @@ When the nodes in TDengine cluster are deployed on different physical machines a ## Server-side Configuration -The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified by executing the parameter -c from the taosd command line. Such as taosd-c/home/user, to specify that the configuration file is located in the /home/user directory. +The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified by executing the parameter `-c` from the taosd command line. For example, `taosd -c /home/user` specifies that the configuration file is located in the /home/user directory. You can also use “-C” to show the current server configuration parameters: ```bash taosd -C ``` Only some important configuration parameters are listed below. For more parameters, please refer to the instructions in the configuration file. Please refer to the previous chapters for detailed introduction and function of each parameter, and the default of these parameters is working and generally does not need to be set. **Note: After the configuration is modified, \*taosd service\* needs to be restarted to take effect.** -- firstEp: end point of the first dnode in the actively connected cluster when taosd starts, the default value is localhost: 6030. -- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you are accustomed to IP address access, you can set it to the IP address of the node. +- firstEp: end point of the first dnode which will be connected in the cluster when taosd starts; the default value is localhost:6030. +- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you want to access via IP address directly, you can set it to the IP address of the node. - serverPort: the port number of the external service after taosd started, the default value is 6030. - httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041. - dataDir: the data file directory to which all data files will be written. [Default:/var/lib/taos](http://default/var/lib/taos). - logDir: the log file directory to which the running log files of the client and server will be written. [Default:/var/log/taos](http://default/var/log/taos). -- arbitrator: the end point of the arbiter in the system; the default value is null. -- role: optional role for dnode. 0-any; it can be used as an mnode and to allocate vnodes; 1-mgmt; It can only be an mnode, but not to allocate vnodes; 2-dnode; caannot be an mnode, only vnode can be allocated +- arbitrator: the end point of the arbitrator in the system; the default value is null. +- role: optional role for dnode. 
0-any; it can be used as an mnode and to allocate vnodes; 1-mgmt; it can only be an mnode, but cannot allocate vnodes; 2-dnode; cannot be an mnode, only vnodes can be allocated - debugFlage: run the log switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default value: 131 or 135 (different modules have different default values). - numOfLogLines: the maximum number of lines allowed for a single log file. Default: 10,000,000 lines. - logKeepDays: the maximum retention time of the log file. When it is greater than 0, the log file will be renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the log file in seconds. Default: 0 days. @@ -161,18 +161,18 @@ For example: ## Client Configuration -The foreground interactive client application of TDengine system is taos and application driver, which shares the same configuration file taos.cfg with taosd. When running taos, use the parameter -c to specify the configuration file directory, such as taos-c/home/cfg, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information taos --help. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg. +The foreground interactive client application of TDengine system is taos and application driver, which shares the same configuration file taos.cfg with taosd. When running taos, use the parameter `-c` to specify the configuration file directory, such as `taos -c /home/cfg`, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information `taos --help`. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg. **Versions after 2.0. 10.0 support the following parameters on command line to display the current client configuration parameters** ```bash -taos -C 或 taos --dump-config +taos -C or taos --dump-config ``` Client configuration parameters: - firstEp: end point of the first taosd instance in the actively connected cluster when taos is started, the default value is localhost: 6030. - secondEp: when taos starts, if not impossible to connect to firstEp, it will try to connect to secondEp. +- secondEp: when taos starts, if unable to connect to firstEp, it will try to connect to secondEp. - locale Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API @@ -493,4 +493,4 @@ At the moment, TDengine has nearly 200 internal reserved keywords, which cannot | CONCAT | GLOB | METRICS | SET | VIEW | | CONFIGS | GRANTS | MIN | SHOW | WAVG | | CONFLICT | GROUP | MINUS | SLASH | WHERE | -| CONNECTION | | | | | +| CONNECTION | | | | | \ No newline at end of file diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index dfa1742c999adbf4a3e7846955dc8a564339d0c2..7aaeb6c32b25cef8f0d1bf2f67ef94c3a2a007ee 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -1,8 +1,8 @@ # TAOS SQL -TDengine provides a SQL-style language, TAOS SQL, to insert or query data, and support other common tips. 
To finish this document, you should have some understanding about SQL. +TDengine provides a SQL-style language, TAOS SQL, to insert or query data. This document introduces TAOS SQL and other common tips. To read through this document, readers should have a basic understanding of SQL. -TAOS SQL is the main tool for users to write and query data to TDengine. TAOS SQL provides a style and mode similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion function for temporal structured data, the relevant function of data deletion is non-existent in TAO SQL. +TAOS SQL is the main tool for users to write and query data into/from TDengine. TAOS SQL provides a syntax style similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion functionality for time-series data, the relevant functions of data deletion are unsupported in TAOS SQL. Let’s take a look at the conventions used for syntax descriptions. @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units. -Default time precision of TDengine is millisecond, you can change it to microseocnd by setting parameter enableMicrosecond. +TDengine's timestamp is set to millisecond accuracy by default. Microsecond/nanosecond accuracy can be set using CREATE DATABASE with the PRECISION parameter. (Nanosecond resolution is supported from version 2.1.5.0 onwards.) In TDengine, the following 10 data types can be used in data model of an ordinary table. @@ -127,7 +127,7 @@ Note: ALTER DATABASE db_name CACHELAST 0; ``` CACHELAST parameter controls whether last_row of the data subtable is cached in memory. The default value is 0, and the value range is [0, 1]. Where 0 means not enabled and 1 means enabled. (supported from version 2.0. 11) - + **Tips**: After all the above parameters are modified, show databases can be used to confirm whether the modification is successful. - **Show all databases in system** @@ -138,14 +138,17 @@ Note: ## Table Management -- Create a table -Note: +- **Create a table** -1. The first field must be a timestamp, and system will set it as the primary key; -2. The max length of table name is 192; -3. The length of each row of the table cannot exceed 16k characters; -4. Sub-table names can only consist of letters, numbers, and underscores, and cannot begin with numbers -5. If the data type binary or nchar is used, the maximum number of bytes should be specified, such as binary (20), which means 20 bytes; + ```mysql + CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]); + ``` + Note: + 1. 
The first field must be a timestamp, and the system will set it as the primary key; + 2. The max length of table name is 192; + 3. The length of each row of the table cannot exceed 16k characters; + 4. Sub-table names can only consist of letters, numbers, and underscores, and cannot begin with numbers + 5. If the data type binary or nchar is used, the maximum number of bytes should be specified, such as binary (20), which means 20 bytes; - **Create a table via STable** @@ -171,10 +174,10 @@ Note: Note: 1. The method of batch creating tables requires that the data table must use STable as a template. - 2. On the premise of not exceeding the length limit of SQL statements, it is suggested that the number of tables in a single statement should be controlled between 1000 and 3000, which will obtain an ideal speed of table building. + 2. On the premise of not exceeding the length limit of SQL statements, it is suggested that the number of tables in a single statement should be controlled between 1000 and 3000, which will obtain an ideal speed of table creation. - **Drop a table** - + ```mysql DROP TABLE [IF EXISTS] tb_name; ``` @@ -218,7 +221,7 @@ Note: ## STable Management -Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That is, in the instruction description later in this section, the three instructions of CREATE, DROP and ALTER need to write TABLE instead of STABLE in the old version as the reserved word. +Note: In 2.0.15.0 and later versions, the STABLE reserved word is supported. In older versions, the three instructions CREATE, DROP and ALTER described later in this section need to use TABLE instead of STABLE as the reserved word. - **Create a STable** @@ -290,7 +293,7 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That Modify a tag name of STable. After modifying, all sub-tables under the STable will automatically update the new tag name. - **Modify a tag value of sub-table** - + ```mysql ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ``` @@ -306,7 +309,7 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That Insert a record into table tb_name. - **Insert a record with data corresponding to a given column** - + ```mysql INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...); ``` @@ -320,14 +323,14 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That Insert multiple records into table tb_name. - **Insert multiple records into a given column** - + ```mysql INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...; ``` Insert multiple records into a given column of table tb_name. - **Insert multiple records into multiple tables** - + ```mysql INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ... tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...; @@ -421,7 +424,7 @@ taos> SELECT * FROM d1001; Query OK, 3 row(s) in set (0.001165s) ``` -For Stables, wildcards contain *tag columns*. +For STables, wildcards contain *tag columns*. 
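Since wildcards on a STable also return tag columns, a C client can discover them at runtime from the result metadata. A minimal sketch, reusing the `meters` STable that serves as the running example of this chapter:

```c
// taos_fetch_fields() returns data columns followed by tag columns for
// a wildcard query against a STable.
#include <stdio.h>
#include <taos.h>

void dump_meters(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "select * from meters;");
  int         ncols  = taos_num_fields(res);     // data + tag columns
  TAOS_FIELD *fields = taos_fetch_fields(res);
  for (int i = 0; i < ncols; i++) {
    printf("column %d: %s\n", i, fields[i].name);
  }
  TAOS_ROW row;
  char line[1024];
  while ((row = taos_fetch_row(res)) != NULL) {
    taos_print_row(line, row, fields, ncols);    // format one row as text
    printf("%s\n", line);
  }
  taos_free_result(res);
}
```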
```mysql taos> SELECT * FROM meters; @@ -720,7 +723,7 @@ TDengine supports aggregations over data, they are listed below: ================================================ 9 | 9 | Query OK, 1 row(s) in set (0.004475s) - + taos> SELECT COUNT(*), COUNT(voltage) FROM d1001; count(*) | count(voltage) | ================================================ @@ -758,7 +761,7 @@ TDengine supports aggregations over data, they are listed below: ``` - **TWA** - + ```mysql SELECT TWA(field_name) FROM tb_name WHERE clause; ``` @@ -799,7 +802,7 @@ TDengine supports aggregations over data, they are listed below: ================================================================================ 35.200000763 | 658 | 0.950000018 | Query OK, 1 row(s) in set (0.000980s) - ``` + ``` - **STDDEV** @@ -896,7 +899,7 @@ TDengine supports aggregations over data, they are listed below: ====================================== 13.40000 | 223 | Query OK, 1 row(s) in set (0.001123s) - + taos> SELECT MAX(current), MAX(voltage) FROM d1001; max(current) | max(voltage) | ====================================== @@ -937,8 +940,6 @@ TDengine supports aggregations over data, they are listed below: Query OK, 1 row(s) in set (0.001023s) ``` -- - - **LAST** ```mysql @@ -972,7 +973,7 @@ TDengine supports aggregations over data, they are listed below: ``` - **TOP** - + ```mysql SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` @@ -1029,7 +1030,7 @@ TDengine supports aggregations over data, they are listed below: 2018-10-03 14:38:15.000 | 218 | 2018-10-03 14:38:16.650 | 218 | Query OK, 2 row(s) in set (0.001332s) - + taos> SELECT BOTTOM(current, 2) FROM d1001; ts | bottom(current, 2) | ================================================= @@ -1092,7 +1093,7 @@ TDengine supports aggregations over data, they are listed below: ======================= 12.30000 | Query OK, 1 row(s) in set (0.001238s) - + taos> SELECT LAST_ROW(current) FROM d1002; last_row(current) | ======================= @@ -1146,7 +1147,7 @@ TDengine supports aggregations over data, they are listed below: ============================ 5.000000000 | Query OK, 1 row(s) in set (0.001792s) - + taos> SELECT SPREAD(voltage) FROM d1001; spread(voltage) | ============================ @@ -1172,7 +1173,7 @@ TDengine supports aggregations over data, they are listed below: ## Time-dimension Aggregation -TDengine supports aggregating by intervals. Data in a table can partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down sample operation, and the syntax is as follows: +TDengine supports aggregating by intervals (time range). Data in a table can be partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down sample operation, and the syntax is as follows: ```mysql SELECT function_list FROM tb_name @@ -1235,11 +1236,11 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P **Restrictions on group by** -TAOS SQL supports group by operation on tags, tbnames and ordinary columns, required that only one column and whichhas less than 100,000 unique values. +TAOS SQL supports the group by operation on tags, tbnames and ordinary columns, requiring that only one column is used and that it has less than 100,000 unique values. 
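A down sampling query like the one described above can also be issued through the C API. This is a hedged sketch reusing the `meters` example; the time range and the FILL mode are illustrative assumptions:

```c
// INTERVAL partitions the time range into 5-second windows; FILL(PREV)
// repeats the previous value for windows that have no data.
#include <stdio.h>
#include <taos.h>

void downsample(TAOS *taos) {
  const char *sql =
      "select avg(current), max(voltage) from meters "
      "where ts >= '2018-10-03 14:38:05' and ts <= '2018-10-03 14:38:16' "
      "interval(5s) fill(prev);";
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```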
**Restrictions on join operation** -TAOS SQL supports join columns of two tables by Primary Key timestamp between them, and does not support four operations after tables aggregated for the time being. +TAOS SQL supports joining columns of two tables by the Primary Key timestamp between them, and for the time being does not support the four arithmetic operations on tables after aggregation. **Availability of is no null** diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index e116d72d2649940f9d272b8d3d01e34576a4049d..9c6a6e62f5b5fda1cfbaf1b5fff9593a5e349271 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -102,6 +102,12 @@ elif echo $osinfo | grep -qwi "centos" ; then elif echo $osinfo | grep -qwi "fedora" ; then # echo "This is fedora system" os_type=2 +elif echo $osinfo | grep -qwi "Linx" ; then +# echo "This is Linx system" + os_type=1 + service_mod=0 + initd_mod=0 + service_config_dir="/etc/systemd/system" else echo " osinfo: ${osinfo}" echo " This is an officially unverified linux system," diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index aa09013e538253b8740a0aaf70d04358320a6dd8..3df7013b197baaf4d78bb0f0ae5d507d6be92715 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -128,8 +128,12 @@ function install_lib() { ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi - - ${csudo} ldconfig + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi } function install_header() { diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 0849a76e31a631bae7a6ba0d6ef7ffcb58b8480b..d400d0b91a2d02e9b3e0232d67e2ed6b00cdf541 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -20,44 +20,33 @@ fi # Dynamic directory - if [ "$osType" != "Darwin" ]; then data_dir="/var/lib/taos" log_dir="/var/log/taos" -else - data_dir="/usr/local/var/lib/taos" - log_dir="/usr/local/var/log/taos" -fi -if [ "$osType" != "Darwin" ]; then cfg_install_dir="/etc/taos" -else - cfg_install_dir="/usr/local/etc/taos" -fi -if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" + + install_main_dir="/usr/local/taos" + + bin_dir="/usr/local/taos/bin" else + data_dir="/usr/local/var/lib/taos" + log_dir="/usr/local/var/log/taos" + + cfg_install_dir="/usr/local/etc/taos" + bin_link_dir="/usr/local/bin" lib_link_dir="/usr/local/lib" inc_link_dir="/usr/local/include" -fi -#install main path -if [ "$osType" != "Darwin" ]; then - install_main_dir="/usr/local/taos" -else install_main_dir="/usr/local/Cellar/tdengine/${verNumber}" -fi -# old bin dir -if [ "$osType" != "Darwin" ]; then - bin_dir="/usr/local/taos/bin" -else - bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin" + bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin" fi service_config_dir="/etc/systemd/system" @@ -254,7 +243,10 @@ function install_lib() { ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so fi else - ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* 
${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi install_jemalloc diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 0d06e5d39c0ed1916e0c2af7ccce5918e31ac42f..df0cf15fe00eda5bc8e6004e2794733537b8aee0 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -13,13 +13,13 @@ IF (TD_LINUX) # set the static lib name ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt ${VAR_TSZ}) + TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt cJson ${VAR_TSZ}) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) # generate dynamic library (*.so) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt) + TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt cJson) IF (TD_LINUX_64) TARGET_LINK_LIBRARIES(taos lua) ENDIF () @@ -39,13 +39,13 @@ ELSEIF (TD_DARWIN) # set the static lib name ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) #set version of .dylib @@ -63,26 +63,26 @@ ELSEIF (TD_WINDOWS) CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/client/src/taos.rc.in" "${TD_COMMUNITY_DIR}/src/client/src/taos.rc") ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static trpc tutil query) + TARGET_LINK_LIBRARIES(taos_static trpc tutil query cJson) # generate dynamic library (*.dll) ADD_LIBRARY(taos SHARED ${SRC} ${TD_COMMUNITY_DIR}/src/client/src/taos.rc) IF (NOT TD_GODLL) SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) ENDIF () - TARGET_LINK_LIBRARIES(taos trpc tutil query lua) + TARGET_LINK_LIBRARIES(taos trpc tutil query lua cJson) ELSEIF (TD_DARWIN) SET(CMAKE_MACOSX_RPATH 1) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) diff --git a/src/client/inc/tscParseLine.h b/src/client/inc/tscParseLine.h new file mode 100644 index 0000000000000000000000000000000000000000..401dcafdfbefd28e79ebdf30d810e194564a5056 --- /dev/null +++ b/src/client/inc/tscParseLine.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2021 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef TDENGINE_TSCPARSELINE_H +#define TDENGINE_TSCPARSELINE_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + char* key; + uint8_t type; + int16_t length; + char* value; +} TAOS_SML_KV; + +typedef struct { + char* stableName; + + char* childTableName; + TAOS_SML_KV* tags; + int32_t tagNum; + + // first kv must be timestamp + TAOS_SML_KV* fields; + int32_t fieldNum; +} TAOS_SML_DATA_POINT; + +typedef enum { + SML_TIME_STAMP_NOW, + SML_TIME_STAMP_SECONDS, + SML_TIME_STAMP_MILLI_SECONDS, + SML_TIME_STAMP_MICRO_SECONDS, + SML_TIME_STAMP_NANO_SECONDS +} SMLTimeStampType; + +typedef struct { + uint64_t id; + SHashObj* smlDataToSchema; +} SSmlLinesInfo; + +int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info); +bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info); +int32_t isValidChildTableName(const char *pTbName, int16_t len); + +bool convertSmlValueType(TAOS_SML_KV *pVal, char *value, + uint16_t len, SSmlLinesInfo* info); +int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value, + uint16_t len, SSmlLinesInfo* info); + +void destroySmlDataPoint(TAOS_SML_DATA_POINT* point); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCPARSELINE_H diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index c59ec3e624ec84a76ae805d969a6b319667fb1af..c858bd5867c64da4c7397aed2035119ff414d112 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -92,7 +92,7 @@ typedef struct SMergeTsCtx { }SMergeTsCtx; typedef struct SVgroupTableInfo { - SVgroupInfo vgInfo; + SVgroupMsg vgInfo; SArray *itemList; // SArray<STableKeyInfo> } SVgroupTableInfo; @@ -174,7 +174,9 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo); bool tscIsInsertData(char* sqlstr); -int tscAllocPayload(SSqlCmd* pCmd, int size); +// the memory is not reset in case of fast allocate payload function +int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size); +int32_t tscAllocPayload(SSqlCmd* pCmd, int size); TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes); @@ -288,7 +290,11 @@ void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo); SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); void* tscVgroupInfoClear(SVgroupsInfo *pInfo); + +#if 0 void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src); +#endif + /** * The create object function must be successful expect for the out of memory issue. 
* @@ -318,6 +324,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlC int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid); int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId); +int32_t doInitSubState(SSqlObj* pSql, int32_t numOfSubqueries); void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index b8eb0a5286a7b72b3ddd1d34b103e5b6239a496c..ff796cdcbf5cdc7a3d9fbc313226cb9e013b0eda 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -234,7 +234,6 @@ typedef struct STableDataBlocks { typedef struct { STableMeta *pTableMeta; SArray *vgroupIdList; -// SVgroupsInfo *pVgroupsInfo; } STableMetaVgroupInfo; typedef struct SInsertStatementParam { @@ -286,20 +285,14 @@ typedef struct { int32_t resColumnId; } SSqlCmd; -typedef struct SResRec { - int numOfRows; - int numOfTotal; -} SResRec; - typedef struct { int32_t numOfRows; // num of results in current retrieval - int64_t numOfRowsGroup; // num of results of current group int64_t numOfTotal; // num of total results int64_t numOfClauseTotal; // num of total result in current subclause char * pRsp; int32_t rspType; int32_t rspLen; - uint64_t qId; + uint64_t qId; // query id of SQInfo int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int32_t row; @@ -307,8 +300,6 @@ typedef struct { int16_t precision; bool completed; int32_t code; - int32_t numOfGroups; - SResRec * pGroupRec; char * data; TAOS_ROW tsrow; TAOS_ROW urow; @@ -316,8 +307,7 @@ typedef struct { char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex* pColumnIndex; - TAOS_FIELD* final; - SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions + TAOS_FIELD* final; struct SGlobalMerger *pMerger; } SSqlRes; @@ -377,7 +367,6 @@ typedef struct SSqlObj { tsem_t rspSem; SSqlCmd cmd; SSqlRes res; - bool isBind; SSubqueryState subState; struct SSqlObj **pSubs; diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 6b12cd0da04c0f791201182c793d647fc54c00b1..4a621d47c0dcae4c2765d53b0d5b650e22d64a58 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -60,17 +60,25 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); pCmd->resColumnId = TSDB_RES_COL_ID; + taosAcquireRef(tscObjRef, pSql->self); + int32_t code = tsParseSql(pSql, true); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return; + + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } if (code != TSDB_CODE_SUCCESS) { pSql->res.code = code; tscAsyncResultOnError(pSql); + taosReleaseRef(tscObjRef, pSql->self); return; } SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); executeQuery(pSql, pQueryInfo); + taosReleaseRef(tscObjRef, pSql->self); } // TODO return the correct error code to client in tscQueueAsyncError diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index 778c0cfb47ad255663b403412931be71a3200d2a..e26e439492cec9c83b624c2bbb2bbc3a95de97b0 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -17,6 +17,7 @@ #include "tscLog.h" #include "taos.h" +#include "tscParseLine.h" typedef struct { char sTableName[TSDB_TABLE_NAME_LEN]; @@ -27,38 +28,6 @@ typedef struct { uint8_t precision; } SSmlSTableSchema; 
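For context, the structures above back the public schemaless entry point `taos_insert_lines()` defined later in this file. A hedged usage sketch follows; the sample line (its value-type suffixes and the `ns` timestamp suffix) follows the convention this parser accepts, but the names and values are illustrative only:

```c
// One schemaless line: measurement, tags, fields, timestamp.
#include <taos.h>

int insert_one_line(TAOS *taos) {
  char *lines[] = {
      "st,t1=3i64,t2=4f64 c1=3i64,c2=false,c3=\"passit\" 1626006833639000000ns"
  };
  return taos_insert_lines(taos, lines, 1);  // 0 on success, TSDB_CODE_* otherwise
}
```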
-typedef struct { - char* key; - uint8_t type; - int16_t length; - char* value; -} TAOS_SML_KV; - -typedef struct { - char* stableName; - - char* childTableName; - TAOS_SML_KV* tags; - int32_t tagNum; - - // first kv must be timestamp - TAOS_SML_KV* fields; - int32_t fieldNum; -} TAOS_SML_DATA_POINT; - -typedef enum { - SML_TIME_STAMP_NOW, - SML_TIME_STAMP_SECONDS, - SML_TIME_STAMP_MILLI_SECONDS, - SML_TIME_STAMP_MICRO_SECONDS, - SML_TIME_STAMP_NANO_SECONDS -} SMLTimeStampType; - -typedef struct { - uint64_t id; - SHashObj* smlDataToSchema; -} SSmlLinesInfo; - //================================================================================================= static uint64_t linesSmlHandleId = 0; @@ -1565,8 +1534,8 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str, SSmlLinesInfo* info) return true; } //len does not include '\0' from value. -static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value, - uint16_t len, SSmlLinesInfo* info) { +bool convertSmlValueType(TAOS_SML_KV *pVal, char *value, + uint16_t len, SSmlLinesInfo* info) { if (len <= 0) { return false; } @@ -1708,7 +1677,7 @@ static int32_t getTimeStampValue(char *value, uint16_t len, if (len >= 2) { for (int i = 0; i < len - 2; ++i) { if(!isdigit(value[i])) { - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return TSDB_CODE_TSC_INVALID_TIME_STAMP; } } } @@ -1743,20 +1712,20 @@ static int32_t getTimeStampValue(char *value, uint16_t len, break; } default: { - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return TSDB_CODE_TSC_INVALID_TIME_STAMP; } } return TSDB_CODE_SUCCESS; } -static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value, - uint16_t len, SSmlLinesInfo* info) { +int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value, + uint16_t len, SSmlLinesInfo* info) { int32_t ret; SMLTimeStampType type; int64_t tsVal; if (!isTimeStamp(value, len, &type)) { - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return TSDB_CODE_TSC_INVALID_TIME_STAMP; } ret = getTimeStampValue(value, len, type, &tsVal); @@ -1805,7 +1774,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLine return ret; } -static bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) { +bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) { char *val = NULL; char *cur = key; char keyLower[TSDB_COL_NAME_LEN]; @@ -1842,7 +1811,7 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash while (*cur != '\0') { if (len > TSDB_COL_NAME_LEN) { tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id); - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; } //unescaped '=' identifies a tag key if (*cur == '=' && *(cur - 1) != '\\') { @@ -1902,7 +1871,7 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index, free(pKV->key); pKV->key = NULL; free(value); - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return TSDB_CODE_TSC_INVALID_VALUE; } free(value); @@ -1931,7 +1900,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id); free(pSml->stableName); pSml->stableName = NULL; - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; } //first unescaped comma or space identifies measurement //if space detected first, meaning no tag in the input @@ -1958,7 +1927,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index } //Table name can only contain 
digits(0-9),alphebet(a-z),underscore(_) -static int32_t isValidChildTableName(const char *pTbName, int16_t len) { +int32_t isValidChildTableName(const char *pTbName, int16_t len) { const char *cur = pTbName; for (int i = 0; i < len; ++i) { if(!isdigit(cur[i]) && !isalpha(cur[i]) && (cur[i] != '_')) { @@ -2146,24 +2115,25 @@ int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* faile if (code != TSDB_CODE_SUCCESS) { tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]); destroySmlDataPoint(&point); - return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + return code; } else { tscDebug("SML:0x%"PRIx64" data point line parse success. line %d", info->id, i); } taosArrayPush(points, &point); } - return 0; + return TSDB_CODE_SUCCESS; } int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { int32_t code = 0; - SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo)); + SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo)); info->id = genLinesSmlId(); if (numLines <= 0 || numLines > 65536) { tscError("SML:0x%"PRIx64" taos_insert_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines); + tfree(info); code = TSDB_CODE_TSC_APP_ERROR; return code; } @@ -2171,7 +2141,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { for (int i = 0; i < numLines; ++i) { if (lines[i] == NULL) { tscError("SML:0x%"PRIx64" taos_insert_lines line %d is NULL", info->id, i); - free(info); + tfree(info); code = TSDB_CODE_TSC_APP_ERROR; return code; } } @@ -2180,7 +2150,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT)); if (lpPoints == NULL) { tscError("SML:0x%"PRIx64" taos_insert_lines failed to allocate memory", info->id); - free(info); + tfree(info); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -2208,7 +2178,7 @@ cleanup: taosArrayDestroy(lpPoints); - free(info); + tfree(info); return code; } diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c new file mode 100644 index 0000000000000000000000000000000000000000..12f8ec84fd301d2d8851e92826cfcb3d860c1ceb --- /dev/null +++ b/src/client/src/tscParseOpenTSDB.c @@ -0,0 +1,973 @@ +#include <ctype.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "cJSON.h" +#include "hash.h" +#include "taos.h" + +#include "tscUtil.h" +#include "tsclient.h" +#include "tscLog.h" + +#include "tscParseLine.h" + +#define OTD_MAX_FIELDS_NUM 2 +#define OTD_JSON_SUB_FIELDS_NUM 2 +#define OTD_JSON_FIELDS_NUM 4 + +#define OTD_TIMESTAMP_COLUMN_NAME "ts" +#define OTD_METRIC_VALUE_COLUMN_NAME "value" + +/* telnet style API parser */ +static uint64_t HandleId = 0; + +static uint64_t genUID() { + uint64_t id; + + do { + id = atomic_add_fetch_64(&HandleId, 1); + } while (id == 0); + + return id; +} + +static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, SSmlLinesInfo* info) { + const char *cur = *index; + uint16_t len = 0; + + pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + 1, 1); // +1 to avoid 1772 line over write + if (pSml->stableName == NULL){ + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + if (isdigit(*cur)) { + tscError("OTD:0x%"PRIx64" Metric cannot start with digit", info->id); + tfree(pSml->stableName); + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + + while (*cur != '\0') { + if (len > TSDB_TABLE_NAME_LEN) { + tscError("OTD:0x%"PRIx64" Metric cannot exceed 193 characters", info->id); + tfree(pSml->stableName); + return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; + } + + if (*cur == ' 
') { + break; + } + + pSml->stableName[len] = *cur; + cur++; + len++; + } + if (len == 0 || *cur == '\0') { + tfree(pSml->stableName); + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + + pSml->stableName[len] = '\0'; + *index = cur + 1; + tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len); + + return TSDB_CODE_SUCCESS; +} + +static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **index, SSmlLinesInfo* info) { + //Timestamp must be the first KV to parse + assert(*num_kvs == 0); + + const char *start, *cur; + int32_t ret = TSDB_CODE_SUCCESS; + int len = 0; + char key[] = OTD_TIMESTAMP_COLUMN_NAME; + char *value = NULL; + + start = cur = *index; + //allocate fields for timestamp and value + *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV)); + + while(*cur != '\0') { + if (*cur == ' ') { + break; + } + cur++; + len++; + } + + if (len > 0 && *cur != '\0') { + value = tcalloc(len + 1, 1); + memcpy(value, start, len); + } else { + tfree(*pTS); + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + + ret = convertSmlTimeStamp(*pTS, value, len, info); + if (ret) { + tfree(value); + tfree(*pTS); + return ret; + } + tfree(value); + + (*pTS)->key = tcalloc(sizeof(key), 1); + memcpy((*pTS)->key, key, sizeof(key)); + + *num_kvs += 1; + *index = cur + 1; + + return ret; +} + +static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, SSmlLinesInfo* info) { + //skip timestamp + TAOS_SML_KV *pVal = *pKVs + 1; + const char *start, *cur; + int32_t ret = TSDB_CODE_SUCCESS; + int len = 0; + char key[] = OTD_METRIC_VALUE_COLUMN_NAME; + char *value = NULL; + + start = cur = *index; + + while(*cur != '\0') { + if (*cur == ' ') { + break; + } + cur++; + len++; + } + + if (len > 0 && *cur != '\0') { + value = tcalloc(len + 1, 1); + memcpy(value, start, len); + } else { + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + + if (!convertSmlValueType(pVal, value, len, info)) { + tscError("OTD:0x%"PRIx64" Failed to convert metric value string(%s) to any type", + info->id, value); + tfree(value); + return TSDB_CODE_TSC_INVALID_VALUE; + } + tfree(value); + + pVal->key = tcalloc(sizeof(key), 1); + memcpy(pVal->key, key, sizeof(key)); + *num_kvs += 1; + + *index = cur + 1; + return ret; +} + +static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) { + const char *cur = *index; + char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write + uint16_t len = 0; + + //key field cannot start with digit + if (isdigit(*cur)) { + tscError("OTD:0x%"PRIx64" Tag key cannot start with digit", info->id); + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + while (*cur != '\0') { + if (len > TSDB_COL_NAME_LEN) { + tscError("OTD:0x%"PRIx64" Tag key cannot exceed 65 characters", info->id); + return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; + } + if (*cur == '=') { + break; + } + + key[len] = *cur; + cur++; + len++; + } + if (len == 0 || *cur == '\0') { + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } + key[len] = '\0'; + + if (checkDuplicateKey(key, pHash, info)) { + return TSDB_CODE_TSC_DUP_TAG_NAMES; + } + + pKV->key = tcalloc(len + 1, 1); + memcpy(pKV->key, key, len + 1); + //tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len); + *index = cur + 1; + return TSDB_CODE_SUCCESS; +} + + +static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index, + bool *is_last_kv, SSmlLinesInfo* info) { + const char *start, *cur; + char *value = NULL; + uint16_t len = 0; + start = cur = 
+
+  while (1) {
+    // ',' or '\0' identifies a value
+    if (*cur == ',' || *cur == '\0') {
+      // '\0' indicates end of value
+      *is_last_kv = (*cur == '\0') ? true : false;
+      break;
+    }
+    cur++;
+    len++;
+  }
+
+  if (len == 0) {
+    tfree(pKV->key);
+    return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+  }
+
+  value = tcalloc(len + 1, 1);
+  memcpy(value, start, len);
+  value[len] = '\0';
+  if (!convertSmlValueType(pKV, value, len, info)) {
+    tscError("OTD:0x%"PRIx64" Failed to convert sml value string(%s) to any type",
+            info->id, value);
+    //free previously allocated key field
+    tfree(pKV->key);
+    tfree(value);
+    return TSDB_CODE_TSC_INVALID_VALUE;
+  }
+  tfree(value);
+
+  *index = (*cur == '\0') ? cur : cur + 1;
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
+                                 const char **index, char **childTableName,
+                                 SHashObj *pHash, SSmlLinesInfo* info) {
+  const char *cur = *index;
+  int32_t ret = TSDB_CODE_SUCCESS;
+  TAOS_SML_KV *pkv;
+  bool is_last_kv = false;
+
+  int32_t capacity = 4;
+  *pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV));
+  pkv = *pKVs;
+
+  while (*cur != '\0') {
+    ret = parseTelnetTagKey(pkv, &cur, pHash, info);
+    if (ret) {
+      tscError("OTD:0x%"PRIx64" Unable to parse key", info->id);
+      return ret;
+    }
+    ret = parseTelnetTagValue(pkv, &cur, &is_last_kv, info);
+    if (ret) {
+      tscError("OTD:0x%"PRIx64" Unable to parse value", info->id);
+      return ret;
+    }
+    if ((strcasecmp(pkv->key, "ID") == 0) && pkv->type == TSDB_DATA_TYPE_BINARY) {
+      ret = isValidChildTableName(pkv->value, pkv->length);
+      if (ret) {
+        return ret;
+      }
+      *childTableName = malloc(pkv->length + 1);
+      memcpy(*childTableName, pkv->value, pkv->length);
+      (*childTableName)[pkv->length] = '\0';
+      tfree(pkv->key);
+      tfree(pkv->value);
+    } else {
+      *num_kvs += 1;
+    }
+
+    if (is_last_kv) {
+      break;
+    }
+
+    //reallocate additional memory for more kvs
+    if ((*num_kvs + 1) > capacity) {
+      TAOS_SML_KV *more_kvs = NULL;
+      capacity *= 3; capacity /= 2;
+      more_kvs = realloc(*pKVs, capacity * sizeof(TAOS_SML_KV));
+      if (!more_kvs) {
+        return TSDB_CODE_TSC_OUT_OF_MEMORY;
+      }
+      *pKVs = more_kvs;
+    }
+
+    //move pkv to the next TAOS_SML_KV block
+    pkv = *pKVs + *num_kvs;
+  }
+
+  return ret;
+}
+
+int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
+  const char* index = line;
+  int32_t ret = TSDB_CODE_SUCCESS;
+
+  //Parse metric
+  ret = parseTelnetMetric(smlData, &index, info);
+  if (ret) {
+    tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id);
+    return ret;
+  }
+  tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id);
+
+  //Parse timestamp
+  ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &index, info);
+  if (ret) {
+    tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id);
+    return ret;
+  }
+  tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id);
+
+  //Parse value
+  ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &index, info);
+  if (ret) {
+    tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id);
+    return ret;
+  }
+  tscDebug("OTD:0x%"PRIx64" Parse metric value finished", info->id);
+
+  //Parse tagKVs
+  SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+  ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &index, &smlData->childTableName, keyHashTable, info);
+  if (ret) {
+    tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id);
+    taosHashCleanup(keyHashTable);
+    return ret;
+  }
+  tscDebug("OTD:0x%"PRIx64" 
Parse tags finished", info->id); + taosHashCleanup(keyHashTable); + + + return TSDB_CODE_SUCCESS; +} + +int32_t tscParseTelnetLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) { + for (int32_t i = 0; i < numLines; ++i) { + TAOS_SML_DATA_POINT point = {0}; + int32_t code = tscParseTelnetLine(lines[i], &point, info); + if (code != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]); + destroySmlDataPoint(&point); + return code; + } else { + tscDebug("OTD:0x%"PRIx64" data point line parse success. line %d", info->id, i); + } + + taosArrayPush(points, &point); + } + return TSDB_CODE_SUCCESS; +} + +int taos_insert_telnet_lines(TAOS* taos, char* lines[], int numLines) { + int32_t code = 0; + + SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo)); + info->id = genUID(); + + if (numLines <= 0 || numLines > 65536) { + tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines); + tfree(info); + code = TSDB_CODE_TSC_APP_ERROR; + return code; + } + + for (int i = 0; i < numLines; ++i) { + if (lines[i] == NULL) { + tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines line %d is NULL", info->id, i); + tfree(info); + code = TSDB_CODE_TSC_APP_ERROR; + return code; + } + } + + SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT)); + if (lpPoints == NULL) { + tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines failed to allocate memory", info->id); + tfree(info); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines begin inserting %d lines, first line: %s", info->id, numLines, lines[0]); + code = tscParseTelnetLines(lines, numLines, lpPoints, NULL, info); + size_t numPoints = taosArrayGetSize(lpPoints); + + if (code != 0) { + goto cleanup; + } + + TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints); + code = tscSmlInsert(taos, points, (int)numPoints, info); + if (code != 0) { + tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines error: %s", info->id, tstrerror((code))); + } + +cleanup: + tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines finish inserting %d lines. 
code: %d", info->id, numLines, code); + points = TARRAY_GET_START(lpPoints); + numPoints = taosArrayGetSize(lpPoints); + for (int i = 0; i < numPoints; ++i) { + destroySmlDataPoint(points+i); + } + + taosArrayDestroy(lpPoints); + + tfree(info); + return code; +} + +int taos_telnet_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) { + SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo)); + info->id = genUID(); + int code = tscSmlInsert(taos, points, numPoint, info); + tfree(info); + return code; +} + + +/* telnet style API parser */ +int32_t parseMetricFromJSON(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlLinesInfo* info) { + cJSON *metric = cJSON_GetObjectItem(root, "metric"); + if (!cJSON_IsString(metric)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + size_t stableLen = strlen(metric->valuestring); + if (stableLen > TSDB_TABLE_NAME_LEN) { + tscError("OTD:0x%"PRIx64" Metric cannot exceeds 193 characters in JSON", info->id); + return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; + } + + pSml->stableName = tcalloc(stableLen + 1, sizeof(char)); + if (pSml->stableName == NULL){ + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + if (isdigit(metric->valuestring[0])) { + tscError("OTD:0x%"PRIx64" Metric cannnot start with digit in JSON", info->id); + tfree(pSml->stableName); + return TSDB_CODE_TSC_INVALID_JSON; + } + + tstrncpy(pSml->stableName, metric->valuestring, stableLen + 1); + + return TSDB_CODE_SUCCESS; + +} + +int32_t parseTimestampFromJSONObj(cJSON *root, int64_t *tsVal, SSmlLinesInfo* info) { + int32_t size = cJSON_GetArraySize(root); + if (size != OTD_JSON_SUB_FIELDS_NUM) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *value = cJSON_GetObjectItem(root, "value"); + if (!cJSON_IsNumber(value)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *type = cJSON_GetObjectItem(root, "type"); + if (!cJSON_IsString(type)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + *tsVal = value->valueint; + //if timestamp value is 0 use current system time + if (*tsVal == 0) { + *tsVal = taosGetTimestampNs(); + return TSDB_CODE_SUCCESS; + } + + size_t typeLen = strlen(type->valuestring); + if (typeLen == 1 && type->valuestring[0] == 's') { + //seconds + *tsVal = (int64_t)(*tsVal * 1e9); + } else if (typeLen == 2 && type->valuestring[1] == 's') { + switch (type->valuestring[0]) { + case 'm': + //milliseconds + *tsVal = convertTimePrecision(*tsVal, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_NANO); + break; + case 'u': + //microseconds + *tsVal = convertTimePrecision(*tsVal, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO); + break; + case 'n': + //nanoseconds + *tsVal = *tsVal * 1; + break; + default: + return TSDB_CODE_TSC_INVALID_JSON; + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseTimestampFromJSON(cJSON *root, TAOS_SML_KV **pTS, int *num_kvs, SSmlLinesInfo* info) { + //Timestamp must be the first KV to parse + assert(*num_kvs == 0); + int64_t tsVal; + char key[] = OTD_TIMESTAMP_COLUMN_NAME; + + cJSON *timestamp = cJSON_GetObjectItem(root, "timestamp"); + if (cJSON_IsNumber(timestamp)) { + //timestamp value 0 indicates current system time + if (timestamp->valueint == 0) { + tsVal = taosGetTimestampNs(); + } else { + tsVal = convertTimePrecision(timestamp->valueint, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO); + } + } else if (cJSON_IsObject(timestamp)) { + int32_t ret = parseTimestampFromJSONObj(timestamp, &tsVal, info); + if (ret != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" Failed to parse timestamp from JSON Obj", info->id); + return ret; + } + } else { + 
return TSDB_CODE_TSC_INVALID_JSON; + } + + //allocate fields for timestamp and value + *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV)); + + + (*pTS)->key = tcalloc(sizeof(key), 1); + memcpy((*pTS)->key, key, sizeof(key)); + + (*pTS)->type = TSDB_DATA_TYPE_TIMESTAMP; + (*pTS)->length = (int16_t)tDataTypes[(*pTS)->type].bytes; + (*pTS)->value = tcalloc((*pTS)->length, 1); + memcpy((*pTS)->value, &tsVal, (*pTS)->length); + + *num_kvs += 1; + return TSDB_CODE_SUCCESS; + +} + +int32_t convertJSONBool(TAOS_SML_KV *pVal, char* typeStr, int64_t valueInt, SSmlLinesInfo* info) { + if (strcasecmp(typeStr, "bool") != 0) { + tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON Bool", info->id, typeStr); + return TSDB_CODE_TSC_INVALID_JSON_TYPE; + } + pVal->type = TSDB_DATA_TYPE_BOOL; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(bool *)(pVal->value) = valueInt ? true : false; + + return TSDB_CODE_SUCCESS; +} + +int32_t convertJSONNumber(TAOS_SML_KV *pVal, char* typeStr, cJSON *value, SSmlLinesInfo* info) { + //tinyint + if (strcasecmp(typeStr, "i8") == 0 || + strcasecmp(typeStr, "tinyint") == 0) { + if (!IS_VALID_TINYINT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(tinyint)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_TINYINT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int8_t *)(pVal->value) = (int8_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //smallint + if (strcasecmp(typeStr, "i16") == 0 || + strcasecmp(typeStr, "smallint") == 0) { + if (!IS_VALID_SMALLINT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(smallint)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_SMALLINT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int16_t *)(pVal->value) = (int16_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //int + if (strcasecmp(typeStr, "i32") == 0 || + strcasecmp(typeStr, "int") == 0) { + if (!IS_VALID_INT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(int)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_INT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int32_t *)(pVal->value) = (int32_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //bigint + if (strcasecmp(typeStr, "i64") == 0 || + strcasecmp(typeStr, "bigint") == 0) { + if (!IS_VALID_BIGINT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(bigint)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_BIGINT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int64_t *)(pVal->value) = (int64_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //float + if (strcasecmp(typeStr, "f32") == 0 || + strcasecmp(typeStr, "float") == 0) { + if (!IS_VALID_FLOAT(value->valuedouble)) { + tscError("OTD:0x%"PRIx64" JSON value(%f) cannot fit in type(float)", info->id, value->valuedouble); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_FLOAT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + 
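/*
 * For reference, the "type" strings accepted by convertJSONNumber and the
 * range check each branch applies before storing the value (a summary of
 * this function's branches, not additional behavior):
 *
 *   "i8"  / "tinyint"  -> TSDB_DATA_TYPE_TINYINT,  IS_VALID_TINYINT(valueint)
 *   "i16" / "smallint" -> TSDB_DATA_TYPE_SMALLINT, IS_VALID_SMALLINT(valueint)
 *   "i32" / "int"      -> TSDB_DATA_TYPE_INT,      IS_VALID_INT(valueint)
 *   "i64" / "bigint"   -> TSDB_DATA_TYPE_BIGINT,   IS_VALID_BIGINT(valueint)
 *   "f32" / "float"    -> TSDB_DATA_TYPE_FLOAT,    IS_VALID_FLOAT(valuedouble)
 *   "f64" / "double"   -> TSDB_DATA_TYPE_DOUBLE,   IS_VALID_DOUBLE(valuedouble)
 *
 * An out-of-range value returns TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; an
 * unrecognized type string returns TSDB_CODE_TSC_INVALID_JSON_TYPE.
 */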
*(float *)(pVal->value) = (float)(value->valuedouble); + return TSDB_CODE_SUCCESS; + } + //double + if (strcasecmp(typeStr, "f64") == 0 || + strcasecmp(typeStr, "double") == 0) { + if (!IS_VALID_DOUBLE(value->valuedouble)) { + tscError("OTD:0x%"PRIx64" JSON value(%f) cannot fit in type(double)", info->id, value->valuedouble); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_DOUBLE; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(double *)(pVal->value) = (double)(value->valuedouble); + return TSDB_CODE_SUCCESS; + } + + //if reach here means type is unsupported + tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON Number", info->id, typeStr); + return TSDB_CODE_TSC_INVALID_JSON_TYPE; +} + +int32_t convertJSONString(TAOS_SML_KV *pVal, char* typeStr, cJSON *value, SSmlLinesInfo* info) { + if (strcasecmp(typeStr, "binary") == 0) { + pVal->type = TSDB_DATA_TYPE_BINARY; + } else if (strcasecmp(typeStr, "nchar") == 0) { + pVal->type = TSDB_DATA_TYPE_NCHAR; + } else { + tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON String", info->id, typeStr); + return TSDB_CODE_TSC_INVALID_JSON_TYPE; + } + pVal->length = (int16_t)strlen(value->valuestring); + pVal->value = tcalloc(pVal->length + 1, 1); + memcpy(pVal->value, value->valuestring, pVal->length); + return TSDB_CODE_SUCCESS; +} + +int32_t parseValueFromJSONObj(cJSON *root, TAOS_SML_KV *pVal, SSmlLinesInfo* info) { + int32_t ret = TSDB_CODE_SUCCESS; + int32_t size = cJSON_GetArraySize(root); + + if (size != OTD_JSON_SUB_FIELDS_NUM) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *value = cJSON_GetObjectItem(root, "value"); + if (value == NULL) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *type = cJSON_GetObjectItem(root, "type"); + if (!cJSON_IsString(type)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + switch (value->type) { + case cJSON_True: + case cJSON_False: { + ret = convertJSONBool(pVal, type->valuestring, value->valueint, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + break; + } + case cJSON_Number: { + ret = convertJSONNumber(pVal, type->valuestring, value, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + break; + } + case cJSON_String: { + ret = convertJSONString(pVal, type->valuestring, value, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + break; + } + default: + return TSDB_CODE_TSC_INVALID_JSON_TYPE; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseValueFromJSON(cJSON *root, TAOS_SML_KV *pVal, SSmlLinesInfo* info) { + int type = root->type; + + switch (type) { + case cJSON_True: + case cJSON_False: { + pVal->type = TSDB_DATA_TYPE_BOOL; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(bool *)(pVal->value) = root->valueint ? 
true : false;
+      break;
+    }
+    case cJSON_Number: {
+      //convert default JSON Number type to float
+      pVal->type = TSDB_DATA_TYPE_FLOAT;
+      pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+      pVal->value = tcalloc(pVal->length, 1);
+      *(float *)(pVal->value) = (float)(root->valuedouble);
+      break;
+    }
+    case cJSON_String: {
+      //convert default JSON String type to nchar
+      pVal->type = TSDB_DATA_TYPE_NCHAR;
+      //pVal->length = wcslen((wchar_t *)root->valuestring) * TSDB_NCHAR_SIZE;
+      pVal->length = (int16_t)strlen(root->valuestring);
+      pVal->value = tcalloc(pVal->length + 1, 1);
+      memcpy(pVal->value, root->valuestring, pVal->length);
+      break;
+    }
+    case cJSON_Object: {
+      int32_t ret = parseValueFromJSONObj(root, pVal, info);
+      if (ret != TSDB_CODE_SUCCESS) {
+        tscError("OTD:0x%"PRIx64" Failed to parse value from JSON Obj", info->id);
+        return ret;
+      }
+      break;
+    }
+    default:
+      return TSDB_CODE_TSC_INVALID_JSON;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseMetricValueFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, SSmlLinesInfo* info) {
+  //skip timestamp
+  TAOS_SML_KV *pVal = *pKVs + 1;
+  char key[] = OTD_METRIC_VALUE_COLUMN_NAME;
+
+  cJSON *metricVal = cJSON_GetObjectItem(root, "value");
+  if (metricVal == NULL) {
+    return TSDB_CODE_TSC_INVALID_JSON;
+  }
+
+  int32_t ret = parseValueFromJSON(metricVal, pVal, info);
+  if (ret != TSDB_CODE_SUCCESS) {
+    return ret;
+  }
+
+  pVal->key = tcalloc(sizeof(key), 1);
+  memcpy(pVal->key, key, sizeof(key));
+
+  *num_kvs += 1;
+  return TSDB_CODE_SUCCESS;
+
+}
+
+int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, char **childTableName, SSmlLinesInfo* info) {
+  int32_t ret = TSDB_CODE_SUCCESS;
+
+  cJSON *tags = cJSON_GetObjectItem(root, "tags");
+  if (tags == NULL || tags->type != cJSON_Object) {
+    return TSDB_CODE_TSC_INVALID_JSON;
+  }
+
+  //only pick up the first ID value as child table name
+  cJSON *id = cJSON_GetObjectItem(tags, "ID");
+  if (id != NULL) {
+    size_t idLen = strlen(id->valuestring);
+    ret = isValidChildTableName(id->valuestring, (int16_t)idLen);
+    if (ret != TSDB_CODE_SUCCESS) {
+      return ret;
+    }
+    *childTableName = tcalloc(idLen + 1, sizeof(char));
+    memcpy(*childTableName, id->valuestring, idLen);
+    //remove all ID fields from the tags list (case-insensitive)
+    while (id != NULL) {
+      cJSON_DeleteItemFromObject(tags, "ID");
+      id = cJSON_GetObjectItem(tags, "ID");
+    }
+  }
+
+  int32_t tagNum = cJSON_GetArraySize(tags);
+  //at least one tag pair required
+  if (tagNum <= 0) {
+    return TSDB_CODE_TSC_INVALID_JSON;
+  }
+
+  //allocate memory for tags
+  *pKVs = tcalloc(tagNum, sizeof(TAOS_SML_KV));
+  TAOS_SML_KV *pkv = *pKVs;
+
+  for (int32_t i = 0; i < tagNum; ++i) {
+    cJSON *tag = cJSON_GetArrayItem(tags, i);
+    if (tag == NULL) {
+      return TSDB_CODE_TSC_INVALID_JSON;
+    }
+    //key
+    size_t keyLen = strlen(tag->string);
+    pkv->key = tcalloc(keyLen + 1, sizeof(char));
+    strncpy(pkv->key, tag->string, keyLen);
+    //value
+    ret = parseValueFromJSON(tag, pkv, info);
+    if (ret != TSDB_CODE_SUCCESS) {
+      return ret;
+    }
+    *num_kvs += 1;
+    pkv++;
+  }
+
+  return ret;
+
+}
+
+int32_t tscParseJSONPayload(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlLinesInfo* info) {
+  int32_t ret = TSDB_CODE_SUCCESS;
+
+  if (!cJSON_IsObject(root)) {
+    tscError("OTD:0x%"PRIx64" data point needs to be JSON object", info->id);
+    return TSDB_CODE_TSC_INVALID_JSON;
+  }
+
+  int32_t size = cJSON_GetArraySize(root);
+  //the outermost JSON object has to have exactly 4 fields
+  if (size != OTD_JSON_FIELDS_NUM) {
+    tscError("OTD:0x%"PRIx64" Invalid number of JSON fields 
in data point %d", info->id, size); + return TSDB_CODE_TSC_INVALID_JSON; + } + + //Parse metric + ret = parseMetricFromJSON(root, pSml, info); + if (ret != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" Unable to parse metric from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse metric from JSON payload finished", info->id); + + //Parse timestamp + ret = parseTimestampFromJSON(root, &pSml->fields, &pSml->fieldNum, info); + if (ret) { + tscError("OTD:0x%"PRIx64" Unable to parse timestamp from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse timestamp from JSON payload finished", info->id); + + //Parse metric value + ret = parseMetricValueFromJSON(root, &pSml->fields, &pSml->fieldNum, info); + if (ret) { + tscError("OTD:0x%"PRIx64" Unable to parse metric value from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse metric value from JSON payload finished", info->id); + + //Parse tags + ret = parseTagsFromJSON(root, &pSml->tags, &pSml->tagNum, &pSml->childTableName, info); + if (ret) { + tscError("OTD:0x%"PRIx64" Unable to parse tags from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse tags from JSON payload finished", info->id); + + return TSDB_CODE_SUCCESS; +} + +int32_t tscParseMultiJSONPayload(char* payload, SArray* points, SSmlLinesInfo* info) { + int32_t payloadNum, ret; + ret = TSDB_CODE_SUCCESS; + + if (payload == NULL) { + tscError("OTD:0x%"PRIx64" empty JSON Payload", info->id); + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *root = cJSON_Parse(payload); + //multiple data points must be sent in JSON array + if (cJSON_IsObject(root)) { + payloadNum = 1; + } else if (cJSON_IsArray(root)) { + payloadNum = cJSON_GetArraySize(root); + } else { + tscError("OTD:0x%"PRIx64" Invalid JSON Payload", info->id); + ret = TSDB_CODE_TSC_INVALID_JSON; + goto PARSE_JSON_OVER; + } + + for (int32_t i = 0; i < payloadNum; ++i) { + TAOS_SML_DATA_POINT point = {0}; + cJSON *dataPoint = (payloadNum == 1) ? 
root : cJSON_GetArrayItem(root, i);
+
+    ret = tscParseJSONPayload(dataPoint, &point, info);
+    if (ret != TSDB_CODE_SUCCESS) {
+      tscError("OTD:0x%"PRIx64" JSON data point parse failed", info->id);
+      destroySmlDataPoint(&point);
+      goto PARSE_JSON_OVER;
+    } else {
+      tscDebug("OTD:0x%"PRIx64" JSON data point parse success", info->id);
+    }
+    taosArrayPush(points, &point);
+  }
+
+PARSE_JSON_OVER:
+  cJSON_Delete(root);
+  return ret;
+}
+
+int taos_insert_json_payload(TAOS* taos, char* payload) {
+  int32_t code = 0;
+
+  SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
+  info->id = genUID();
+
+  if (payload == NULL) {
+    tscError("OTD:0x%"PRIx64" taos_insert_json_payload payload is NULL", info->id);
+    tfree(info);
+    code = TSDB_CODE_TSC_APP_ERROR;
+    return code;
+  }
+
+  SArray* lpPoints = taosArrayInit(1, sizeof(TAOS_SML_DATA_POINT));
+  if (lpPoints == NULL) {
+    tscError("OTD:0x%"PRIx64" taos_insert_json_payload failed to allocate memory", info->id);
+    tfree(info);
+    return TSDB_CODE_TSC_OUT_OF_MEMORY;
+  }
+
+  tscDebug("OTD:0x%"PRIx64" taos_insert_json_payload begin inserting %d points", info->id, 1);
+  code = tscParseMultiJSONPayload(payload, lpPoints, info);
+  size_t numPoints = taosArrayGetSize(lpPoints);
+
+  if (code != 0) {
+    goto cleanup;
+  }
+
+  TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
+  code = tscSmlInsert(taos, points, (int)numPoints, info);
+  if (code != 0) {
+    tscError("OTD:0x%"PRIx64" taos_insert_json_payload error: %s", info->id, tstrerror((code)));
+  }
+
+cleanup:
+  tscDebug("OTD:0x%"PRIx64" taos_insert_json_payload finish inserting 1 point. code: %d", info->id, code);
+  points = TARRAY_GET_START(lpPoints);
+  numPoints = taosArrayGetSize(lpPoints);
+  for (int i = 0; i < numPoints; ++i) {
+    destroySmlDataPoint(points+i);
+  }
+
+  taosArrayDestroy(lpPoints);
+
+  tfree(info);
+  return code;
+}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index bbddc4bff925de1a7d0b67fd233b6e2e88a618a3..d0ac0ccf4ee4bfa381a78090409a761717ceb4b0 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -1491,7 +1491,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
   pSql->signature = pSql;
   pSql->pTscObj = pObj;
   pSql->maxRetry = TSDB_MAX_REPLICA;
-  pSql->isBind = true;
   pStmt->pSql = pSql;
   pStmt->last = STMT_INIT;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 254c39d37e388b97fe3df38132e3a19e3fdece8c..3ca2004bfd7be260d8eaaa047d7c9e67013e0804 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -22,6 +22,7 @@
 #include 
 #include "os.h"
+#include "regex.h"
 #include "qPlan.h"
 #include "qSqlparser.h"
 #include "qTableMeta.h"
@@ -278,6 +279,10 @@ static uint8_t convertRelationalOperator(SStrToken *pToken) {
       return TSDB_BINARY_OP_REMAINDER;
     case TK_LIKE:
       return TSDB_RELATION_LIKE;
+    case TK_MATCH:
+      return TSDB_RELATION_MATCH;
+    case TK_NMATCH:
+      return TSDB_RELATION_NMATCH;
     case TK_ISNULL:
       return TSDB_RELATION_ISNULL;
     case TK_NOTNULL:
@@ -3796,6 +3801,12 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
     case TK_LIKE:
       pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE;
       break;
+    case TK_MATCH:
+      pColumnFilter->lowerRelOptr = TSDB_RELATION_MATCH;
+      break;
+    case TK_NMATCH:
+      pColumnFilter->lowerRelOptr = TSDB_RELATION_NMATCH;
+      break;
     case TK_ISNULL:
       pColumnFilter->lowerRelOptr = TSDB_RELATION_ISNULL;
       break;
@@ -3859,10 +3870,18 @@ static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) {
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t 
tablenameCondToString(tSqlExpr* pExpr, SStringBuilder* sb) { - taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); - taosStringBuilderAppendString(sb, pExpr->value.pz); - +static int32_t tablenameCondToString(tSqlExpr* pExpr, uint32_t opToken, SStringBuilder* sb) { + assert(opToken == TK_LIKE || opToken == TK_MATCH || opToken == TK_NMATCH); + if (opToken == TK_LIKE) { + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); + taosStringBuilderAppendString(sb, pExpr->value.pz); + } else if (opToken == TK_MATCH) { + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_MATCH, QUERY_COND_REL_PREFIX_MATCH_LEN); + taosStringBuilderAppendString(sb, pExpr->value.pz); + } else if (opToken == TK_NMATCH) { + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_NMATCH, QUERY_COND_REL_PREFIX_NMATCH_LEN); + taosStringBuilderAppendString(sb, pExpr->value.pz); + } return TSDB_CODE_SUCCESS; } @@ -3882,7 +3901,7 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); int32_t ret = 0; - const char* msg1 = "non binary column not support like operator"; + const char* msg1 = "non binary column not support like/match operator"; const char* msg2 = "binary column not support this operator"; const char* msg3 = "bool column not support this operator"; const char* msg4 = "primary key not support this operator"; @@ -3910,12 +3929,14 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol && pExpr->tokenId != TK_ISNULL && pExpr->tokenId != TK_NOTNULL && pExpr->tokenId != TK_LIKE + && pExpr->tokenId != TK_MATCH + && pExpr->tokenId != TK_NMATCH && pExpr->tokenId != TK_IN) { ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); goto _err_ret; } } else { - if (pExpr->tokenId == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); goto _err_ret; } @@ -3963,12 +3984,12 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* if (pTableCond->tokenId == TK_IN) { ret = tablenameListToString(pRight, sb); - } else if (pTableCond->tokenId == TK_LIKE) { + } else if (pTableCond->tokenId == TK_LIKE || pTableCond->tokenId == TK_MATCH || pTableCond->tokenId == TK_NMATCH) { if (pRight->tokenId != TK_STRING) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - ret = tablenameCondToString(pRight, sb); + ret = tablenameCondToString(pRight, pTableCond->tokenId, sb); } if (ret != TSDB_CODE_SUCCESS) { @@ -4417,7 +4438,7 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr } static bool validTableNameOptr(tSqlExpr* pExpr) { - const char nameFilterOptr[] = {TK_IN, TK_LIKE}; + const char nameFilterOptr[] = {TK_IN, TK_LIKE, TK_MATCH, TK_NMATCH}; for (int32_t i = 0; i < tListLen(nameFilterOptr); ++i) { if (pExpr->tokenId == nameFilterOptr[i]) { @@ -4509,6 +4530,49 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t return TSDB_CODE_SUCCESS; } +// check for match expression +static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { + const char* msg1 = "regular expression string should be less than %d characters"; + const char* msg2 = "illegal column type for match/nmatch"; + const char* msg3 = 
"invalid regular expression"; + + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; + + if (pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { + if (pRight->value.nLen > tsMaxRegexStringLen) { + char tmp[64] = {0}; + sprintf(tmp, msg1, tsMaxRegexStringLen); + return invalidOperationMsg(msgBuf, tmp); + } + + SSchema* pSchema = tscGetTableSchema(pTableMeta); + if ((!isTablenameToken(&pLeft->columnName)) &&(pSchema[index].type != TSDB_DATA_TYPE_BINARY)) { + return invalidOperationMsg(msgBuf, msg2); + } + + if (!(pRight->type == SQL_NODE_VALUE && pRight->value.nType == TSDB_DATA_TYPE_BINARY)) { + return invalidOperationMsg(msgBuf, msg3); + } + + int errCode = 0; + regex_t regex; + char regErrBuf[256] = {0}; + + const char* pattern = pRight->value.pz; + int cflags = REG_EXTENDED; + if ((errCode = regcomp(®ex, pattern, cflags)) != 0) { + regerror(errCode, ®ex, regErrBuf, sizeof(regErrBuf)); + tscError("Failed to compile regex pattern %s. reason %s", pattern, regErrBuf); + return invalidOperationMsg(msgBuf, msg3); + } + regfree(®ex); + } + + return TSDB_CODE_SUCCESS; +} + + int32_t handleNeOptr(tSqlExpr** rexpr, tSqlExpr* expr) { tSqlExpr* left = tSqlExprClone(expr); tSqlExpr* right = expr; @@ -4560,6 +4624,12 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return code; } + // validate the match expression + code = validateMatchExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd)); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { @@ -4887,65 +4957,69 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, STagCond* pTagCond = &pQueryInfo->tagCond; pTagCond->tbnameCond.uid = pTableMetaInfo->pTableMeta->id.uid; - assert(pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_IN); + assert(pExpr->tokenId == TK_LIKE + || pExpr->tokenId == TK_MATCH + || pExpr->tokenId == TK_NMATCH + || pExpr->tokenId == TK_IN); - if (pExpr->tokenId == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { char* str = taosStringBuilderGetResult(sb, NULL); pQueryInfo->tagCond.tbnameCond.cond = strdup(str); pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str); return TSDB_CODE_SUCCESS; - } - - SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1)); - taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); + } else { + SStringBuilder sb1; + memset(&sb1, 0, sizeof(sb1)); + taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); - // remove the duplicated input table names - int32_t num = 0; - char* tableNameString = taosStringBuilderGetResult(sb, NULL); + // remove the duplicated input table names + int32_t num = 0; + char* tableNameString = taosStringBuilderGetResult(sb, NULL); - char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); - qsort(segments, num, POINTER_BYTES, tableNameCompar); + char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); + qsort(segments, num, POINTER_BYTES, tableNameCompar); - int32_t j = 1; - for (int32_t i = 1; i < num; ++i) { - if (strcmp(segments[i], segments[i - 1]) != 0) { - segments[j++] = segments[i]; + 
int32_t j = 1; + for (int32_t i = 1; i < num; ++i) { + if (strcmp(segments[i], segments[i - 1]) != 0) { + segments[j++] = segments[i]; + } } - } - num = j; + num = j; - char name[TSDB_DB_NAME_LEN] = {0}; - tNameGetDbName(&pTableMetaInfo->name, name); - SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) }; - - for (int32_t i = 0; i < num; ++i) { - if (i >= 1) { - taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); - } + char name[TSDB_DB_NAME_LEN] = {0}; + tNameGetDbName(&pTableMetaInfo->name, name); + SStrToken dbToken = {.type = TK_STRING, .z = name, .n = (uint32_t)strlen(name)}; + + for (int32_t i = 0; i < num; ++i) { + if (i >= 1) { + taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); + } - char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; - int32_t xlen = (int32_t)strlen(segments[i]); - SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; + char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; + int32_t xlen = (int32_t)strlen(segments[i]); + SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; - int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); - if (ret != TSDB_CODE_SUCCESS) { - taosStringBuilderDestroy(&sb1); - tfree(segments); + int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); + if (ret != TSDB_CODE_SUCCESS) { + taosStringBuilderDestroy(&sb1); + tfree(segments); - invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); - return ret; - } + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); + return ret; + } - taosStringBuilderAppendString(&sb1, idBuf); - } + taosStringBuilderAppendString(&sb1, idBuf); + } - char* str = taosStringBuilderGetResult(&sb1, NULL); - pQueryInfo->tagCond.tbnameCond.cond = strdup(str); - pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str); + char* str = taosStringBuilderGetResult(&sb1, NULL); + pQueryInfo->tagCond.tbnameCond.cond = strdup(str); + pQueryInfo->tagCond.tbnameCond.len = (int32_t)strlen(str); - taosStringBuilderDestroy(&sb1); - tfree(segments); - return TSDB_CODE_SUCCESS; + taosStringBuilderDestroy(&sb1); + tfree(segments); + return TSDB_CODE_SUCCESS; + } } int32_t mergeTimeRange(SSqlCmd* pCmd, STimeWindow* res, STimeWindow* win, int32_t optr) { @@ -8132,7 +8206,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect } static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t sqlOptr) { - const char* msg1 = "non binary column not support like operator"; + const char* msg1 = "non binary column not support like/match operator"; const char* msg2 = "invalid operator for binary column in having clause"; const char* msg3 = "invalid operator for bool column in having clause"; @@ -8184,11 +8258,13 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, S && pExpr->tokenId != TK_ISNULL && pExpr->tokenId != TK_NOTNULL && pExpr->tokenId != TK_LIKE + && pExpr->tokenId != TK_MATCH + && pExpr->tokenId != TK_NMATCH ) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } else { - if (pExpr->tokenId == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -8645,7 +8721,7 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod if (p->vgroupIdList != NULL) { size_t s = taosArrayGetSize(p->vgroupIdList); - size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo); + size_t vgroupsz = 
sizeof(SVgroupMsg) * s + sizeof(SVgroupsInfo); pTableMetaInfo->vgroupList = calloc(1, vgroupsz); if (pTableMetaInfo->vgroupList == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -8660,14 +8736,11 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo); assert(existVgroupInfo.inUse >= 0); - SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; + SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; pVgroup->numOfEps = existVgroupInfo.numOfEps; pVgroup->vgId = existVgroupInfo.vgId; - for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { - pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; - pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); - } + memcpy(&pVgroup->epAddr, &existVgroupInfo.ep, sizeof(pVgroup->epAddr)); } } } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 9d523f273016e258940c67eaa1596153de0998eb..b87ec92ff1f056fdc5eeb8992cec418d07158b0b 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -73,7 +73,7 @@ static int32_t removeDupVgid(int32_t *src, int32_t sz) { return ret; } -static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) { +static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupMsg* pVgroupInfo) { assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0); // Issue the query to one of the vnode among a vgroup randomly. @@ -93,6 +93,7 @@ static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) { existed = true; } } + assert(existed); } @@ -723,7 +724,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab int32_t index = pTableMetaInfo->vgroupIndex; assert(index >= 0); - SVgroupInfo* pVgroupInfo = NULL; + SVgroupMsg* pVgroupInfo = NULL; if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) { assert(index < pTableMetaInfo->vgroupList->numOfVgroups); pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; @@ -861,8 +862,8 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, (*pMsg) += sizeof(SSqlExpr); for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log - pSqlExpr->param[j].nType = htons((uint16_t)pExpr->param[j].nType); - pSqlExpr->param[j].nLen = htons(pExpr->param[j].nLen); + pSqlExpr->param[j].nType = htonl(pExpr->param[j].nType); + pSqlExpr->param[j].nLen = htonl(pExpr->param[j].nLen); if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { memcpy((*pMsg), pExpr->param[j].pz, pExpr->param[j].nLen); @@ -880,17 +881,22 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = NULL; + STableMeta *pTableMeta = NULL; + STableMetaInfo *pTableMetaInfo = NULL; + int32_t code = TSDB_CODE_SUCCESS; int32_t size = tscEstimateQueryMsgSize(pSql); + assert(size > 0); - if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { + if (TSDB_CODE_SUCCESS != tscAllocPayloadFast(pCmd, size)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_INVALID_OPERATION; // todo add test for this } - SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; + pQueryInfo = tscGetQueryInfo(pCmd); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + pTableMeta = 
pTableMetaInfo->pTableMeta; SQueryAttr query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); @@ -941,14 +947,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->pointInterpQuery = query.pointInterpQuery; pQueryMsg->needReverseScan = query.needReverseScan; pQueryMsg->stateWindow = query.stateWindow; - pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->sqlstrLen = htonl(sqlLen); pQueryMsg->sw.gap = htobe64(query.sw.gap); pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX); pQueryMsg->secondStageOutput = htonl(query.numOfExpr2); - pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number + pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); @@ -968,7 +973,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tableCols[i].type = htons(pCol->type); //pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters); pQueryMsg->tableCols[i].flist.numOfFilters = 0; - + pQueryMsg->tableCols[i].flist.filterInfo = 0; // append the filter information after the basic column information //serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg); } @@ -981,6 +986,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pCond->len; } + } else { + pQueryMsg->colCondLen = 0; } for (int32_t i = 0; i < query.numOfOutput; ++i) { @@ -1060,6 +1067,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pCond->len; } + } else { + pQueryMsg->tagCondLen = 0; } if (pQueryInfo->bufLen > 0) { @@ -1089,6 +1098,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tsBuf.tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen); pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks); + } else { + pQueryMsg->tsBuf.tsLen = 0; + pQueryMsg->tsBuf.tsNumOfBlocks = 0; } int32_t numOfOperator = (int32_t) taosArrayGetSize(queryOperator); @@ -1126,6 +1138,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pUdfInfo->contLen; } + } else { + pQueryMsg->udfContentOffset = 0; + pQueryMsg->udfContentLen = 0; } memcpy(pMsg, pSql->sqlstr, sqlLen); @@ -2146,7 +2161,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg)); - size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); + size_t vgroupsz = sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo *pVgroupInfo = calloc(1, vgroupsz); assert(pVgroupInfo != NULL); @@ -2156,7 +2171,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t } else { for (int32_t j = 0; j < pVgroupInfo->numOfVgroups; ++j) { // just init, no need to lock - SVgroupInfo *pVgroup = &pVgroupInfo->vgroups[j]; + SVgroupMsg *pVgroup = &pVgroupInfo->vgroups[j]; SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; vmsg->vgId = htonl(vmsg->vgId); @@ -2168,7 +2183,8 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t pVgroup->vgId = vmsg->vgId; for (int32_t k = 0; k < vmsg->numOfEps; ++k) { pVgroup->epAddr[k].port = vmsg->epAddr[k].port; - pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); + tstrncpy(pVgroup->epAddr[k].fqdn, 
vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); +// pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); } doUpdateVgroupInfo(pVgroup->vgId, vmsg); @@ -2618,7 +2634,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { tfree(pTableMetaInfo->pTableMeta); if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta - taosHashClear(tscTableMetaMap); + if (pSql->res.pRsp == NULL) { + tscDebug("0x%"PRIx64" unexpected resp from mnode, super table: %s failed to update super table meta ", pSql->self, name); + return 0; + } + return tscProcessTableMetaRsp(pSql); } return 0; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index e3bec2c2eaa50bb8e753ad499889b94dc23c2f40..275042a238dc9bd580ff5d09b8e85874b9031031 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -623,13 +623,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value -#ifndef _TD_NINGSI_60 - pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; -#else - pExpr->base.param[0].i64 = colId; - pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; - pExpr->base.param[0].nLen = sizeof(int64_t); -#endif + tVariant* pVariant = &pExpr->base.param[0]; + + pVariant->i64 = colId; + pVariant->nType = TSDB_DATA_TYPE_BIGINT; + pVariant->nLen = sizeof(int64_t); + pExpr->base.numOfParams = 1; } @@ -748,10 +747,11 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr SVgroupTableInfo info = {{0}}; for (int32_t m = 0; m < pvg->numOfVgroups; ++m) { if (tt->vgId == pvg->vgroups[m].vgId) { - tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); + memcpy(&info.vgInfo, &pvg->vgroups[m], sizeof(info.vgInfo)); break; } } + assert(info.vgInfo.numOfEps != 0); vgTables = taosArrayInit(4, sizeof(STableIdInfo)); @@ -2459,11 +2459,48 @@ static void doSendQueryReqs(SSchedMsg* pSchedMsg) { tfree(p); } +static void doConcurrentlySendSubQueries(SSqlObj* pSql) { + SSubqueryState *pState = &pSql->subState; + + // concurrently sent the query requests. + const int32_t MAX_REQUEST_PER_TASK = 4; + + int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK; + assert(numOfTasks >= 1); + + int32_t num; + if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) { + num = MAX_REQUEST_PER_TASK; + } else { + num = pState->numOfSub / numOfTasks + 1; + } + tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks); + + for(int32_t j = 0; j < numOfTasks; ++j) { + SSchedMsg schedMsg = {0}; + schedMsg.fp = doSendQueryReqs; + schedMsg.ahandle = (void*)pSql; + + schedMsg.thandle = NULL; + SPair* p = calloc(1, sizeof(SPair)); + p->first = j * num; + + if (j == numOfTasks - 1) { + p->second = pState->numOfSub; + } else { + p->second = (j + 1) * num; + } + + schedMsg.msg = p; + taosScheduleTask(tscQhandle, &schedMsg); + } +} + int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - // pRes->code check only serves in launching metric sub-queries + // pRes->code check only serves in launching super table sub-queries if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) { pCmd->command = TSDB_SQL_RETRIEVE_GLOBALMERGE; // enable the abort of kill super table function. 
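/*
 * Worked example for doConcurrentlySendSubQueries above, with its
 * MAX_REQUEST_PER_TASK of 4: numOfSub == 10 gives
 * numOfTasks == (10 + 3) / 4 == 3 and num == 10 / 3 + 1 == 4, so the
 * scheduled [first, second) ranges are [0,4), [4,8) and [8,10); the last
 * task is clamped to pState->numOfSub, so every subquery index is
 * dispatched exactly once.
 */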
return pRes->code; @@ -2474,22 +2511,23 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pRes->qId = 0x1; // hack the qhandle check - const uint32_t nBufferSize = (1u << 18u); // 256KB + const uint32_t nBufferSize = (1u << 18u); // 256KB, default buffer size SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + SSubqueryState *pState = &pSql->subState; - pState->numOfSub = 0; - if (pTableMetaInfo->pVgroupTables == NULL) { - pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups; - } else { - pState->numOfSub = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + int32_t numOfSub = (pTableMetaInfo->pVgroupTables == NULL) ? pTableMetaInfo->vgroupList->numOfVgroups + : (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + + int32_t ret = doInitSubState(pSql, numOfSub); + if (ret != 0) { + tscAsyncResultOnError(pSql); + return ret; } - assert(pState->numOfSub > 0); - - int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self); + ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self); if (ret != 0) { pRes->code = ret; tscAsyncResultOnError(pSql); @@ -2499,32 +2537,6 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { } tscDebug("0x%"PRIx64" retrieved query data from %d vnode(s)", pSql->self, pState->numOfSub); - pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES); - if (pSql->pSubs == NULL) { - tfree(pSql->pSubs); - pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); - - tscAsyncResultOnError(pSql); - return ret; - } - - if (pState->states == NULL) { - pState->states = calloc(pState->numOfSub, sizeof(*pState->states)); - if (pState->states == NULL) { - pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); - - tscAsyncResultOnError(pSql); - return ret; - } - - pthread_mutex_init(&pState->mutex, NULL); - } - - memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub); - tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self); - pRes->code = TSDB_CODE_SUCCESS; int32_t i = 0; @@ -2538,15 +2550,16 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { trs->pExtMemBuffer = pMemoryBuf; trs->pOrderDescriptor = pDesc; - trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); + trs->localBuffer = (tFilePage *)malloc(nBufferSize + sizeof(tFilePage)); if (trs->localBuffer == NULL) { tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno)); tfree(trs); break; } - - trs->subqueryIndex = i; - trs->pParentSql = pSql; + + trs->localBuffer->num = 0; + trs->subqueryIndex = i; + trs->pParentSql = pSql; SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL); if (pNew == NULL) { @@ -2582,39 +2595,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return pRes->code; } - // concurrently sent the query requests. 
- const int32_t MAX_REQUEST_PER_TASK = 8; - - int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK; - assert(numOfTasks >= 1); - - int32_t num; - if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) { - num = MAX_REQUEST_PER_TASK; - } else { - num = pState->numOfSub / numOfTasks + 1; - } - tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks); - - for(int32_t j = 0; j < numOfTasks; ++j) { - SSchedMsg schedMsg = {0}; - schedMsg.fp = doSendQueryReqs; - schedMsg.ahandle = (void*)pSql; - - schedMsg.thandle = NULL; - SPair* p = calloc(1, sizeof(SPair)); - p->first = j * num; - - if (j == numOfTasks - 1) { - p->second = pState->numOfSub; - } else { - p->second = (j + 1) * num; - } - - schedMsg.msg = p; - taosScheduleTask(tscQhandle, &schedMsg); - } - + doConcurrentlySendSubQueries(pSql); return TSDB_CODE_SUCCESS; } @@ -2671,7 +2652,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 int32_t subqueryIndex = trsupport->subqueryIndex; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); @@ -2899,7 +2880,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->res.precision = pSql->res.precision; pParentSql->res.numOfRows = 0; pParentSql->res.row = 0; - pParentSql->res.numOfGroups = 0; tscFreeRetrieveSup(pSql); @@ -2950,7 +2930,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR SSubqueryState* pState = &pParentSql->subState; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; if (pParentSql->res.code != TSDB_CODE_SUCCESS) { trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -3078,7 +3058,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { assert(pQueryInfo->numOfTables == 1); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; + SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; // stable query killed or other subquery failed, all query stopped if (pParentSql->res.code != TSDB_CODE_SUCCESS) { @@ -3424,7 +3404,6 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { return; } -// tscRestoreFuncForSTableQuery(pQueryInfo); int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList); assert(numOfRes * rowSize > 0); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b0651f59066dd5897f2eaf0924b7113a21..8af340030cccee1431a82eb88344642011f2e019 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -122,6 +122,10 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry void taos_init_imp(void) { char temp[128] = {0}; + + // In the APIs of other program language, taos_cleanup is not available yet. + // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. 
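/*
 * Note on placement: registering the handler at the top of taos_init_imp
 * (it previously ran at the very end of the function, see the removal hunk
 * below) keeps taos_cleanup on the exit path even when a later
 * initialization step returns early.
 */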
+ atexit(taos_cleanup); errno = TSDB_CODE_SUCCESS; srand(taosGetTimestampSec()); @@ -197,10 +201,6 @@ void taos_init_imp(void) { tscRefId = taosOpenRef(200, tscCloseTscObj); - // In the APIs of other program language, taos_cleanup is not available yet. - // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. - atexit(taos_cleanup); - tscDebug("client is initialized successfully"); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 31631560af12a01e27d5a71acaecb724cc822b5d..2bd601d812294ea311e30fece732d1e1c2c533ec 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1347,14 +1347,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { tfree(pRes->buffer); tfree(pRes->urow); - tfree(pRes->pGroupRec); tfree(pRes->pColumnIndex); - - if (pRes->pArithSup != NULL) { - tfree(pRes->pArithSup->data); - tfree(pRes->pArithSup); - } - tfree(pRes->final); pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free @@ -2087,32 +2080,35 @@ bool tscIsInsertData(char* sqlstr) { } while (1); } -int tscAllocPayload(SSqlCmd* pCmd, int size) { +int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) { if (pCmd->payload == NULL) { assert(pCmd->allocSize == 0); - pCmd->payload = (char*)calloc(1, size); - if (pCmd->payload == NULL) { + pCmd->payload = malloc(size); + pCmd->allocSize = (uint32_t) size; + } else if (pCmd->allocSize < size) { + char* tmp = realloc(pCmd->payload, size); + if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - pCmd->allocSize = size; - } else { - if (pCmd->allocSize < (uint32_t)size) { - char* b = realloc(pCmd->payload, size); - if (b == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + pCmd->payload = tmp; + pCmd->allocSize = (uint32_t) size; + } - pCmd->payload = b; - pCmd->allocSize = size; - } + assert(pCmd->allocSize >= size); + return TSDB_CODE_SUCCESS; +} +int32_t tscAllocPayload(SSqlCmd* pCmd, int size) { + assert(size > 0); + + int32_t code = tscAllocPayloadFast(pCmd, (size_t) size); + if (code == TSDB_CODE_SUCCESS) { memset(pCmd->payload, 0, pCmd->allocSize); } - assert(pCmd->allocSize >= (uint32_t)size && size > 0); - return TSDB_CODE_SUCCESS; + return code; } TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) { @@ -3369,11 +3365,11 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) { size_t num = taosArrayGetSize(pVgroupTables); for (size_t i = 0; i < num; i++) { SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); - +#if 0 for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { tfree(pInfo->vgInfo.epAddr[j].fqdn); } - +#endif taosArrayDestroy(pInfo->itemList); } @@ -3387,9 +3383,9 @@ void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { assert(size > index); SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); - for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { - tfree(pInfo->vgInfo.epAddr[j].fqdn); - } +// for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { +// tfree(pInfo->vgInfo.epAddr[j].fqdn); +// } taosArrayDestroy(pInfo->itemList); taosArrayRemove(pVgroupTable, index); @@ -3399,9 +3395,12 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { memset(info, 0, sizeof(SVgroupTableInfo)); info->vgInfo = pInfo->vgInfo; + +#if 0 for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn); } +#endif if (pInfo->itemList) { info->itemList = taosArrayDup(pInfo->itemList); @@ -3464,13 +3463,9 @@ 
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } pTableMetaInfo->pTableMeta = pTableMeta; - if (pTableMetaInfo->pTableMeta == NULL) { - pTableMetaInfo->tableMetaSize = 0; - } else { - pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); - } + pTableMetaInfo->tableMetaSize = (pTableMetaInfo->pTableMeta == NULL)? 0:tscGetTableMetaSize(pTableMeta); + pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize); - if (vgroupList != NULL) { pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); @@ -3718,8 +3713,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; + pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); } @@ -3760,7 +3755,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); - } else { // transfer the ownership of pTableMeta to the newly create sql object. STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0); if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) { @@ -3770,8 +3764,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList, - pTableMetaInfo->pVgroupTables); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, + pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); } // this case cannot be happened @@ -3944,6 +3938,21 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { taos_fetch_rows_a(tres, tscSubqueryRetrieveCallback, param); } +int32_t doInitSubState(SSqlObj* pSql, int32_t numOfSubqueries) { + assert(pSql->subState.numOfSub == 0 && pSql->pSubs == NULL && pSql->subState.states == NULL); + pSql->subState.numOfSub = numOfSubqueries; + + pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); + pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); + + int32_t code = pthread_mutex_init(&pSql->subState.mutex, NULL); + if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != 0) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + return TSDB_CODE_SUCCESS; +} + // do execute the query according to the query execution plan void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { int32_t code = TSDB_CODE_SUCCESS; @@ -3959,16 +3968,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { } if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. 
do execute it firstly - assert(pSql->subState.numOfSub == 0); - pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); - assert(pSql->pSubs == NULL); - pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); - assert(pSql->subState.states == NULL); - pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); - code = pthread_mutex_init(&pSql->subState.mutex, NULL); - - if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != TSDB_CODE_SUCCESS) { - code = TSDB_CODE_TSC_OUT_OF_MEMORY; + code = doInitSubState(pSql, (int32_t) taosArrayGetSize(pQueryInfo->pUpstream)); + if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -4315,7 +4316,9 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { } tfree(pSql->pSubs); + tfree(pSql->subState.states); pSql->subState.numOfSub = 0; + pthread_mutex_destroy(&pSql->subState.mutex); pSql->fp = fp; @@ -4406,8 +4409,8 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { return NULL; } - size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupList->numOfVgroups; - SVgroupsInfo* pNew = calloc(1, size); + size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupMsg) * vgroupList->numOfVgroups; + SVgroupsInfo* pNew = malloc(size); if (pNew == NULL) { return NULL; } @@ -4415,15 +4418,15 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { pNew->numOfVgroups = vgroupList->numOfVgroups; for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SVgroupInfo* pNewVInfo = &pNew->vgroups[i]; + SVgroupMsg* pNewVInfo = &pNew->vgroups[i]; - SVgroupInfo* pvInfo = &vgroupList->vgroups[i]; + SVgroupMsg* pvInfo = &vgroupList->vgroups[i]; pNewVInfo->vgId = pvInfo->vgId; pNewVInfo->numOfEps = pvInfo->numOfEps; for(int32_t j = 0; j < pvInfo->numOfEps; ++j) { - pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn); pNewVInfo->epAddr[j].port = pvInfo->epAddr[j].port; + tstrncpy(pNewVInfo->epAddr[j].fqdn, pvInfo->epAddr[j].fqdn, TSDB_FQDN_LEN); } } @@ -4435,8 +4438,9 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { return NULL; } +#if 0 for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i]; + SVgroupMsg* pVgroupInfo = &vgroupList->vgroups[i]; for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) { tfree(pVgroupInfo->epAddr[j].fqdn); @@ -4447,10 +4451,11 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { } } +#endif tfree(vgroupList); return NULL; } - +# if 0 void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { dst->vgId = src->vgId; dst->numOfEps = src->numOfEps; @@ -4463,6 +4468,8 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { } } +#endif + char* serializeTagData(STagData* pTagData, char* pMsg) { int32_t n = (int32_t) strlen(pTagData->name); *(int32_t*) pMsg = htonl(n); @@ -4603,11 +4610,12 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) { SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) { assert(pVgroupsInfo != NULL); - size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); + size_t size = sizeof(SVgroupMsg) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo* pInfo = calloc(1, size); pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups; for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) { - tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); + memcpy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m], sizeof(SVgroupMsg)); +// tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); } return pInfo; } 
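The tscUtil.c hunks above also centralize the subquery bookkeeping: the repeated calloc/assert sequence in executeQuery() is folded into the new doInitSubState() helper, and tscTryQueryNextClause() gains the matching teardown (freeing subState.states and destroying the subState mutex alongside pSubs). What follows is a minimal, self-contained C sketch of that init/teardown pairing; SubState, initSubState(), and cleanupSubState() are illustrative stand-ins for the real SSqlObj fields and helpers, not the patch's exact code, and error handling is simplified:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define POINTER_BYTES sizeof(void *)

/* stand-in for the subquery bookkeeping fields of SSqlObj */
typedef struct SubState {
  int32_t         numOfSub;
  void          **pSubs;    /* one slot per subquery object */
  int8_t         *states;   /* per-subquery completion flag */
  pthread_mutex_t mutex;    /* guards the states array */
} SubState;

/* mirrors doInitSubState(): allocate both arrays plus the mutex together,
 * so callers check a single return code instead of three asserts */
static int32_t initSubState(SubState *st, int32_t numOfSubqueries) {
  st->numOfSub = numOfSubqueries;
  st->pSubs    = calloc(numOfSubqueries, POINTER_BYTES);
  st->states   = calloc(numOfSubqueries, sizeof(int8_t));
  int32_t code = pthread_mutex_init(&st->mutex, NULL);
  if (st->pSubs == NULL || st->states == NULL || code != 0) {
    return -1;  /* the patch returns TSDB_CODE_TSC_OUT_OF_MEMORY here */
  }
  return 0;
}

/* mirrors the tscTryQueryNextClause() hunk: the teardown now frees the
 * states array and destroys the mutex, not just the subquery array */
static void cleanupSubState(SubState *st) {
  free(st->pSubs);  st->pSubs  = NULL;
  free(st->states); st->states = NULL;
  st->numOfSub = 0;
  pthread_mutex_destroy(&st->mutex);
}

int main(void) {
  SubState st = {0};
  if (initSubState(&st, 4) == 0) {
    cleanupSubState(&st);
  }
  return 0;
}

The design point (build with -pthread): allocation of the subquery array, the per-subquery state flags, and the mutex now either all succeed or surface through one error code, which is what lets the executeQuery() hunk collapse to a single doInitSubState() call.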
diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index 2e49a69366c2277c98ec32a1d8419c141ddecc0f..0ede2e1e6f71c69aa6aaed2c05d671840679ce5c 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -33,9 +33,13 @@ struct SSchema; #define QUERY_COND_REL_PREFIX_IN "IN|" #define QUERY_COND_REL_PREFIX_LIKE "LIKE|" +#define QUERY_COND_REL_PREFIX_MATCH "MATCH|" +#define QUERY_COND_REL_PREFIX_NMATCH "NMATCH|" #define QUERY_COND_REL_PREFIX_IN_LEN 3 #define QUERY_COND_REL_PREFIX_LIKE_LEN 5 +#define QUERY_COND_REL_PREFIX_MATCH_LEN 6 +#define QUERY_COND_REL_PREFIX_NMATCH_LEN 7 typedef bool (*__result_filter_fn_t)(const void *, void *); typedef void (*__do_filter_suppl_fn_t)(void *, void *); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 285145f8f63693e193817a83e079f978ce4ebe6a..604ce89432bcf662b319fb2ec11f55026450a2be 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -74,6 +74,7 @@ extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; extern int32_t tsMaxWildCardsLen; +extern int32_t tsMaxRegexStringLen; extern int8_t tsTscEnableRecordSql; extern int32_t tsMaxNumOfOrderedResults; extern int32_t tsMinSlidingTime; @@ -223,6 +224,8 @@ extern uint32_t maxRange; extern uint32_t curRange; extern char Compressor[]; #endif +// long query +extern int8_t tsDeadLockKillQuery; typedef struct { char dir[TSDB_FILENAME_LEN]; diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index aa60803dac740157e1a9f5229e524bca8401c4cd..61378c79c4b5c44ffa11ae9132aa6f8b89ab5f71 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -448,6 +448,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols int dcol = 0; while (dcol < pCols->numOfCols) { + bool setCol = 0; SDataCol *pDataCol = &(pCols->cols[dcol]); if (rcol >= schemaNCols(pSchema)) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); @@ -458,13 +459,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols STColumn *pRowCol = schemaColAt(pSchema, rcol); if (pRowCol->colId == pDataCol->colId) { void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE); + if(!isNull(value, pDataCol->type)) setCol = 1; dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); dcol++; rcol++; } else if (pRowCol->colId < pDataCol->colId) { rcol++; } else { - if(forceSetNull) { + if(forceSetNull || setCol) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); } dcol++; @@ -482,6 +484,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo int nRowCols = kvRowNCols(row); while (dcol < pCols->numOfCols) { + bool setCol = 0; SDataCol *pDataCol = &(pCols->cols[dcol]); if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); @@ -493,13 +496,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo if (colIdx->colId == pDataCol->colId) { void *value = tdGetKvRowDataOfCol(row, colIdx->offset); + if(!isNull(value, pDataCol->type)) setCol = 1; dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); ++dcol; ++rcol; } else if (colIdx->colId < pDataCol->colId) { ++rcol; } else { - if (forceSetNull) { + if(forceSetNull || setCol) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, 
pCols->maxPoints); } ++dcol; @@ -518,7 +522,6 @@ void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, b } } -//TODO: refactor this function to eliminate additional memory copy int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); @@ -534,7 +537,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); for (int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { - if (source->cols[j].len > 0) { + if (source->cols[j].len > 0 || target->cols[j].len > 0) { dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows, target->maxPoints); } @@ -578,7 +581,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if (key1 < key2) { for (int i = 0; i < src1->numOfCols; i++) { ASSERT(target->cols[i].type == src1->cols[i].type); - if (src1->cols[i].len > 0) { + if (src1->cols[i].len > 0 || target->cols[i].len > 0) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, target->maxPoints); } @@ -596,6 +599,8 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, target->maxPoints); + } else if(target->cols[i].len > 0) { + dataColSetNullAt(&target->cols[i], target->numOfRows); } } target->numOfRows++; diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index ebdb33fd5b804e169a5e8ffc0b9a59e8dc0a331e..6823de631fe65a27f527be011def4819dd77c73a 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -430,6 +430,26 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) { pVal->nType = TSDB_DATA_TYPE_BINARY; pVal->nLen = (int32_t)len; + } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_MATCH, QUERY_COND_REL_PREFIX_MATCH_LEN) == 0) { + right->nodeType = TSQL_NODE_VALUE; + expr->_node.optr = TSDB_RELATION_MATCH; + tVariant* pVal = exception_calloc(1, sizeof(tVariant)); + right->pVal = pVal; + size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_MATCH_LEN) + 1; + pVal->pz = exception_malloc(len); + memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_MATCH_LEN, len); + pVal->nType = TSDB_DATA_TYPE_BINARY; + pVal->nLen = (int32_t)len; + } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_NMATCH, QUERY_COND_REL_PREFIX_NMATCH_LEN) == 0) { + right->nodeType = TSQL_NODE_VALUE; + expr->_node.optr = TSDB_RELATION_NMATCH; + tVariant* pVal = exception_calloc(1, sizeof(tVariant)); + right->pVal = pVal; + size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_NMATCH_LEN) + 1; + pVal->pz = exception_malloc(len); + memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_NMATCH_LEN, len); + pVal->nType = TSDB_DATA_TYPE_BINARY; + pVal->nLen = (int32_t)len; } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN) == 0) { right->nodeType = TSQL_NODE_VALUE; expr->_node.optr = TSDB_RELATION_IN; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 0f1fe5e2b6761629509a82b29fd3e742341c6e90..339fa35bb3009db96c9c6e0cabea6b60881f05c5 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -85,6 +85,8 @@ int32_t tsCompressColData = -1; // 
client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; +int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN; + int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from @@ -277,6 +279,9 @@ uint32_t curRange = 100; // range char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR #endif +// long query deadlock +int8_t tsDeadLockKillQuery = 0; + int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; void (*monExecuteSQLFp)(char *sql) = NULL; @@ -1034,6 +1039,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); + cfg.option = "maxRegexStringLen"; + cfg.ptr = &tsMaxRegexStringLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = TSDB_MAX_FIELD_LEN; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + cfg.option = "maxNumOfOrderedRes"; cfg.ptr = &tsMaxNumOfOrderedResults; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1611,7 +1626,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); + // enable kill long query + cfg.option = "deadLockKillQuery"; + cfg.ptr = &tsDeadLockKillQuery; + cfg.valType = TAOS_CFG_VTYPE_INT8; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 1; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; @@ -1665,6 +1690,9 @@ static void doInitGlobalConfig(void) { cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM); +#else + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5); #endif } diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 6b9fc9d96ce16700ee1243ef7c148a423a965d0b..256fa614e0bd0a88e0c676a3ae13c9c177eb5215 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -113,6 +113,7 @@ **/AppMemoryLeakTest.java + **/JDBCTypeAndTypeCompareTest.java **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java **/DatetimeBefore1970Test.java **/FailOverTest.java diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 6bddd3f42835e6706ef922f2175d6e9a36dcf509..3d76e1f98d4f8aa1d0ba3d68395e4036c5b069e6 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -586,6 +586,130 @@ public class TSDBPreparedStatementTest { Assert.assertEquals(numOfRows, rows); } + @Test + public void bindDataQueryTest() throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))"); + + int numOfRows = 1; + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?)
(ts, f2) values(?, ?)"); + s.setTableName("w2"); + s.setTagInt(0, 1); + s.setTagString(1, "test"); + + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add("test" + i % 4); + } + s.setString(1, s2, 10); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + + String sql = "select * from weather_test where t1 >= ? and t1 <= ?"; + TSDBPreparedStatement s1 = (TSDBPreparedStatement) conn.prepareStatement(sql); + s1.setInt(1, 0); + s1.setInt(2, 10); + + ResultSet rs = s1.executeQuery(); + int rows = 0; + while (rs.next()) { + rows++; + } + Assert.assertEquals(numOfRows, rows); + } + + @Test + public void setTagNullTest()throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(10), t9 nchar(10))"); + + int numOfRows = 1; + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?,?,?,?,?,?,?,?) values(?, ?)"); + s.setTableName("w3"); + s.setTagNull(0, TSDBConstants.TSDB_DATA_TYPE_TINYINT); + s.setTagNull(1, TSDBConstants.TSDB_DATA_TYPE_SMALLINT); + s.setTagNull(2, TSDBConstants.TSDB_DATA_TYPE_INT); + s.setTagNull(3, TSDBConstants.TSDB_DATA_TYPE_BIGINT); + s.setTagNull(4, TSDBConstants.TSDB_DATA_TYPE_FLOAT); + s.setTagNull(5, TSDBConstants.TSDB_DATA_TYPE_DOUBLE); + s.setTagNull(6, TSDBConstants.TSDB_DATA_TYPE_BOOL); + s.setTagNull(7, TSDBConstants.TSDB_DATA_TYPE_BINARY); + s.setTagNull(8, TSDBConstants.TSDB_DATA_TYPE_NCHAR); + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add(i); + } + s.setInt(1, s2); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + } + + private String stringGenerator(int length) { + String source = "abcdefghijklmnopqrstuvwxyz"; + StringBuilder sb = new StringBuilder(); + Random rand = new Random(); + for(int i = 0; i < length; i++) { + sb.append(source.charAt(rand.nextInt(26))); + } + return sb.toString(); + } + + @Test(expected = SQLException.class) + public void setMaxTableNameTest()throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 int)"); + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) 
values(?, ?)"); + String tbname = stringGenerator(193); + s.setTableName(tbname); + s.setTagInt(0, 1); + + int numOfRows = 1; + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add(i); + } + s.setInt(1, s2); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + } + + @Test(expected = SQLException.class) public void createTwoSameDbTest() throws SQLException { // when diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java new file mode 100644 index 0000000000000000000000000000000000000000..eb3b2985dfaff1b956909a50ca23470279cb48ca --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java @@ -0,0 +1,34 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Test; + +import java.sql.*; + +public class JDBCTypeAndTypeCompareTest { + + @Test + public void test() throws SQLException { + Connection conn = DriverManager.getConnection("jdbc:TAOS://192.168.17.156:6030/", "root", "taosdata"); + Statement stmt = conn.createStatement(); + + stmt.execute("drop database if exists test"); + stmt.execute("create database if not exists test"); + stmt.execute("use test"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10) )"); + stmt.execute("insert into weather values(now, 1, 2, 3.0, 4.0, 5, 6, true, 'test','test')"); + + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + String columnName = meta.getColumnName(i); + String columnTypeName = meta.getColumnTypeName(i); + Object value = rs.getObject(i); + System.out.printf("columnName : %s, columnTypeName: %s, JDBCType: %s\n", columnName, columnTypeName, value.getClass().getName()); + } + } + + stmt.close(); + conn.close(); + } +} diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index a1b6fe312b5725b8bf030701608d93c3e0c85706..c5737ea5a07b7678e058307dfe3b47546dd99909 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -827,6 +827,22 @@ def taos_insert_lines(connection, lines): if errno != 0: raise LinesError("insert lines error", errno) +def taos_insert_telnet_lines(connection, lines): + # type: (c_void_p, list[str] | tuple(str)) -> None + num_of_lines = len(lines) + lines = (c_char_p(line.encode("utf-8")) for line in lines) + lines_type = ctypes.c_char_p * num_of_lines + p_lines = lines_type(*lines) + errno = _libtaos.taos_insert_telnet_lines(connection, p_lines, num_of_lines) + if errno != 0: + raise TelnetLinesError("insert telnet lines error", errno) + +def taos_insert_json_payload(connection, payload): + # type: (c_void_p, list[str] | tuple(str)) -> None + payload = payload.encode("utf-8") + errno = _libtaos.taos_insert_json_payload(connection, payload) + if errno != 0: + raise JsonPayloadError("insert json payload error", errno) class CTaosInterface(object): def __init__(self, config=None): diff --git a/src/connector/python/taos/connection.py b/src/connector/python/taos/connection.py index 
7857c8c706dbe27fd9440e6bf2eb698b6822650e..35aca1fb26c1e612c3b3f6b1d8c794495bed0035 100644 --- a/src/connector/python/taos/connection.py +++ b/src/connector/python/taos/connection.py @@ -145,6 +145,34 @@ class TaosConnection(object): """ return taos_insert_lines(self._conn, lines) + def insert_telnet_lines(self, lines): + """OpenTSDB telnet style API format support + + ## Example + cpu_load 1626056811855516532ns 2.0f32 id="tb1",host="host0",interface="eth0" + + """ + return taos_insert_telnet_lines(self._conn, lines) + + def insert_json_payload(self, payload): + """OpenTSDB HTTP JSON format support + + ## Example + "{ + "metric": "cpu_load_0", + "timestamp": 1626006833610123, + "value": 55.5, + "tags": + { + "host": "ubuntu", + "interface": "eth0", + "Id": "tb0" + } + }" + + """ + return taos_insert_json_payload(self._conn, payload) + def cursor(self): # type: () -> TaosCursor """Return a new Cursor object using the connection.""" diff --git a/src/connector/python/taos/error.py b/src/connector/python/taos/error.py index a30adbb162f1c194bdfcf4cca5c43f01107a9776..f6a9d41f56a3fb071080daaae3bdd840190b154d 100644 --- a/src/connector/python/taos/error.py +++ b/src/connector/python/taos/error.py @@ -83,4 +83,14 @@ class ResultError(DatabaseError): class LinesError(DatabaseError): """taos_insert_lines errors.""" - pass \ No newline at end of file + pass + +class TelnetLinesError(DatabaseError): + """taos_insert_telnet_lines errors.""" + + pass + +class JsonPayloadError(DatabaseError): + """taos_insert_json_payload errors.""" + + pass diff --git a/src/inc/query.h b/src/inc/query.h index fb9cbff8584892b4a6bc6e4a6ce046a7500aef39..0872e3dbaa517ded77dd758b30e69f273c13a580 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -76,6 +76,11 @@ void* qGetResultRetrieveMsg(qinfo_t qinfo); */ int32_t qKillQuery(qinfo_t qinfo); +// kill a query by its qid +int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount); + +bool qSolveCommitNoBlock(void* pRepo, void* pMgmt); + int32_t qQueryCompleted(qinfo_t qinfo); /** diff --git a/src/inc/taos.h b/src/inc/taos.h index 6fa30737e71e8f40cee817386ad4d2c26661777f..edb1552b811a2ff4b8c78c19523cc6f2ad82ba74 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -172,6 +172,10 @@ DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList); DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines); +DLL_EXPORT int taos_insert_telnet_lines(TAOS* taos, char* lines[], int numLines); + +DLL_EXPORT int taos_insert_json_payload(TAOS* taos, char* payload); + #ifdef __cplusplus } #endif diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 44b3a2cf0d08d550e037113ac86abde9c79b176e..89de733ba123157f6454fb35b26f06202a2095c4 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -164,6 +164,9 @@ do { \ #define TSDB_RELATION_OR 12 #define TSDB_RELATION_NOT 13 +#define TSDB_RELATION_MATCH 14 +#define TSDB_RELATION_NMATCH 15 + #define TSDB_BINARY_OP_ADD 30 #define TSDB_BINARY_OP_SUBTRACT 31 #define TSDB_BINARY_OP_MULTIPLY 32 diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index c401ab762eb7f17b075ca52ab1e9454eb136a2ab..d59b88c7e698b3e965b5923efdc760e0289f7250 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -35,6 +35,7 @@ int32_t* taosGetErrno(); #define terrno (*taosGetErrno()) #define TSDB_CODE_SUCCESS 0 +#define TSDB_CODE_FAILED -1 // unknown error, or error details need not be reported // rpc #define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress") @@ -106,6 +107,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"duplicated column names") #define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length") #define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length") +#define TSDB_CODE_TSC_DUP_TAG_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220) //"duplicated tag names") +#define TSDB_CODE_TSC_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x0221) //"Invalid JSON format") +#define TSDB_CODE_TSC_INVALID_JSON_TYPE TAOS_DEF_ERROR_CODE(0, 0x0222) //"Invalid JSON data type") +#define TSDB_CODE_TSC_VALUE_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x0223) //"Value out of range") // mnode #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 8f5269c158bd4a733d08b727ed0b3e3741821b25..bb93c52142a73d00ebde9e039143a5e124eb6e9a 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -766,27 +766,16 @@ typedef struct SSTableVgroupMsg { int32_t numOfTables; } SSTableVgroupMsg, SSTableVgroupRspMsg; -typedef struct { - int32_t vgId; - int8_t numOfEps; - SEpAddr1 epAddr[TSDB_MAX_REPLICA]; -} SVgroupInfo; typedef struct { int32_t vgId; int8_t numOfEps; SEpAddrMsg epAddr[TSDB_MAX_REPLICA]; } SVgroupMsg; -typedef struct { - int32_t numOfVgroups; - SVgroupInfo vgroups[]; -} SVgroupsInfo; typedef struct { int32_t numOfVgroups; SVgroupMsg vgroups[]; -} SVgroupsMsg; +} SVgroupsMsg, SVgroupsInfo; typedef struct STableMetaMsg { int32_t contLen; diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 7abe3e99c720af1682fc103beec9a5d4caeb09eb..089e30ac3728761c68fe155f960c8650a32c2f7a 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -39,6 +39,7 @@ extern "C" { #define TSDB_STATUS_COMMIT_START 1 #define TSDB_STATUS_COMMIT_OVER 2 +#define TSDB_STATUS_COMMIT_NOBLOCK 3 // commit did not block and still needs to be resolved // TSDB STATE DEFINITION #define TSDB_STATE_OK 0x0 @@ -413,6 +414,11 @@ int tsdbSyncRecv(void *pRepo, SOCKET socketFd); // For TSDB Compact int tsdbCompact(STsdbRepo *pRepo); +// For TSDB Health Monitor + +// returns true if no problem is detected +bool tsdbNoProblem(STsdbRepo* pRepo); + #ifdef __cplusplus } #endif diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 3d35a51006b9742528fc07fb2ec91e9cd6fa38db..9b0ad2cf13b6df01bda56906891a4de677ff08a3 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -37,180 +37,181 @@ #define TK_NOTNULL 19 #define TK_IS 20 #define TK_LIKE 21 -#define TK_GLOB 22 -#define TK_BETWEEN 23 -#define TK_IN 24 -#define TK_GT 25 -#define TK_GE 26 -#define TK_LT 27 -#define TK_LE 28 -#define TK_BITAND 29 -#define TK_BITOR 30 -#define TK_LSHIFT 31 -#define TK_RSHIFT 32 -#define TK_PLUS 33 -#define TK_MINUS 34 -#define TK_DIVIDE 35 -#define TK_TIMES 36 -#define TK_STAR 37 -#define TK_SLASH 38 -#define TK_REM 39 -#define TK_CONCAT 40 -#define TK_UMINUS 41 -#define TK_UPLUS 42 -#define TK_BITNOT 43 -#define TK_SHOW 44 -#define TK_DATABASES 45 -#define TK_TOPICS 46 -#define TK_FUNCTIONS 47 -#define TK_MNODES 48 -#define TK_DNODES 49 -#define TK_ACCOUNTS 50 -#define TK_USERS 51 -#define TK_MODULES 52 -#define TK_QUERIES 53 -#define TK_CONNECTIONS 54 -#define TK_STREAMS 55 -#define TK_VARIABLES 56 -#define TK_SCORES 57 -#define TK_GRANTS 58 -#define TK_VNODES 59 -#define TK_DOT 60 -#define TK_CREATE 61 -#define TK_TABLE 62 -#define TK_STABLE 63 -#define TK_DATABASE 64 -#define TK_TABLES 65 -#define TK_STABLES 66 -#define TK_VGROUPS 67 -#define TK_DROP 68 -#define TK_TOPIC 69 -#define TK_FUNCTION 70
-#define TK_DNODE 71 -#define TK_USER 72 -#define TK_ACCOUNT 73 -#define TK_USE 74 -#define TK_DESCRIBE 75 -#define TK_DESC 76 -#define TK_ALTER 77 -#define TK_PASS 78 -#define TK_PRIVILEGE 79 -#define TK_LOCAL 80 -#define TK_COMPACT 81 -#define TK_LP 82 -#define TK_RP 83 -#define TK_IF 84 -#define TK_EXISTS 85 -#define TK_AS 86 -#define TK_OUTPUTTYPE 87 -#define TK_AGGREGATE 88 -#define TK_BUFSIZE 89 -#define TK_PPS 90 -#define TK_TSERIES 91 -#define TK_DBS 92 -#define TK_STORAGE 93 -#define TK_QTIME 94 -#define TK_CONNS 95 -#define TK_STATE 96 -#define TK_COMMA 97 -#define TK_KEEP 98 -#define TK_CACHE 99 -#define TK_REPLICA 100 -#define TK_QUORUM 101 -#define TK_DAYS 102 -#define TK_MINROWS 103 -#define TK_MAXROWS 104 -#define TK_BLOCKS 105 -#define TK_CTIME 106 -#define TK_WAL 107 -#define TK_FSYNC 108 -#define TK_COMP 109 -#define TK_PRECISION 110 -#define TK_UPDATE 111 -#define TK_CACHELAST 112 -#define TK_PARTITIONS 113 -#define TK_UNSIGNED 114 -#define TK_TAGS 115 -#define TK_USING 116 -#define TK_NULL 117 -#define TK_NOW 118 -#define TK_SELECT 119 -#define TK_UNION 120 -#define TK_ALL 121 -#define TK_DISTINCT 122 -#define TK_FROM 123 -#define TK_VARIABLE 124 -#define TK_INTERVAL 125 -#define TK_EVERY 126 -#define TK_SESSION 127 -#define TK_STATE_WINDOW 128 -#define TK_FILL 129 -#define TK_SLIDING 130 -#define TK_ORDER 131 -#define TK_BY 132 -#define TK_ASC 133 -#define TK_GROUP 134 -#define TK_HAVING 135 -#define TK_LIMIT 136 -#define TK_OFFSET 137 -#define TK_SLIMIT 138 -#define TK_SOFFSET 139 -#define TK_WHERE 140 -#define TK_RESET 141 -#define TK_QUERY 142 -#define TK_SYNCDB 143 -#define TK_ADD 144 -#define TK_COLUMN 145 -#define TK_MODIFY 146 -#define TK_TAG 147 -#define TK_CHANGE 148 -#define TK_SET 149 -#define TK_KILL 150 -#define TK_CONNECTION 151 -#define TK_STREAM 152 -#define TK_COLON 153 -#define TK_ABORT 154 -#define TK_AFTER 155 -#define TK_ATTACH 156 -#define TK_BEFORE 157 -#define TK_BEGIN 158 -#define TK_CASCADE 159 -#define TK_CLUSTER 160 -#define TK_CONFLICT 161 -#define TK_COPY 162 -#define TK_DEFERRED 163 -#define TK_DELIMITERS 164 -#define TK_DETACH 165 -#define TK_EACH 166 -#define TK_END 167 -#define TK_EXPLAIN 168 -#define TK_FAIL 169 -#define TK_FOR 170 -#define TK_IGNORE 171 -#define TK_IMMEDIATE 172 -#define TK_INITIALLY 173 -#define TK_INSTEAD 174 -#define TK_MATCH 175 -#define TK_KEY 176 -#define TK_OF 177 -#define TK_RAISE 178 -#define TK_REPLACE 179 -#define TK_RESTRICT 180 -#define TK_ROW 181 -#define TK_STATEMENT 182 -#define TK_TRIGGER 183 -#define TK_VIEW 184 -#define TK_IPTOKEN 185 -#define TK_SEMI 186 -#define TK_NONE 187 -#define TK_PREV 188 -#define TK_LINEAR 189 -#define TK_IMPORT 190 -#define TK_TBNAME 191 -#define TK_JOIN 192 -#define TK_INSERT 193 -#define TK_INTO 194 -#define TK_VALUES 195 +#define TK_MATCH 22 +#define TK_NMATCH 23 +#define TK_GLOB 24 +#define TK_BETWEEN 25 +#define TK_IN 26 +#define TK_GT 27 +#define TK_GE 28 +#define TK_LT 29 +#define TK_LE 30 +#define TK_BITAND 31 +#define TK_BITOR 32 +#define TK_LSHIFT 33 +#define TK_RSHIFT 34 +#define TK_PLUS 35 +#define TK_MINUS 36 +#define TK_DIVIDE 37 +#define TK_TIMES 38 +#define TK_STAR 39 +#define TK_SLASH 40 +#define TK_REM 41 +#define TK_CONCAT 42 +#define TK_UMINUS 43 +#define TK_UPLUS 44 +#define TK_BITNOT 45 +#define TK_SHOW 46 +#define TK_DATABASES 47 +#define TK_TOPICS 48 +#define TK_FUNCTIONS 49 +#define TK_MNODES 50 +#define TK_DNODES 51 +#define TK_ACCOUNTS 52 +#define TK_USERS 53 +#define TK_MODULES 54 +#define TK_QUERIES 55 +#define TK_CONNECTIONS 56 +#define 
TK_STREAMS 57 +#define TK_VARIABLES 58 +#define TK_SCORES 59 +#define TK_GRANTS 60 +#define TK_VNODES 61 +#define TK_DOT 62 +#define TK_CREATE 63 +#define TK_TABLE 64 +#define TK_STABLE 65 +#define TK_DATABASE 66 +#define TK_TABLES 67 +#define TK_STABLES 68 +#define TK_VGROUPS 69 +#define TK_DROP 70 +#define TK_TOPIC 71 +#define TK_FUNCTION 72 +#define TK_DNODE 73 +#define TK_USER 74 +#define TK_ACCOUNT 75 +#define TK_USE 76 +#define TK_DESCRIBE 77 +#define TK_DESC 78 +#define TK_ALTER 79 +#define TK_PASS 80 +#define TK_PRIVILEGE 81 +#define TK_LOCAL 82 +#define TK_COMPACT 83 +#define TK_LP 84 +#define TK_RP 85 +#define TK_IF 86 +#define TK_EXISTS 87 +#define TK_AS 88 +#define TK_OUTPUTTYPE 89 +#define TK_AGGREGATE 90 +#define TK_BUFSIZE 91 +#define TK_PPS 92 +#define TK_TSERIES 93 +#define TK_DBS 94 +#define TK_STORAGE 95 +#define TK_QTIME 96 +#define TK_CONNS 97 +#define TK_STATE 98 +#define TK_COMMA 99 +#define TK_KEEP 100 +#define TK_CACHE 101 +#define TK_REPLICA 102 +#define TK_QUORUM 103 +#define TK_DAYS 104 +#define TK_MINROWS 105 +#define TK_MAXROWS 106 +#define TK_BLOCKS 107 +#define TK_CTIME 108 +#define TK_WAL 109 +#define TK_FSYNC 110 +#define TK_COMP 111 +#define TK_PRECISION 112 +#define TK_UPDATE 113 +#define TK_CACHELAST 114 +#define TK_PARTITIONS 115 +#define TK_UNSIGNED 116 +#define TK_TAGS 117 +#define TK_USING 118 +#define TK_NULL 119 +#define TK_NOW 120 +#define TK_SELECT 121 +#define TK_UNION 122 +#define TK_ALL 123 +#define TK_DISTINCT 124 +#define TK_FROM 125 +#define TK_VARIABLE 126 +#define TK_INTERVAL 127 +#define TK_EVERY 128 +#define TK_SESSION 129 +#define TK_STATE_WINDOW 130 +#define TK_FILL 131 +#define TK_SLIDING 132 +#define TK_ORDER 133 +#define TK_BY 134 +#define TK_ASC 135 +#define TK_GROUP 136 +#define TK_HAVING 137 +#define TK_LIMIT 138 +#define TK_OFFSET 139 +#define TK_SLIMIT 140 +#define TK_SOFFSET 141 +#define TK_WHERE 142 +#define TK_RESET 143 +#define TK_QUERY 144 +#define TK_SYNCDB 145 +#define TK_ADD 146 +#define TK_COLUMN 147 +#define TK_MODIFY 148 +#define TK_TAG 149 +#define TK_CHANGE 150 +#define TK_SET 151 +#define TK_KILL 152 +#define TK_CONNECTION 153 +#define TK_STREAM 154 +#define TK_COLON 155 +#define TK_ABORT 156 +#define TK_AFTER 157 +#define TK_ATTACH 158 +#define TK_BEFORE 159 +#define TK_BEGIN 160 +#define TK_CASCADE 161 +#define TK_CLUSTER 162 +#define TK_CONFLICT 163 +#define TK_COPY 164 +#define TK_DEFERRED 165 +#define TK_DELIMITERS 166 +#define TK_DETACH 167 +#define TK_EACH 168 +#define TK_END 169 +#define TK_EXPLAIN 170 +#define TK_FAIL 171 +#define TK_FOR 172 +#define TK_IGNORE 173 +#define TK_IMMEDIATE 174 +#define TK_INITIALLY 175 +#define TK_INSTEAD 176 +#define TK_KEY 177 +#define TK_OF 178 +#define TK_RAISE 179 +#define TK_REPLACE 180 +#define TK_RESTRICT 181 +#define TK_ROW 182 +#define TK_STATEMENT 183 +#define TK_TRIGGER 184 +#define TK_VIEW 185 +#define TK_IPTOKEN 186 +#define TK_SEMI 187 +#define TK_NONE 188 +#define TK_PREV 189 +#define TK_LINEAR 190 +#define TK_IMPORT 191 +#define TK_TBNAME 192 +#define TK_JOIN 193 +#define TK_INSERT 194 +#define TK_INTO 195 +#define TK_VALUES 196 #define TK_SPACE 300 diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 9161860f07dfb0683a47ea4ccb2a759ae49562e7..a1413be1ce4ce6f67516fc09121115f30bbc56f0 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -98,6 +98,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN); 
} arguments->password = g_password; + arguments->is_use_passwd = true; strcpy(argv[i], ""); argc -= 1; } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 87102cc1c76206a0c6f779efc9ef22e9607409ef..9b3be1556a47b52142f41b84ca385108088a9018 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -20,6 +20,7 @@ #include #include +#include #define _GNU_SOURCE #define CURL_STATICLIB @@ -87,7 +88,7 @@ extern char configDir[]; #define DOUBLE_BUFF_LEN 42 #define TIMESTAMP_BUFF_LEN 21 -#define MAX_SAMPLES_ONCE_FROM_FILE 10000 +#define MAX_SAMPLES 10000 #define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp #define MAX_DB_COUNT 8 @@ -108,6 +109,13 @@ extern char configDir[]; #define DEFAULT_DATATYPE_NUM 1 #define DEFAULT_CHILDTABLES 10000 +#define STMT_BIND_PARAM_BATCH 1 + +char* g_sampleDataBuf = NULL; +#if STMT_BIND_PARAM_BATCH == 1 + // bind param batch +char* g_sampleBindBatchArray = NULL; +#endif enum TEST_MODE { INSERT_TEST, // 0 @@ -116,17 +124,17 @@ enum TEST_MODE { INVAID_TEST }; -typedef enum CREATE_SUB_TALBE_MOD_EN { +typedef enum CREATE_SUB_TABLE_MOD_EN { PRE_CREATE_SUBTBL, AUTO_CREATE_SUBTBL, NO_CREATE_SUBTBL -} CREATE_SUB_TALBE_MOD_EN; +} CREATE_SUB_TABLE_MOD_EN; -typedef enum TALBE_EXISTS_EN { +typedef enum TABLE_EXISTS_EN { TBL_NO_EXISTS, TBL_ALREADY_EXISTS, TBL_EXISTS_BUTT -} TALBE_EXISTS_EN; +} TABLE_EXISTS_EN; enum enumSYNC_MODE { SYNC_MODE, @@ -226,21 +234,23 @@ typedef struct SArguments_S { bool performance_print; char * output_file; bool async_mode; - char * datatype[MAX_NUM_COLUMNS + 1]; + char data_type[MAX_NUM_COLUMNS+1]; + char *dataType[MAX_NUM_COLUMNS+1]; uint32_t binwidth; - uint32_t num_of_CPR; - uint32_t num_of_threads; + uint32_t columnCount; + uint64_t lenOfOneRow; + uint32_t nthreads; uint64_t insert_interval; uint64_t timestamp_step; int64_t query_times; - uint32_t interlace_rows; - uint32_t num_of_RPR; // num_of_records_per_req + uint32_t interlaceRows; + uint32_t reqPerReq; // num_of_records_per_req uint64_t max_sql_len; - int64_t num_of_tables; - int64_t num_of_DPT; + int64_t ntables; + int64_t insertRows; int abort; uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. accordig to database precision + int disorderRange; // ms, us or ns. 
according to database precision uint32_t method_of_delete; uint64_t totalInsertRows; uint64_t totalAffectedRows; @@ -248,14 +258,15 @@ typedef struct SArguments_S { } SArguments; typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN]; - char dataType[DATATYPE_BUFF_LEN]; - uint32_t dataLen; - char note[NOTE_BUFF_LEN]; + char field[TSDB_COL_NAME_LEN]; + char data_type; + char dataType[DATATYPE_BUFF_LEN]; + uint32_t dataLen; + char note[NOTE_BUFF_LEN]; } StrColumn; typedef struct SSuperTable_S { - char sTblName[TSDB_TABLE_NAME_LEN]; + char stbName[TSDB_TABLE_NAME_LEN]; char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample char childTblPrefix[TBNAME_PREFIX_LEN]; uint16_t childTblExists; @@ -291,14 +302,16 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; - //int sampleRowCount; - //int sampleUsePos; uint32_t tagSource; // 0: rand, 1: tag sample char* tagDataBuf; uint32_t tagSampleCount; uint32_t tagUsePos; +#if STMT_BIND_PARAM_BATCH == 1 + // bind param batch + char *sampleBindBatchArray; +#endif // statistics uint64_t totalInsertRows; uint64_t totalAffectedRows; @@ -378,7 +391,7 @@ typedef struct SDbs_S { } SDbs; typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint64_t queryInterval; // 0: unlimited > 0 loop/s uint32_t concurrent; int sqlCount; uint32_t asyncMode; // 0: sync, 1: async @@ -398,8 +411,8 @@ typedef struct SpecifiedQueryInfo_S { } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { - char sTblName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimit > 0 loop/s + char stbName[TSDB_TABLE_NAME_LEN]; + uint64_t queryInterval; // 0: unlimited > 0 loop/s uint32_t threadCnt; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms @@ -437,8 +450,16 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; - char* sampleBindArray; - int64_t *bind_ts; + int64_t *bind_ts; + +#if STMT_BIND_PARAM_BATCH == 1 + int64_t *bind_ts_array; + char *bindParams; + char *is_null; +#else + char* sampleBindArray; +#endif + int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -584,8 +605,8 @@ char *g_rand_current_buff = NULL; char *g_rand_phase_buff = NULL; char *g_randdouble_buff = NULL; -char *g_aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", - "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; +char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", + "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; SArguments g_args = { NULL, // metaFile @@ -614,22 +635,26 @@ SArguments g_args = { false, // answer_yes; "./output.txt", // output_file 0, // mode : sync or async + {TSDB_DATA_TYPE_FLOAT, + TSDB_DATA_TYPE_INT, + TSDB_DATA_TYPE_FLOAT}, { - "FLOAT", // datatype - "INT", // datatype - "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 + "FLOAT", // dataType + "INT", // dataType + "FLOAT", // dataType. 
demo mode has 3 columns }, 64, // binwidth - 4, // num_of_CPR - 10, // num_of_connections/thread + 4, // columnCount, timestamp + float + int + float + 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow + 8, // num_of_connections/thread 0, // insert_interval DEFAULT_TIMESTAMP_STEP, // timestamp_step 1, // query_times - DEFAULT_INTERLACE_ROWS, // interlace_rows; - 30000, // num_of_RPR + DEFAULT_INTERLACE_ROWS, // interlaceRows; + 30000, // reqPerReq (1024*1024), // max_sql_len - DEFAULT_CHILDTABLES, // num_of_tables - 10000, // num_of_DPT + DEFAULT_CHILDTABLES, // ntables + 10000, // insertRows 0, // abort 0, // disorderRatio 1000, // disorderRange @@ -711,10 +736,10 @@ static void printVersion() { char taosdemo_status[] = TAOSDEMO_STATUS; if (strlen(taosdemo_status) == 0) { - printf("taosdemo verison %s-%s\n", + printf("taosdemo version %s-%s\n", tdengine_ver, taosdemo_ver); } else { - printf("taosdemo verison %s-%s, status:%s\n", + printf("taosdemo version %s-%s, status:%s\n", tdengine_ver, taosdemo_ver, taosdemo_status); } } @@ -791,7 +816,7 @@ static void printHelp() { "The number of records per table. Default is 10000."); printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tThe default is to simulate power equipment senario."); + printf("%s\n", "\t\t\t\tThe default is to simulate power equipment scenario."); printf("%s%s%s%s\n", indent, "-x, --no-insert", "\t\t", "No-insert flag."); printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Default input yes for prompt."); @@ -836,7 +861,7 @@ static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); } -static void errorUnreconized(char *program, char *wrong_arg) +static void errorUnrecognized(char *program, char *wrong_arg) { fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); @@ -893,7 +918,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { arguments->metaFile = (char *)(argv[i] + strlen("--file=")); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) @@ -915,7 +940,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) { tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) @@ -937,7 +962,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { arguments->host = (char *)(argv[i] + strlen("--host=")); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-PP") == 0) { @@ -971,7 +996,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->port = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) @@ -1032,7 +1057,7 
@@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } i++; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) @@ -1054,7 +1079,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->user = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) @@ -1088,7 +1113,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->output_file = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) @@ -1110,7 +1135,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) @@ -1148,7 +1173,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->async_mode = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) @@ -1161,17 +1186,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "T"); exit(EXIT_FAILURE); } - arguments->num_of_threads = atoi(argv[++i]); + arguments->nthreads = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) { if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) { - arguments->num_of_threads = atoi((char *)(argv[i]+strlen("--threads="))); + arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads="))); } else { errorPrintReqArg2(argv[0], "--threads"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) { if (isStringNumber((char *)(argv[i] + strlen("-T")))) { - arguments->num_of_threads = atoi((char *)(argv[i]+strlen("-T"))); + arguments->nthreads = atoi((char *)(argv[i]+strlen("-T"))); } else { errorPrintReqArg2(argv[0], "-T"); exit(EXIT_FAILURE); @@ -1184,9 +1209,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--threads"); exit(EXIT_FAILURE); } - arguments->num_of_threads = atoi(argv[++i]); + arguments->nthreads = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) @@ -1224,7 +1249,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->insert_interval = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-S", strlen("-S"))) @@ -1262,7 +1287,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->async_mode = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-qt") == 0) { @@ -1283,17 +1308,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "B"); exit(EXIT_FAILURE); } - arguments->interlace_rows = atoi(argv[++i]); + arguments->interlaceRows = 
atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) { if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) { - arguments->interlace_rows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); } else { errorPrintReqArg2(argv[0], "--interlace-rows"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { if (isStringNumber((char *)(argv[i] + strlen("-B")))) { - arguments->interlace_rows = atoi((char *)(argv[i]+strlen("-B"))); + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B"))); } else { errorPrintReqArg2(argv[0], "-B"); exit(EXIT_FAILURE); @@ -1306,9 +1331,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--interlace-rows"); exit(EXIT_FAILURE); } - arguments->interlace_rows = atoi(argv[++i]); + arguments->interlaceRows = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) @@ -1321,17 +1346,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "r"); exit(EXIT_FAILURE); } - arguments->num_of_RPR = atoi(argv[++i]); + arguments->reqPerReq = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) { if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) { - arguments->num_of_RPR = atoi((char *)(argv[i]+strlen("--rec-per-req="))); + arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req="))); } else { errorPrintReqArg2(argv[0], "--rec-per-req"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) { if (isStringNumber((char *)(argv[i] + strlen("-r")))) { - arguments->num_of_RPR = atoi((char *)(argv[i]+strlen("-r"))); + arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r"))); } else { errorPrintReqArg2(argv[0], "-r"); exit(EXIT_FAILURE); @@ -1344,9 +1369,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--rec-per-req"); exit(EXIT_FAILURE); } - arguments->num_of_RPR = atoi(argv[++i]); + arguments->reqPerReq = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) @@ -1359,17 +1384,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "t"); exit(EXIT_FAILURE); } - arguments->num_of_tables = atoi(argv[++i]); + arguments->ntables = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) { if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) { - arguments->num_of_tables = atoi((char *)(argv[i]+strlen("--tables="))); + arguments->ntables = atoi((char *)(argv[i]+strlen("--tables="))); } else { errorPrintReqArg2(argv[0], "--tables"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) { if (isStringNumber((char *)(argv[i] + strlen("-t")))) { - arguments->num_of_tables = atoi((char *)(argv[i]+strlen("-t"))); + arguments->ntables = atoi((char *)(argv[i]+strlen("-t"))); } else { errorPrintReqArg2(argv[0], "-t"); exit(EXIT_FAILURE); @@ -1382,13 +1407,13 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--tables"); exit(EXIT_FAILURE); } - arguments->num_of_tables = 
atoi(argv[++i]); + arguments->ntables = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } - g_totalChildTables = arguments->num_of_tables; + g_totalChildTables = arguments->ntables; } else if ((0 == strncmp(argv[i], "-n", strlen("-n"))) || (0 == strncmp(argv[i], "--records", strlen("--records")))) { if (2 == strlen(argv[i])) { @@ -1399,17 +1424,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "n"); exit(EXIT_FAILURE); } - arguments->num_of_DPT = atoi(argv[++i]); + arguments->insertRows = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) { if (isStringNumber((char *)(argv[i] + strlen("--records=")))) { - arguments->num_of_DPT = atoi((char *)(argv[i]+strlen("--records="))); + arguments->insertRows = atoi((char *)(argv[i]+strlen("--records="))); } else { errorPrintReqArg2(argv[0], "--records"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) { if (isStringNumber((char *)(argv[i] + strlen("-n")))) { - arguments->num_of_DPT = atoi((char *)(argv[i]+strlen("-n"))); + arguments->insertRows = atoi((char *)(argv[i]+strlen("-n"))); } else { errorPrintReqArg2(argv[0], "-n"); exit(EXIT_FAILURE); @@ -1422,9 +1447,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--records"); exit(EXIT_FAILURE); } - arguments->num_of_DPT = atoi(argv[++i]); + arguments->insertRows = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) @@ -1446,7 +1471,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->database = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) @@ -1460,17 +1485,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "l"); exit(EXIT_FAILURE); } - arguments->num_of_CPR = atoi(argv[++i]); + arguments->columnCount = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) { if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) { - arguments->num_of_CPR = atoi((char *)(argv[i]+strlen("--columns="))); + arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns="))); } else { errorPrintReqArg2(argv[0], "--columns"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) { if (isStringNumber((char *)(argv[i] + strlen("-l")))) { - arguments->num_of_CPR = atoi((char *)(argv[i]+strlen("-l"))); + arguments->columnCount = atoi((char *)(argv[i]+strlen("-l"))); } else { errorPrintReqArg2(argv[0], "-l"); exit(EXIT_FAILURE); @@ -1483,23 +1508,25 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--columns"); exit(EXIT_FAILURE); } - arguments->num_of_CPR = atoi(argv[++i]); + arguments->columnCount = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } - if (arguments->num_of_CPR > MAX_NUM_COLUMNS) { - printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS); + if (arguments->columnCount > MAX_NUM_COLUMNS) { + printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS); prompt(); - arguments->num_of_CPR = MAX_NUM_COLUMNS; + 
arguments->columnCount = MAX_NUM_COLUMNS; } - for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) { - arguments->datatype[col] = "INT"; + for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) { + arguments->dataType[col] = "INT"; + arguments->data_type[col] = TSDB_DATA_TYPE_INT; } - for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) { - arguments->datatype[col] = NULL; + for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) { + arguments->dataType[col] = NULL; + arguments->data_type[col] = TSDB_DATA_TYPE_NULL; } } else if ((0 == strncmp(argv[i], "-b", strlen("-b"))) || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) { @@ -1523,7 +1550,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } dataType = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1543,8 +1570,32 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } - arguments->datatype[0] = dataType; - arguments->datatype[1] = NULL; + arguments->dataType[0] = dataType; + if (0 == strcasecmp(dataType, "INT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_INT; + } else if (0 == strcasecmp(dataType, "TINYINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strcasecmp(dataType, "SMALLINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strcasecmp(dataType, "BIGINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strcasecmp(dataType, "FLOAT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(dataType, "DOUBLE")) { + arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strcasecmp(dataType, "BINARY")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BINARY; + } else if (0 == strcasecmp(dataType, "NCHAR")) { + arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strcasecmp(dataType, "BOOL")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BOOL; + } else if (0 == strcasecmp(dataType, "TIMESTAMP")) { + arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP; + } else { + arguments->data_type[0] = TSDB_DATA_TYPE_NULL; + } + arguments->dataType[1] = NULL; + arguments->data_type[1] = TSDB_DATA_TYPE_NULL; } else { // more than one col int index = 0; @@ -1567,11 +1618,37 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } - arguments->datatype[index++] = token; + + if (0 == strcasecmp(token, "INT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_INT; + } else if (0 == strcasecmp(token, "FLOAT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(token, "SMALLINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strcasecmp(token, "BIGINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strcasecmp(token, "DOUBLE")) { + arguments->data_type[index] = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strcasecmp(token, "TINYINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strcasecmp(token, "BINARY")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BINARY; + } else if (0 == strcasecmp(token, "NCHAR")) { + arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strcasecmp(token, "BOOL")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BOOL; + }
else if (0 == strcasecmp(token, "TIMESTAMP")) { + arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP; + } else { + arguments->data_type[index] = TSDB_DATA_TYPE_NULL; + } + arguments->dataType[index] = token; + index ++; token = strsep(&running, ","); if (index >= MAX_NUM_COLUMNS) break; } - arguments->datatype[index] = NULL; + arguments->dataType[index] = NULL; + arguments->data_type[index] = TSDB_DATA_TYPE_NULL; } } else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) { @@ -1608,7 +1685,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->binwidth = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) @@ -1630,7 +1707,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->tb_prefix = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "-N") == 0) @@ -1695,7 +1772,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->disorderRange = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) @@ -1733,7 +1810,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->disorderRatio = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1787,7 +1864,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->replica = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1799,7 +1876,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-D") == 0) { arguments->method_of_delete = atoi(argv[++i]); if (arguments->method_of_delete > 3) { - errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n"); + errorPrint("%s", "\n\t-D need a value (0~3) number following!\n"); exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "--version") == 0) @@ -1814,7 +1891,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ - [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUNNS] [-T THREADNUMBER]\n\ + [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ [--help] [--usage] [--version]\n"); @@ -1842,7 +1919,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { int columnCount; for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) { - if (g_args.datatype[columnCount] == NULL) { + if (g_args.dataType[columnCount] == NULL) { break; } } @@ -1850,7 +1927,56 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (0 == columnCount) { ERROR_EXIT("data type error!"); } - g_args.num_of_CPR = columnCount; + g_args.columnCount = columnCount; + + g_args.lenOfOneRow = 20; // timestamp + for (int c = 0; c < 
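/*
 * Reviewer sketch (not part of the patch): this string -> TSDB_DATA_TYPE_*
 * chain is now repeated for the single-column and multi-column -b paths,
 * and again in the describe-result and JSON parsers further down. A
 * table-driven helper would keep them in sync -- note the multi-column
 * chain above maps "DOUBLE" to TSDB_DATA_TYPE_FLOAT, a copy/paste slip a
 * shared helper would have prevented. Assumes only the TSDB_DATA_TYPE_*
 * values from taos.h; the helper name is hypothetical.
 */
static char dataTypeFromString(const char *name) {
    static const struct { const char *name; char type; } kTypes[] = {
        {"INT",      TSDB_DATA_TYPE_INT},      {"TINYINT",   TSDB_DATA_TYPE_TINYINT},
        {"SMALLINT", TSDB_DATA_TYPE_SMALLINT}, {"BIGINT",    TSDB_DATA_TYPE_BIGINT},
        {"FLOAT",    TSDB_DATA_TYPE_FLOAT},    {"DOUBLE",    TSDB_DATA_TYPE_DOUBLE},
        {"BINARY",   TSDB_DATA_TYPE_BINARY},   {"NCHAR",     TSDB_DATA_TYPE_NCHAR},
        {"BOOL",     TSDB_DATA_TYPE_BOOL},     {"TIMESTAMP", TSDB_DATA_TYPE_TIMESTAMP},
    };
    for (size_t i = 0; i < sizeof(kTypes) / sizeof(kTypes[0]); i++) {
        if (0 == strcasecmp(name, kTypes[i].name)) {
            return kTypes[i].type;
        }
    }
    return TSDB_DATA_TYPE_NULL;
}
/* usage: arguments->data_type[index] = dataTypeFromString(token); */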
g_args.columnCount; c++) { + switch(g_args.data_type[c]) { + case TSDB_DATA_TYPE_BINARY: + g_args.lenOfOneRow += g_args.binwidth + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + g_args.lenOfOneRow += g_args.binwidth + 3; + break; + + case TSDB_DATA_TYPE_INT: + g_args.lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + g_args.lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + g_args.lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + g_args.lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + g_args.lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + g_args.lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + g_args.lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + errorPrint2("get error data type : %s\n", g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } if (((arguments->debug_print) && (NULL != arguments->metaFile)) || arguments->verbose_print) { @@ -1863,11 +1989,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf("# Password: %s\n", arguments->password); printf("# Use metric: %s\n", arguments->use_metric ? "true" : "false"); - if (*(arguments->datatype)) { + if (*(arguments->dataType)) { printf("# Specified data type: "); for (int c = 0; c < MAX_NUM_COLUMNS; c++) - if (arguments->datatype[c]) - printf("%s,", arguments->datatype[c]); + if (arguments->dataType[c]) + printf("%s,", arguments->dataType[c]); else break; printf("\n"); @@ -1875,15 +2001,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf("# Insertion interval: %"PRIu64"\n", arguments->insert_interval); printf("# Number of records per req: %u\n", - arguments->num_of_RPR); + arguments->reqPerReq); printf("# Max SQL length: %"PRIu64"\n", arguments->max_sql_len); printf("# Length of Binary: %d\n", arguments->binwidth); - printf("# Number of Threads: %d\n", arguments->num_of_threads); + printf("# Number of Threads: %d\n", arguments->nthreads); printf("# Number of Tables: %"PRId64"\n", - arguments->num_of_tables); + arguments->ntables); printf("# Number of Data per Table: %"PRId64"\n", - arguments->num_of_DPT); + arguments->insertRows); printf("# Database name: %s\n", arguments->database); printf("# Table prefix: %s\n", arguments->tb_prefix); if (arguments->disorderRatio) { @@ -1909,31 +2035,20 @@ static void tmfclose(FILE *fp) { static void tmfree(char *buf) { if (NULL != buf) { free(buf); + buf = NULL; } } static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { - int i; - TAOS_RES *res = NULL; - int32_t code = -1; - for (i = 0; i < 5 /* retry */; i++) { - if (NULL != res) { - taos_free_result(res); - res = NULL; - } + verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - res = taos_query(taos, command); - code = taos_errno(res); - if (0 == code) { - break; - } - } + TAOS_RES *res = taos_query(taos, command); + int32_t code = taos_errno(res); - verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); if (code != 0) { if (!quiet) { - errorPrint2("Failed to execute %s, reason: %s\n", + errorPrint2("Failed to execute <%s>, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); @@ -2338,7 +2453,7 @@ static int printfInsertMeta() { printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", g_args.insert_interval); printf("number of records per req: \033[33m%u\033[0m\n", - 
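/*
 * Reviewer note (illustrative, not part of the patch): the added
 * `buf = NULL;` in tmfree() only clears the local copy of the pointer;
 * the caller's pointer is unchanged and still dangles. Clearing it for
 * the caller needs a pointer-to-pointer -- hypothetical helper:
 */
static void tmfreep(char **buf) {
    if ((NULL != buf) && (NULL != *buf)) {
        free(*buf);
        *buf = NULL;   /* now visible at the call site */
    }
}
/* usage: tmfreep(&stbInfo->sampleDataBuf);
 * Also worth noting: the queryDbExec() rewrite above drops the old
 * 5-attempt retry loop, so transient query failures now surface on the
 * first error instead of being retried. */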
g_args.num_of_RPR); + g_args.reqPerReq); printf("max sql length: \033[33m%"PRIu64"\033[0m\n", g_args.max_sql_len); @@ -2349,9 +2464,9 @@ static int printfInsertMeta() { printf(" database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName); if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33mno\033[0m\n"); + printf(" drop: \033[33m no\033[0m\n"); } else { - printf(" drop: \033[33myes\033[0m\n"); + printf(" drop: \033[33m yes\033[0m\n"); } if (g_Dbs.db[i].dbCfg.blocks > 0) { @@ -2420,7 +2535,7 @@ static int printfInsertMeta() { printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].stbName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -2460,9 +2575,9 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].insertRows); /* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); + printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); }else { - printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); + printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); } */ printf(" interlaceRows: \033[33m%u\033[0m\n", @@ -2543,7 +2658,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl); - fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR); + fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq); fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); @@ -2610,7 +2725,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " super table[%d]:\n", j); fprintf(fp, " stbName: %s\n", - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].stbName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { fprintf(fp, " autoCreateTable: %s\n", "no"); @@ -2769,7 +2884,7 @@ static void printfQueryMeta() { printf("childTblCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.sTblName); + g_queryInfo.superQueryInfo.stbName); printf("stb query times:\033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); @@ -2840,36 +2955,45 @@ static void xDumpFieldToFile(FILE* fp, const char* val, char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0)); + fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 
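/*
 * Reviewer sketch: how the per-field dumper above is typically driven --
 * fetch each row and emit one CSV cell per column. Illustrative only;
 * assumes the xDumpFieldToFile(fp, val, field, length, precision) shape
 * used in this file plus the standard taos_fetch_* client API.
 */
static void dumpResult(FILE *fp, TAOS_RES *res) {
    int         numFields = taos_num_fields(res);
    TAOS_FIELD *fields    = taos_fetch_fields(res);
    int         precision = taos_result_precision(res);

    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
        int32_t *lengths = taos_fetch_lengths(res);
        for (int i = 0; i < numFields; i++) {
            if (i > 0) {
                fputc(',', fp);
            }
            if (NULL == row[i]) {
                fputs("NULL", fp);
                continue;
            }
            xDumpFieldToFile(fp, (const char *)row[i],
                    fields + i, lengths[i], precision);
        }
        fputc('\n', fp);
    }
}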
1 : 0)); break; + case TSDB_DATA_TYPE_TINYINT: fprintf(fp, "%d", *((int8_t *)val)); break; + case TSDB_DATA_TYPE_SMALLINT: fprintf(fp, "%d", *((int16_t *)val)); break; + case TSDB_DATA_TYPE_INT: fprintf(fp, "%d", *((int32_t *)val)); break; + case TSDB_DATA_TYPE_BIGINT: fprintf(fp, "%"PRId64"", *((int64_t *)val)); break; + case TSDB_DATA_TYPE_FLOAT: fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); break; + case TSDB_DATA_TYPE_DOUBLE: fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); break; + case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: memcpy(buf, val, length); buf[length] = 0; fprintf(fp, "\'%s\'", buf); break; + case TSDB_DATA_TYPE_TIMESTAMP: formatTimestamp(buf, *(int64_t*)val, precision); fprintf(fp, "'%s'", buf); break; + default: break; } @@ -3356,29 +3480,50 @@ static int calcRowLen(SSuperTable* superTbls) { for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { char* dataType = superTbls->columns[colIndex].dataType; - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfOneRow += INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfOneRow += BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfOneRow += SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfOneRow += TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfOneRow += BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfOneRow += FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfOneRow += DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - lenOfOneRow += TIMESTAMP_BUFF_LEN; - } else { - errorPrint2("get error data type : %s\n", dataType); - exit(EXIT_FAILURE); + switch(superTbls->columns[colIndex].data_type) { + case TSDB_DATA_TYPE_BINARY: + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_INT: + lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + errorPrint2("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -3418,9 +3563,8 @@ static int calcRowLen(SSuperTable* superTbls) { return 0; } - static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, - char* dbName, char* sTblName, char** childTblNameOfSuperTbl, + char* dbName, char* stbName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { char command[1024] = "\0"; @@ -3431,14 +3575,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* childTblName = *childTblNameOfSuperTbl; - if (offset >= 0) { - snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", - limit, offset); - } + snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", + 
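/*
 * Reviewer note: worked example of the per-row length accounting in the
 * calcRowLen() switch above, for the demo schema (CURRENT FLOAT,
 * VOLTAGE INT, PHASE FLOAT) plus a BINARY(16) column:
 *
 *   lenOfOneRow = FLOAT_BUFF_LEN      // current
 *               + INT_BUFF_LEN        // voltage
 *               + FLOAT_BUFF_LEN      // phase
 *               + (16 + 3)            // binary payload + two quotes + comma
 *
 * plus the fixed timestamp allowance added outside the loop. BINARY and
 * NCHAR are the only variable-width types, which is why they bypass the
 * fixed *_BUFF_LEN constants and contribute dataLen + 3 instead.
 */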
limit, offset); //get all child table name use cmd: select tbname from superTblName; snprintf(command, 1024, "select tbname from %s.%s %s", - dbName, sTblName, limitBuf); + dbName, stbName, limitBuf); res = taos_query(taos, command); int32_t code = taos_errno(res); @@ -3489,7 +3631,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); + __func__, __LINE__, dbName, stbName); exit(EXIT_FAILURE); } } @@ -3504,10 +3646,10 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, - char* sTblName, char** childTblNameOfSuperTbl, + char* stbName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl) { - return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, + return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, -1, 0); } @@ -3521,7 +3663,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int count = 0; //get schema use cmd: describe superTblName; - snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName); + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName); res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { @@ -3547,6 +3689,39 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], min(DATATYPE_BUFF_LEN, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "INT", strlen("INT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL; + } superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); tstrncpy(superTbls->tags[tagIndex].note, @@ -3558,16 +3733,51 @@ static int getSuperTableFromServer(TAOS * taos, char* 
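/*
 * Reviewer note: shape of the `describe <db>.<stb>` result the loop above
 * walks (values illustrative). Rows whose Note column reads "TAG"
 * populate superTbls->tags[], the rest populate superTbls->columns[];
 * the TSDB_DESCRIBE_METRIC_{FIELD,TYPE,LENGTH,NOTE}_INDEX constants pick
 * the cells out of each row.
 *
 *   Field    | Type      | Length | Note
 *   ---------+-----------+--------+------
 *   ts       | TIMESTAMP |      8 |
 *   current  | FLOAT     |      4 |
 *   groupid  | INT       |      4 | TAG
 */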
dbName, tstrncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + tstrncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], min(DATATYPE_BUFF_LEN, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "INT", strlen("INT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL; + } superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); tstrncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], min(NOTE_BUFF_LEN, fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); + columnIndex++; } count++; @@ -3589,7 +3799,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, return -1; } getAllChildNameOfSuperTable(taos, dbName, - superTbls->sTblName, + superTbls->stbName, &superTbls->childTblName, &superTbls->childTblCount); } @@ -3605,7 +3815,6 @@ static int createSuperTable( assert(command); char cols[COL_BUFFER_LEN] = "\0"; - int colIndex; int len = 0; int lenOfOneRow = 0; @@ -3617,67 +3826,87 @@ static int createSuperTable( return -1; } - for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - char* dataType = superTbl->columns[colIndex].dataType; + for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ",C%d %s(%d)", colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ",C%d %s(%d)", colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - } 
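/*
 * Reviewer sketch: semantics of the tstrncpy() used throughout this hunk,
 * reproduced from memory of TDengine's util headers -- treat as
 * illustrative. Unlike plain strncpy(), it always NUL-terminates, which
 * is why callers pass min(bufLen, fieldBytes) + 1 as the size.
 */
#define tstrncpy(dst, src, size)          \
    do {                                  \
        strncpy((dst), (src), (size));    \
        (dst)[(size) - 1] = 0;            \
    } while (0)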
else if (strcasecmp(dataType, "INT") == 0) { - if ((g_args.demo_mode) && (colIndex == 1)) { + switch(superTbl->columns[colIndex].data_type) { + case TSDB_DATA_TYPE_BINARY: len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", VOLTAGE INT"); - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); - } - lenOfOneRow += INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT"); - lenOfOneRow += BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT"); - lenOfOneRow += SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); - lenOfOneRow += TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); - lenOfOneRow += BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - if (g_args.demo_mode) { - if (colIndex == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); - } else if (colIndex == 2) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); + ",C%d %s(%d)", colIndex, "BINARY", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "NCHAR", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (colIndex == 1)) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ", VOLTAGE INT"); + } else { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); } - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); - } + lenOfOneRow += INT_BUFF_LEN; + break; - lenOfOneRow += FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "DOUBLE"); - lenOfOneRow += DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TIMESTAMP"); - lenOfOneRow += TIMESTAMP_BUFF_LEN; - } else { - taos_close(taos); - free(command); - errorPrint2("%s() LN%d, config error data type : %s\n", - __func__, __LINE__, dataType); - exit(EXIT_FAILURE); + case TSDB_DATA_TYPE_BIGINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "BIGINT"); + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "SMALLINT"); + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); + lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (colIndex == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); + } else if (colIndex == 2) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); + } + } else { + len += 
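/*
 * Reviewer note (illustrative): snprintf() returns the length that would
 * have been written, so once truncation happens `len` can grow past
 * COL_BUFFER_LEN and the next `COL_BUFFER_LEN - len` goes negative (and
 * underflows if converted to size_t). A guarded accumulation like the
 * sketch below keeps the column-building loop above safe; the error text
 * is hypothetical.
 */
int n = snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
if ((n < 0) || (n >= COL_BUFFER_LEN - len)) {
    errorPrint2("%s() LN%d, column list exceeds %d bytes\n",
            __func__, __LINE__, COL_BUFFER_LEN);
    exit(EXIT_FAILURE);
}
len += n;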
snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); + } + + lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "DOUBLE"); + lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "TIMESTAMP"); + lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + taos_close(taos); + free(command); + errorPrint2("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, superTbl->columns[colIndex].dataType); + exit(EXIT_FAILURE); } } @@ -3777,16 +4006,16 @@ static int createSuperTable( superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; snprintf(command, BUFFER_SIZE, - "create table if not exists %s.%s (ts timestamp%s) tags %s", - dbName, superTbl->sTblName, cols, tags); + "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s", + dbName, superTbl->stbName, cols, tags); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint2("create supertable %s failed!\n\n", - superTbl->sTblName); + superTbl->stbName); free(command); return -1; } - debugPrint("create supertable %s success!\n\n", superTbl->sTblName); + debugPrint("create supertable %s success!\n\n", superTbl->stbName); free(command); return 0; } @@ -3810,42 +4039,42 @@ int createDatabasesAndStables(char *command) { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", + BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s", g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", + BUFFER_SIZE - dataLen, " BLOCKS %d", g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", + BUFFER_SIZE - dataLen, " CACHE %d", g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", + BUFFER_SIZE - dataLen, " DAYS %d", g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", + BUFFER_SIZE - dataLen, " KEEP %d", g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", + BUFFER_SIZE - dataLen, " QUORUM %d", g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", + BUFFER_SIZE - dataLen, " REPLICA %d", g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", + BUFFER_SIZE - dataLen, " UPDATE %d", g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { @@ -3854,17 +4083,17 @@ int createDatabasesAndStables(char *command) { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", + BUFFER_SIZE - dataLen, " MINROWS %d", g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", + BUFFER_SIZE - dataLen, " MAXROWS %d", g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", + BUFFER_SIZE - dataLen, " COMP %d", g_Dbs.db[i].dbCfg.comp); } if 
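/*
 * Reviewer note: with the cols/tags buffers assembled above, demo mode
 * ends up issuing a statement of this shape (db/stb/tag names are
 * hypothetical, taken from the usual meters example rather than from
 * this hunk):
 *
 *   CREATE TABLE IF NOT EXISTS test.meters
 *       (ts TIMESTAMP, CURRENT FLOAT, VOLTAGE INT, PHASE FLOAT)
 *       TAGS (groupid INT, location BINARY(64))
 *
 * The uppercase keywords are what this hunk changes; the SQL parser is
 * case-insensitive, so behavior is unchanged.
 */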
(g_Dbs.db[i].dbCfg.walLevel > 0) { @@ -3874,12 +4103,12 @@ int createDatabasesAndStables(char *command) { } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", + BUFFER_SIZE - dataLen, " CACHELAST %d", g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " fsync %d", g_Dbs.db[i].dbCfg.fsync); + " FSYNC %d", g_Dbs.db[i].dbCfg.fsync); } if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, @@ -3906,7 +4135,7 @@ int createDatabasesAndStables(char *command) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].stbName); ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); if ((ret != 0) || (g_Dbs.db[i].drop)) { @@ -3923,7 +4152,7 @@ int createDatabasesAndStables(char *command) { &g_Dbs.db[i].superTbls[j]); if (0 != ret) { errorPrint2("\nget super table %s.%s info failed!\n\n", - g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName); continue; } @@ -3965,7 +4194,7 @@ static void* createTable(void *sarg) i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(pThreadInfo->buffer, buff_len, - "create table if not exists %s.%s%"PRIu64" %s;", + "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;", pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); @@ -3981,7 +4210,7 @@ static void* createTable(void *sarg) batchNum = 0; memset(pThreadInfo->buffer, 0, buff_len); len += snprintf(pThreadInfo->buffer + len, - buff_len - len, "create table "); + buff_len - len, "CREATE TABLE "); } char* tagsValBuf = NULL; @@ -4006,7 +4235,7 @@ static void* createTable(void *sarg) "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", pThreadInfo->db_name, stbInfo->childTblPrefix, i, pThreadInfo->db_name, - stbInfo->sTblName, tagsValBuf); + stbInfo->stbName, tagsValBuf); free(tagsValBuf); batchNum++; if ((batchNum < stbInfo->batchCreateTableNum) @@ -4151,15 +4380,15 @@ static void createChildTables() { } else { // normal table len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); - for (int j = 0; j < g_args.num_of_CPR; j++) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], + for (int j = 0; j < g_args.columnCount; j++) { + if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.dataType[j], "NCHAR", strlen("NCHAR")) == 0)) { snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s(%d)", j, g_args.datatype[j], g_args.binwidth); + ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth); } else { snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s", j, g_args.datatype[j]); + ",C%d %s", j, g_args.dataType[j]); } len = strlen(tblColsBuf); } @@ -4168,12 +4397,12 @@ static void createChildTables() { verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf); startMultiThreadCreateChildTable( tblColsBuf, g_Dbs.threadCountForCreateTbl, 0, - g_args.num_of_tables, + g_args.ntables, g_Dbs.db[i].dbName, NULL); } @@ -4251,7 +4480,7 @@ static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { /* Read 10000 lines at most. 
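/*
 * Reviewer note: example of the statement the option-by-option snprintf
 * chain above produces when blocks=8, cache=16, days=10, keep=3650,
 * replica=1 and precision "ms" are configured (database name
 * hypothetical):
 *
 *   CREATE DATABASE IF NOT EXISTS testdb BLOCKS 8 CACHE 16 DAYS 10
 *       KEEP 3650 REPLICA 1 PRECISION 'ms'
 *
 * Options left at their zero defaults are skipped entirely, so the
 * server-side defaults apply for them.
 */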
If more than 10000 lines, continue to read after using */ -static int readSampleFromCsvFileToMem( +static int generateSampleFromCsvForStb( SSuperTable* stbInfo) { size_t n = 0; ssize_t readLen = 0; @@ -4267,7 +4496,7 @@ static int readSampleFromCsvFileToMem( assert(stbInfo->sampleDataBuf); memset(stbInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow); + MAX_SAMPLES * stbInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { @@ -4298,7 +4527,7 @@ static int readSampleFromCsvFileToMem( line, readLen); getRows++; - if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { + if (getRows == MAX_SAMPLES) { break; } } @@ -4377,6 +4606,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); + superTbls->columns[index].dataLen = columnCase.dataLen; index++; } @@ -4390,6 +4620,42 @@ static bool getColumnAndTagTypeFromInsertJsonFile( superTbls->columnCount = index; + for (int c = 0; c < superTbls->columnCount; c++) { + if (0 == strncasecmp(superTbls->columns[c].dataType, + "INT", strlen("INT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL; + } + } + count = 1; index = 0; // tags @@ -4459,6 +4725,42 @@ static bool getColumnAndTagTypeFromInsertJsonFile( superTbls->tagCount = index; + for (int t = 0; t < superTbls->tagCount; t++) { + if (0 == strncasecmp(superTbls->tags[t].dataType, + "INT", strlen("INT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "FLOAT", strlen("FLOAT"))) { 
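/*
 * Reviewer note: shape of the CSV consumed by the renamed
 * generateSampleFromCsvForStb() -- one comma-separated row of column
 * values per line, with no timestamp column. Hypothetical values for a
 * (FLOAT, INT, FLOAT) schema:
 *
 *   10.3,219,0.31
 *   10.5,220,0.33
 *
 * At most MAX_SAMPLES rows are kept, each copied into sampleDataBuf at
 * stride lenOfOneRow, and getRowDataFromSample() wraps its cursor back
 * to 0 once MAX_SAMPLES rows have been consumed.
 */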
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL; + } + } + if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { errorPrint("columns + tags is more than allowed max columns count: %d\n", TSDB_MAX_COLUMNS); @@ -4553,15 +4855,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { if (interlaceRows->valueint < 0) { - errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); goto PARSE_OVER; } - g_args.interlace_rows = interlaceRows->valueint; + g_args.interlaceRows = interlaceRows->valueint; } else if (!interlaceRows) { - g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + g_args.interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); goto PARSE_OVER; } @@ -4595,9 +4897,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { prompt(); numRecPerReq->valueint = MAX_RECORDS_PER_REQ; } - g_args.num_of_RPR = numRecPerReq->valueint; + g_args.reqPerReq = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = MAX_RECORDS_PER_REQ; + g_args.reqPerReq = MAX_RECORDS_PER_REQ; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); @@ -4623,13 +4925,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } // rows per table need be less than insert batch - if (g_args.interlace_rows > g_args.num_of_RPR) { + if (g_args.interlaceRows > g_args.reqPerReq) { printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlace_rows, g_args.num_of_RPR); + g_args.interlaceRows, g_args.reqPerReq); printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", - g_args.num_of_RPR); + g_args.reqPerReq); prompt(); - g_args.interlace_rows = g_args.num_of_RPR; + g_args.interlaceRows = g_args.reqPerReq; } cJSON* dbs = cJSON_GetObjectItem(root, "databases"); @@ -4831,7 +5133,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - // super_talbes + // super_tables cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { errorPrint("%s", "failed to read json, super_tables not found\n"); @@ -4858,7 +5160,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { errorPrint("%s", "failed to read json, stb name not found\n"); goto PARSE_OVER; } - tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, + tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring, TSDB_TABLE_NAME_LEN); cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); @@ -5127,7 +5429,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows; } } else if (!stbInterlaceRows) { - g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + g_Dbs.db[i].superTbls[j].interlaceRows = g_args.interlaceRows; // 0 means progressive mode, > 0 mean interlace mode. 
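/*
 * Reviewer sketch: what the interlaceRows/reqPerReq relationship enforced
 * above means. With interlaceRows == 0 each table is written to
 * completion before the next starts (progressive mode); with
 * interlaceRows == k the writer round-robins k rows per table, and since
 * a single request can carry at most reqPerReq rows, k is clamped to
 * reqPerReq. Illustrative pseudo-loop only; the helper names are
 * hypothetical:
 */
for (int64_t generated = 0; generated < insertRows; generated += interlaceRows) {
    for (int64_t tb = 0; tb < ntables; tb++) {
        appendRowsForTable(tb, interlaceRows);          /* hypothetical */
        if (rowsBuffered() >= reqPerReq) {
            flushRequest();                             /* hypothetical */
        }
    }
}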
max value is less or equ num_of_records_per_req } else { errorPrint( "%s", "failed to read json, interlace rows input mistake\n"); @@ -5168,7 +5470,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n", + verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { @@ -5512,7 +5814,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring, TSDB_TABLE_NAME_LEN); } else { errorPrint("%s", "failed to read json, super table name input error\n"); @@ -5734,23 +6036,37 @@ static int prepareSampleData() { static void postFreeResource() { tmfclose(g_fpOfInsertResult); + for (int i = 0; i < g_Dbs.dbCount; i++) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; } if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - free(g_Dbs.db[i].superTbls[j].sampleDataBuf); + tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } +#if STMT_BIND_PARAM_BATCH == 1 + for (int c = 0; + c < g_Dbs.db[i].superTbls[j].columnCount; c ++) { + + if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) { + + tmfree((char *)((uintptr_t)*(uintptr_t*)( + g_Dbs.db[i].superTbls[j].sampleBindBatchArray + + sizeof(char*) * c))); + } + } + tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray); +#endif if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - free(g_Dbs.db[i].superTbls[j].tagDataBuf); + tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf); g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; } if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - free(g_Dbs.db[i].superTbls[j].childTblName); + tmfree(g_Dbs.db[i].superTbls[j].childTblName); g_Dbs.db[i].superTbls[j].childTblName = NULL; } } @@ -5766,13 +6082,26 @@ static void postFreeResource() { tmfree(g_rand_current_buff); tmfree(g_rand_phase_buff); + tmfree(g_sampleDataBuf); + +#if STMT_BIND_PARAM_BATCH == 1 + for (int l = 0; + l < g_args.columnCount; l ++) { + if (g_sampleBindBatchArray) { + tmfree((char *)((uintptr_t)*(uintptr_t*)( + g_sampleBindBatchArray + + sizeof(char*) * l))); + } + } + tmfree(g_sampleBindBatchArray); +#endif } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, SSuperTable* stbInfo, int64_t* sampleUsePos) { - if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + if ((*sampleUsePos) == MAX_SAMPLES) { *sampleUsePos = 0; } @@ -5803,13 +6132,14 @@ static int64_t generateStbRowData( int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "(%" PRId64 ",", timestamp); + "(%" PRId64 "", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", 6)) - || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", 5))) { + tstrncpy(pstr + dataLen, ",", 2); + dataLen += 1; + + if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY) + || 
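/*
 * Reviewer note (illustrative): the
 * (uintptr_t)*(uintptr_t*)(base + sizeof(char*) * c) expression in
 * postFreeResource() above just reads slot c of a char* array stored
 * behind an untyped pointer. An equivalent, easier-to-audit spelling:
 */
char **slots = (char **)g_Dbs.db[i].superTbls[j].sampleBindBatchArray;
if (slots) {
    for (int c = 0; c < g_Dbs.db[i].superTbls[j].columnCount; c++) {
        tmfree(slots[c]);
    }
}
tmfree((char *)slots);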
(stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint2("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); @@ -5827,80 +6157,91 @@ static int64_t generateStbRowData( return -1; } rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf); tmfree(buf); } else { - char *tmp; + char *tmp = NULL; + switch(stbInfo->columns[i].data_type) { + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (i == 1)) { + tmp = demo_voltage_int_str(); + } else { + tmp = rand_int_str(); + } + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); + break; - if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", 3)) { - if ((g_args.demo_mode) && (i == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", 6)) { - tmp = rand_bigint_str(); - tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", 5)) { - if (g_args.demo_mode) { - if (i == 0) { - tmp = demo_current_float_str(); + case TSDB_DATA_TYPE_BIGINT: + tmp = rand_bigint_str(); + tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (i == 0) { + tmp = demo_current_float_str(); + } else { + tmp = demo_phase_float_str(); + } } else { - tmp = demo_phase_float_str(); + tmp = rand_float_str(); } - } else { - tmp = rand_float_str(); - } - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", 6)) { - tmp = rand_double_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", 8)) { - tmp = rand_smallint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, SMALLINT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", 7)) { - tmp = rand_tinyint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", 4)) { - tmp = rand_bool_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", 9)) { - tmp = rand_bigint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); - } else { - errorPrint2("Not support data type: %s\n", - stbInfo->columns[i].dataType); - return -1; + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmp = rand_double_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + tmp = rand_smallint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, + min(tmpLen + 1, SMALLINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_TINYINT: + tmp = rand_tinyint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_BOOL: + 
tmp = rand_bool_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmp = rand_bigint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("Not support data type: %s\n", + stbInfo->columns[i].dataType); + exit(EXIT_FAILURE); } - dataLen += strlen(tmp); - tstrncpy(pstr + dataLen, ",", 2); - dataLen += 1; + if (tmp) { + dataLen += strlen(tmp); + } } if (dataLen > (remainderBufLen - (128))) return 0; } - tstrncpy(pstr + dataLen - 1, ")", 2); + tstrncpy(pstr + dataLen, ")", 2); verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen); verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); @@ -5908,53 +6249,83 @@ static int64_t generateStbRowData( return strlen(recBuf); } -static int64_t generateData(char *recBuf, char **data_type, +static int64_t generateData(char *recBuf, char *data_type, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; pstr += sprintf(pstr, "(%"PRId64"", timestamp); - int columnCount = g_args.num_of_CPR; + int columnCount = g_args.columnCount; + bool b; + char *s; for (int i = 0; i < columnCount; i++) { - if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) { - pstr += sprintf(pstr, ",%d", rand_tinyint() ); - } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) { - pstr += sprintf(pstr, ",%d", rand_smallint()); - } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { - pstr += sprintf(pstr, ",%d", rand_int()); - } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { - pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); - } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { - pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); - } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { - pstr += sprintf(pstr, ",%10.4f", rand_float()); - } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { - double t = rand_double(); - pstr += sprintf(pstr, ",%20.8f", t); - } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) { - bool b = rand_bool() & 1; - pstr += sprintf(pstr, ",%s", b ? 
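/*
 * Reviewer note: with the separating comma now emitted before every value
 * and the closing paren written at pstr + dataLen (instead of overwriting
 * the trailing comma at dataLen - 1), a generated demo-schema row reads:
 *
 *   (1609430400000,10.3000,217,0.3200)
 *
 * i.e. timestamp first, one value per column, no trailing comma. The
 * rewritten generateData() below produces the same shape for normal
 * tables.
 */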
"true" : "false"); - } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { - char *s = malloc(lenOfBinary + 1); - if (s == NULL) { - errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", - __func__, __LINE__, lenOfBinary + 1); - exit(EXIT_FAILURE); - } - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { - char *s = malloc(lenOfBinary + 1); - if (s == NULL) { - errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", - __func__, __LINE__, lenOfBinary + 1); + switch (data_type[i]) { + case TSDB_DATA_TYPE_TINYINT: + pstr += sprintf(pstr, ",%d", rand_tinyint() ); + break; + + case TSDB_DATA_TYPE_SMALLINT: + pstr += sprintf(pstr, ",%d", rand_smallint()); + break; + + case TSDB_DATA_TYPE_INT: + pstr += sprintf(pstr, ",%d", rand_int()); + break; + + case TSDB_DATA_TYPE_BIGINT: + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); + break; + + case TSDB_DATA_TYPE_FLOAT: + pstr += sprintf(pstr, ",%10.4f", rand_float()); + break; + + case TSDB_DATA_TYPE_DOUBLE: + pstr += sprintf(pstr, ",%20.8f", rand_double()); + break; + + case TSDB_DATA_TYPE_BOOL: + b = rand_bool() & 1; + pstr += sprintf(pstr, ",%s", b ? "true" : "false"); + break; + + case TSDB_DATA_TYPE_BINARY: + s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(EXIT_FAILURE); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + break; + + case TSDB_DATA_TYPE_NCHAR: + s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(EXIT_FAILURE); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("%s() LN%d, Unknown data type %d\n", + __func__, __LINE__, + data_type[i]); exit(EXIT_FAILURE); - } - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); } if (strlen(recBuf) > MAX_DATA_SIZE) { @@ -5969,97 +6340,160 @@ static int64_t generateData(char *recBuf, char **data_type, return (int32_t)strlen(recBuf); } -static int generateSampleMemoryFromRand(SSuperTable *stbInfo) +static int generateSampleFromRand( + char *sampleDataBuf, + uint64_t lenOfOneRow, + int columnCount, + StrColumn *columns + ) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); - char *buff = malloc(stbInfo->lenOfOneRow); + char *buff = malloc(lenOfOneRow); if (NULL == buff) { - errorPrint2("%s() LN%d, memory allocation %"PRId64" bytes failed\n", - __func__, __LINE__, stbInfo->lenOfOneRow); + errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n", + __func__, __LINE__, lenOfOneRow); exit(EXIT_FAILURE); } - for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { + for (int i=0; i < MAX_SAMPLES; i++) { uint64_t pos = 0; - memset(buff, 0, stbInfo->lenOfOneRow); + memset(buff, 0, lenOfOneRow); - for (int c = 0; c < stbInfo->columnCount; c++) { - char *tmp; - if (0 == strncasecmp(stbInfo->columns[c].dataType, - "BINARY", strlen("BINARY"))) { - rand_string(data, stbInfo->columns[c].dataLen); - pos += sprintf(buff + pos, "%s,", data); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "NCHAR", strlen("NCHAR"))) { - rand_string(data, stbInfo->columns[c].dataLen); - pos += 
sprintf(buff + pos, "%s,", data); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "INT", strlen("INT"))) { - if ((g_args.demo_mode) && (c == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "BIGINT", strlen("BIGINT"))) { - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "FLOAT", strlen("FLOAT"))) { - if (g_args.demo_mode) { - if (c == 0) { - tmp = demo_current_float_str(); + for (int c = 0; c < columnCount; c++) { + char *tmp = NULL; + + uint32_t dataLen; + char data_type = (columns)?(columns[c].data_type):g_args.data_type[c]; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + dataLen = (columns)?columns[c].dataLen:g_args.binwidth; + rand_string(data, dataLen); + pos += sprintf(buff + pos, "%s,", data); + break; + + case TSDB_DATA_TYPE_NCHAR: + dataLen = (columns)?columns[c].dataLen:g_args.binwidth; + rand_string(data, dataLen); + pos += sprintf(buff + pos, "%s,", data); + break; + + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (c == 1)) { + tmp = demo_voltage_int_str(); } else { - tmp = demo_phase_float_str(); + tmp = rand_int_str(); } - } else { - tmp = rand_float_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "DOUBLE", strlen("DOUBLE"))) { - pos += sprintf(buff + pos, "%s,", rand_double_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "SMALLINT", strlen("SMALLINT"))) { - pos += sprintf(buff + pos, "%s,", rand_smallint_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "TINYINT", strlen("TINYINT"))) { - pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "BOOL", strlen("BOOL"))) { - pos += sprintf(buff + pos, "%s,", rand_bool_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + pos += sprintf(buff + pos, "%s,", tmp); + break; + + case TSDB_DATA_TYPE_BIGINT: + pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (c == 0) { + tmp = demo_current_float_str(); + } else { + tmp = demo_phase_float_str(); + } + } else { + tmp = rand_float_str(); + } + pos += sprintf(buff + pos, "%s,", tmp); + break; + + case TSDB_DATA_TYPE_DOUBLE: + pos += sprintf(buff + pos, "%s,", rand_double_str()); + break; + + case TSDB_DATA_TYPE_SMALLINT: + pos += sprintf(buff + pos, "%s,", rand_smallint_str()); + break; + + case TSDB_DATA_TYPE_TINYINT: + pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); + break; + + case TSDB_DATA_TYPE_BOOL: + pos += sprintf(buff + pos, "%s,", rand_bool_str()); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("%s() LN%d, Unknown data type %s\n", + __func__, __LINE__, + (columns)?(columns[c].dataType):g_args.dataType[c]); + exit(EXIT_FAILURE); } } + *(buff + pos - 1) = 0; - memcpy(stbInfo->sampleDataBuf + i * stbInfo->lenOfOneRow, buff, pos); + memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos); } free(buff); return 0; } -static int prepareSampleDataForSTable(SSuperTable *stbInfo) { +static int generateSampleFromRandForNtb() +{ + return generateSampleFromRand( + g_sampleDataBuf, + 
g_args.lenOfOneRow, + g_args.columnCount, + NULL); +} + +static int generateSampleFromRandForStb(SSuperTable *stbInfo) +{ + return generateSampleFromRand( + stbInfo->sampleDataBuf, + stbInfo->lenOfOneRow, + stbInfo->columnCount, + stbInfo->columns); +} + +static int prepareSampleForNtb() { + g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1); + if (NULL == g_sampleDataBuf) { + errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, + g_args.lenOfOneRow * MAX_SAMPLES, + strerror(errno)); + return -1; + } + + return generateSampleFromRandForNtb(); +} + +static int prepareSampleForStb(SSuperTable *stbInfo) { stbInfo->sampleDataBuf = calloc( - stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + stbInfo->lenOfOneRow * MAX_SAMPLES, 1); if (NULL == stbInfo->sampleDataBuf) { errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, - stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + stbInfo->lenOfOneRow * MAX_SAMPLES, strerror(errno)); return -1; } int ret; - if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) - ret = readSampleFromCsvFileToMem(stbInfo); - else - ret = generateSampleMemoryFromRand(stbInfo); + if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) { + ret = generateSampleFromCsvForStb(stbInfo); + } else { + ret = generateSampleFromRandForStb(stbInfo); + } if (0 != ret) { errorPrint2("%s() LN%d, read sample from csv file failed.\n", @@ -6184,7 +6618,7 @@ static int32_t generateDataTailWithoutStb( int64_t retLen = 0; - char **data_type = g_args.datatype; + char *data_type = g_args.data_type; int lenOfBinary = g_args.binwidth; if (g_args.disorderRatio) { @@ -6370,7 +6804,7 @@ static int generateStbSQLHead( dbName, tableName, dbName, - stbInfo->sTblName, + stbInfo->stbName, tagsValBuf); tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { @@ -6502,202 +6936,224 @@ static int64_t generateInterlaceDataWithoutStb( static int32_t prepareStmtBindArrayByType( TAOS_BIND *bind, - char *dataType, int32_t dataLen, + char data_type, int32_t dataLen, int32_t timePrec, char *value) { - if (0 == strncasecmp(dataType, - "BINARY", strlen("BINARY"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary; + int32_t *bind_int; + int64_t *bind_bigint; + float *bind_float; + double *bind_double; + int8_t *bind_bool; + int64_t *bind_ts2; + int16_t *bind_smallint; + int8_t *bind_tinyint; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - bind_binary = calloc(1, strlen(value) + 1); - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - bind_binary = calloc(1, dataLen + 1); - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; - } else if (0 == 
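/*
 * Reviewer sketch: minimal use of the TAOS_BIND layout this function
 * fills in -- bind a single INT parameter and hand it to a prepared
 * statement. Illustrative only; assumes `stmt` came from
 * taos_stmt_init()/taos_stmt_prepare() elsewhere.
 */
TAOS_BIND b;
memset(&b, 0, sizeof(b));
int32_t v = 42;
b.buffer_type   = TSDB_DATA_TYPE_INT;
b.buffer        = &v;
b.buffer_length = sizeof(v);
b.length        = &b.buffer_length;
b.is_null       = NULL;
if (0 != taos_stmt_bind_param(stmt, &b)) {
    errorPrint2("taos_stmt_bind_param failed\n");
}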
strncasecmp(dataType, - "NCHAR", strlen("NCHAR"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar; + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + break; - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - bind_nchar = calloc(1, strlen(value) + 1); - strncpy(bind_nchar, value, strlen(value)); - } else { - bind_nchar = calloc(1, dataLen + 1); - rand_string(bind_nchar, dataLen); - } + case TSDB_DATA_TYPE_NCHAR: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "INT", strlen("INT"))) { - int32_t *bind_int = malloc(sizeof(int32_t)); - assert(bind_int); + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "BIGINT", strlen("BIGINT"))) { - int64_t *bind_bigint = malloc(sizeof(int64_t)); - assert(bind_bigint); + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "FLOAT", strlen("FLOAT"))) { - float *bind_float = malloc(sizeof(float)); - assert(bind_float); + case TSDB_DATA_TYPE_INT: + bind_int = malloc(sizeof(int32_t)); + assert(bind_int); - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = malloc(sizeof(double)); - assert(bind_double); + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "SMALLINT", strlen("SMALLINT"))) { - int16_t *bind_smallint = malloc(sizeof(int16_t)); - assert(bind_smallint); + case TSDB_DATA_TYPE_BIGINT: + bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); - if (value) { - *bind_smallint = 
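/*
 * A minimal sketch of the per-type pattern the switch above applies to
 * fixed-width columns: allocate storage, fill it from the given value or
 * a random generator, then point the TAOS_BIND at it. TAOS_BIND fields,
 * TSDB_DATA_TYPE_INT and rand_int() come from the surrounding code; the
 * helper name and the caller-frees ownership are assumptions.
 */
static int32_t bind_one_int(TAOS_BIND *bind, const char *value) {
    int32_t *bind_int = malloc(sizeof(int32_t));
    if (NULL == bind_int) return -1;

    *bind_int = value ? atoi(value) : rand_int();

    bind->buffer_type   = TSDB_DATA_TYPE_INT;
    bind->buffer_length = sizeof(int32_t);
    bind->buffer        = bind_int;        /* caller frees after execution */
    bind->length        = &bind->buffer_length;
    bind->is_null       = NULL;
    return 0;
}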
(int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = malloc(sizeof(int8_t)); - assert(bind_tinyint); + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = malloc(sizeof(int8_t)); - assert(bind_bool); - - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; + case TSDB_DATA_TYPE_FLOAT: + bind_float = malloc(sizeof(float)); + assert(bind_float); + + if (value) { + *bind_float = (float)atof(value); } else { - *bind_bool = false; + *bind_float = rand_float(); } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_DOUBLE: + bind_double = malloc(sizeof(double)); + assert(bind_double); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - } else if (0 == strncasecmp(dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = malloc(sizeof(int64_t)); - assert(bind_ts2); - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while(value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; + case TSDB_DATA_TYPE_SMALLINT: + bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_TINYINT: + bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_BOOL: + bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = 
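/*
 * For reference on the BOOL case nearby: strncasecmp() returns 0 exactly
 * when the prefixes match, so mapping "true" to 1 requires comparing the
 * return value against 0, as in this illustrative helper (assumes
 * <strings.h> and <stdint.h>).
 */
static int8_t parse_bool_value(const char *value) {
    /* 0 == strncasecmp(...)  <=>  value begins with "true" (any case) */
    return (0 == strncasecmp(value, "true", 4)) ? 1 : 0;
}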
TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; } - i++; - } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != taosParseTime( - value, &tmpEpoch, strlen(value), - timePrec, 0)) { - free(bind_ts2); - errorPrint2("Input %s, time format error!\n", value); - return -1; + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + free(bind_ts2); + errorPrint2("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); } - *bind_ts2 = tmpEpoch; } else { - *bind_ts2 = atoll(value); + *bind_ts2 = rand_bigint(); } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else { - errorPrint2("Not support data type: %s\n", dataType); - return -1; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("Not support data type: %d\n", data_type); + exit(EXIT_FAILURE); } return 0; @@ -6705,209 +7161,230 @@ static int32_t prepareStmtBindArrayByType( static int32_t prepareStmtBindArrayByTypeForRand( TAOS_BIND *bind, - char *dataType, int32_t dataLen, + char data_type, int32_t dataLen, int32_t timePrec, char **ptr, char *value) { - if (0 == strncasecmp(dataType, - "BINARY", strlen("BINARY"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary = (char *)*ptr; + int32_t *bind_int; + int64_t *bind_bigint; + float *bind_float; + double *bind_double; + int16_t *bind_smallint; + int8_t *bind_tinyint; + int8_t *bind_bool; + int64_t *bind_ts2; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary = (char *)*ptr; - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "NCHAR", strlen("NCHAR"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("nchar length overflow, max size: %u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar = (char *)*ptr; + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if 
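/*
 * A sketch restating the TIMESTAMP handling above: a value containing
 * both ':' and '-' is treated as a datetime string (surrounding quotes
 * blanked first, then parsed by taosParseTime()); anything else is read
 * as a raw epoch with atoll(). taosParseTime() and TSDB_CODE_SUCCESS are
 * from the surrounding code; the wrapper name is illustrative.
 */
static int parse_ts_value(char *value, int32_t timePrec, int64_t *epoch) {
    if (strchr(value, ':') && strchr(value, '-')) {
        for (int i = 0; value[i] != '\0'; i++) {
            if (value[i] == '\"' || value[i] == '\'') {
                value[i] = ' ';            /* strip quoting */
            }
        }
        if (TSDB_CODE_SUCCESS != taosParseTime(
                    value, epoch, strlen(value), timePrec, 0)) {
            return -1;                     /* time format error */
        }
    } else {
        *epoch = atoll(value);
    }
    return 0;
}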
(value) { - strncpy(bind_nchar, value, strlen(value)); - } else { - rand_string(bind_nchar, dataLen); - } + *ptr += bind->buffer_length; + break; - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + case TSDB_DATA_TYPE_NCHAR: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("nchar length overflow, max size: %u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar = (char *)*ptr; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "INT", strlen("INT"))) { - int32_t *bind_int = (int32_t *)*ptr; + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + strncpy(bind_nchar, value, strlen(value)); + } else { + rand_string(bind_nchar, dataLen); + } - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "BIGINT", strlen("BIGINT"))) { - int64_t *bind_bigint = (int64_t *)*ptr; + *ptr += bind->buffer_length; + break; - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + case TSDB_DATA_TYPE_INT: + bind_int = (int32_t *)*ptr; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "FLOAT", strlen("FLOAT"))) { - float *bind_float = (float *)*ptr; + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + *ptr += bind->buffer_length; + break; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = (double *)*ptr; + case TSDB_DATA_TYPE_BIGINT: + bind_bigint = (int64_t *)*ptr; - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "SMALLINT", strlen("SMALLINT"))) { - int16_t *bind_smallint = (int16_t *)*ptr; + *ptr += bind->buffer_length; + break; - if (value) { - *bind_smallint = (int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = 
bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + case TSDB_DATA_TYPE_FLOAT: + bind_float = (float *)*ptr; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = (int8_t *)*ptr; + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + *ptr += bind->buffer_length; + break; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = (int8_t *)*ptr; + case TSDB_DATA_TYPE_DOUBLE: + bind_double = (double *)*ptr; - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; + if (value) { + *bind_double = atof(value); } else { - *bind_bool = false; + *bind_double = rand_double(); } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = (int64_t *)*ptr; - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while(value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; - } - i++; + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_SMALLINT: + bind_smallint = (int16_t *)*ptr; + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_TINYINT: + bind_tinyint = (int8_t *)*ptr; + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_BOOL: + bind_bool = (int8_t *)*ptr; + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != taosParseTime( - value, &tmpEpoch, strlen(value), - timePrec, 0)) { - errorPrint2("Input %s, time format error!\n", value); - return -1; + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + bind_ts2 = (int64_t *)*ptr; + + if (value) { + if (strchr(value, ':') && 
strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint2("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); } - *bind_ts2 = tmpEpoch; } else { - *bind_ts2 = atoll(value); + *bind_ts2 = rand_bigint(); } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else { - errorPrint2("No support data type: %s\n", dataType); - return -1; + *ptr += bind->buffer_length; + break; + + default: + errorPrint2("No support data type: %d\n", data_type); + return -1; } return 0; @@ -6929,12 +7406,12 @@ static int32_t prepareStmtWithoutStb( return ret; } - char **data_type = g_args.datatype; + char *data_type = g_args.data_type; - char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1)); + char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1)); if (bindArray == NULL) { errorPrint2("Failed to allocate %d bind params\n", - (g_args.num_of_CPR + 1)); + (g_args.columnCount + 1)); return -1; } @@ -6961,7 +7438,7 @@ static int32_t prepareStmtWithoutStb( bind->length = &bind->buffer_length; bind->is_null = NULL; - for (int i = 0; i < g_args.num_of_CPR; i ++) { + for (int i = 0; i < g_args.columnCount; i ++) { bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); if ( -1 == prepareStmtBindArrayByType( @@ -6970,6 +7447,7 @@ static int32_t prepareStmtWithoutStb( g_args.binwidth, pThreadInfo->time_precision, NULL)) { + free(bindArray); return -1; } } @@ -7001,29 +7479,20 @@ static int32_t prepareStbStmtBindTag( char *tagsVal, int32_t timePrec) { - char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.binwidth); - if (bindBuffer == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, DOUBLE_BUFF_LEN); - return -1; - } - TAOS_BIND *tag; for (int t = 0; t < stbInfo->tagCount; t ++) { tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); if ( -1 == prepareStmtBindArrayByType( tag, - stbInfo->tags[t].dataType, + stbInfo->tags[t].data_type, stbInfo->tags[t].dataLen, timePrec, NULL)) { - free(bindBuffer); return -1; } } - free(bindBuffer); return 0; } @@ -7033,13 +7502,6 @@ static int32_t prepareStbStmtBindRand( int64_t startTime, int32_t recSeq, int32_t timePrec) { - char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.binwidth); - if (bindBuffer == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, DOUBLE_BUFF_LEN); - return -1; - } - char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); char *ptr = data; @@ -7069,51 +7531,15 @@ static int32_t prepareStbStmtBindRand( ptr += bind->buffer_length; } else if ( -1 == prepareStmtBindArrayByTypeForRand( bind, - stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].data_type, stbInfo->columns[i-1].dataLen, timePrec, &ptr, NULL)) { - tmfree(bindBuffer); return -1; } } - tmfree(bindBuffer); - return 0; -} - -static int32_t prepareStbStmtBindStartTime( - char *tableName, - 
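/*
 * The ...ForRand variant above avoids a malloc per bind by writing each
 * value into a caller-provided scratch region and advancing *ptr by
 * buffer_length. A minimal sketch of that bump-pointer idiom for one
 * INT32 value (helper name illustrative):
 */
static void bump_bind_int(char **ptr, int32_t v, TAOS_BIND *bind) {
    int32_t *slot = (int32_t *)*ptr;       /* next free slot in scratch */
    *slot = v;

    bind->buffer_type   = TSDB_DATA_TYPE_INT;
    bind->buffer_length = sizeof(int32_t);
    bind->buffer        = slot;
    bind->length        = &bind->buffer_length;
    bind->is_null       = NULL;

    *ptr += bind->buffer_length;           /* consume what was written */
}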
int64_t *ts, - char *bindArray, SSuperTable *stbInfo, - int64_t startTime, int32_t recSeq, - int32_t timePrec) -{ - TAOS_BIND *bind; - - bind = (TAOS_BIND *)bindArray; - - int64_t *bind_ts = ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - - verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", - __func__, __LINE__, tableName, *bind_ts); - - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - return 0; } @@ -7229,7 +7655,8 @@ UNUSED_FUNC static int32_t prepareStbStmtRand( return k; } -static int32_t prepareStbStmtWithSample( +#if STMT_BIND_PARAM_BATCH == 1 +static int execBindParamBatch( threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, @@ -7240,94 +7667,182 @@ static int32_t prepareStbStmtWithSample( int64_t *pSamplePos) { int ret; - SSuperTable *stbInfo = pThreadInfo->stbInfo; TAOS_STMT *stmt = pThreadInfo->stmt; - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char* tagsValBuf = NULL; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + uint32_t columnCount = (stbInfo)?pThreadInfo->stbInfo->columnCount:g_args.columnCount; + + uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos); + + if (thisBatch > batch) { + thisBatch = batch; + } + verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n", + __func__, __LINE__, batch, *pSamplePos, thisBatch); + + memset(pThreadInfo->bindParams, 0, + (sizeof(TAOS_MULTI_BIND) * (columnCount + 1))); + memset(pThreadInfo->is_null, 0, thisBatch); + + for (int c = 0; c < columnCount + 1; c ++) { + TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); + + char data_type; + + if (c == 0) { + data_type = TSDB_DATA_TYPE_TIMESTAMP; + param->buffer_length = sizeof(int64_t); + param->buffer = pThreadInfo->bind_ts_array; - if (0 == stbInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { - tagsValBuf = getTagValueFromTagSample( - stbInfo, - tableSeq % stbInfo->tagSampleCount); - } + data_type = (stbInfo)?stbInfo->columns[c-1].data_type:g_args.data_type[c-1]; - if (NULL == tagsValBuf) { - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } + char *tmpP; - char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); - if (NULL == tagsArray) { - tmfree(tagsValBuf); - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + param->buffer_length = + ((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth); - if (-1 == prepareStbStmtBindTag( - tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision - /* is tag */)) { - tmfree(tagsValBuf); - tmfree(tagsArray); - return -1; - } + tmpP = + (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1))); - ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%d position=%"PRId64"\n", + __func__, __LINE__, tmpP, *pSamplePos, + (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)), + (*pSamplePos) * + (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth))); - tmfree(tagsValBuf); - tmfree(tagsArray); + param->buffer = 
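/*
 * A sketch of the address arithmetic execBindParamBatch() uses above:
 * each column keeps its own contiguous sample array, whose base pointer
 * is stored in slot c of sampleBindBatchArray, so the batch starting at
 * row samplePos is simply base + elemSize * samplePos. Assumes
 * <stdint.h> and <stddef.h>; the helper name is illustrative.
 */
static void *column_batch_start(char *sampleBindBatchArray, int c,
                                size_t elemSize, int64_t samplePos) {
    /* fetch the per-column base pointer parked in slot c */
    char *base = (char *)((uintptr_t)*(uintptr_t *)
            (sampleBindBatchArray + sizeof(char *) * c));
    return base + elemSize * samplePos;
}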
(void *)(tmpP + *pSamplePos * + (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)) + ); + break; - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; + case TSDB_DATA_TYPE_INT: + param->buffer_length = sizeof(int32_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int32_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_TINYINT: + param->buffer_length = sizeof(int8_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)( + stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen*(*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)( + g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int8_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + param->buffer_length = sizeof(int16_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int16_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_BIGINT: + param->buffer_length = sizeof(int64_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int64_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_BOOL: + param->buffer_length = sizeof(int8_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int8_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_FLOAT: + param->buffer_length = sizeof(float); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(float)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + param->buffer_length = sizeof(double); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(double)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + param->buffer_length = sizeof(int64_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int64_t)*(*pSamplePos)); + break; + + default: + errorPrint("%s() LN%d, wrong data type: %d\n", + __func__, + __LINE__, + data_type); + exit(EXIT_FAILURE); + + } } - } else { - ret = taos_stmt_set_tbname(stmt, tableName); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; + + param->buffer_type = data_type; + param->length = malloc(sizeof(int32_t) * thisBatch); + assert(param->length); + + for (int b = 0; b < thisBatch; b++) { + if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) { + param->length[b] = strlen( + (char *)param->buffer + b * + ((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth) + ); + } else { + param->length[b] = param->buffer_length; + } } + param->is_null = pThreadInfo->is_null; + param->num = thisBatch; } uint32_t k; - for (k = 0; k < batch;) { - char *bindArray = (char *)(*((uintptr_t *) - (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + for (k = 0; k < thisBatch;) { /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBindStartTime( - tableName, - pThreadInfo->bind_ts, - bindArray, stbInfo, - startTime, k, - pThreadInfo->time_precision - /* is column */)) { - return -1; - } - ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - // if msg > 3MB, break - ret = taos_stmt_add_batch(stmt); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; + if (stbInfo->disorderRatio) { + *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail( + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k; } + debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n", + __func__, __LINE__, + k, *(pThreadInfo->bind_ts_array +k)); k++; recordFrom ++; (*pSamplePos) ++; - if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + if ((*pSamplePos) == MAX_SAMPLES) { *pSamplePos = 0; } @@ -7336,115 +7851,1074 @@ static int32_t prepareStbStmtWithSample( } } + ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + for (int c = 0; c < stbInfo->columnCount + 1; c ++) { + TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); + free(param->length); + } + + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } return k; } -static int32_t generateStbProgressiveData( - SSuperTable *stbInfo, - char *tableName, - int64_t tableSeq, - char *dbName, char *buffer, - int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen) +static int parseSamplefileToStmtBatch( + SSuperTable* stbInfo) { - assert(buffer != NULL); - char *pstr = buffer; + // char *sampleDataBuf = (stbInfo)? 
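/*
 * A sketch of the pre-allocation parseSamplefileToStmtBatch() performs
 * above: one typed array of MAX_SAMPLES elements per column, its address
 * parked in slot c of the bind-batch array. Shown for a single INT
 * column; the helper name and maxSamples parameter are illustrative.
 */
static int alloc_int_column(char *sampleBindBatchArray, int c, int maxSamples) {
    int32_t *colArray = calloc(maxSamples, sizeof(int32_t));
    if (NULL == colArray) return -1;

    *(uintptr_t *)(sampleBindBatchArray + sizeof(uintptr_t *) * c) =
            (uintptr_t)colArray;           /* slot c -> column array */
    return 0;
}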
+ // stbInfo->sampleDataBuf:g_sampleDataBuf; + int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + char *sampleBindBatchArray = NULL; - memset(pstr, 0, *pRemainderBufLen); + if (stbInfo) { + stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); + sampleBindBatchArray = stbInfo->sampleBindBatchArray; + } else { + g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); + sampleBindBatchArray = g_sampleBindBatchArray; + } + assert(sampleBindBatchArray); - int64_t headLen = generateStbSQLHead( - stbInfo, - tableName, tableSeq, dbName, - buffer, *pRemainderBufLen); + for (int c = 0; c < columnCount; c++) { + char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c]; - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; + char *tmpP = NULL; - int64_t dataLen; + switch(data_type) { + case TSDB_DATA_TYPE_INT: + tmpP = calloc(1, sizeof(int) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; - return generateStbDataTail(stbInfo, - g_args.num_of_RPR, pstr, *pRemainderBufLen, - insertRows, recordFrom, - startTime, - pSamplePos, &dataLen); -} + case TSDB_DATA_TYPE_TINYINT: + tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; -static int32_t generateProgressiveDataWithoutStb( - char *tableName, - /* int64_t tableSeq, */ - threadInfo *pThreadInfo, char *buffer, - int64_t insertRows, - uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ - int64_t *pRemainderBufLen) -{ - assert(buffer != NULL); - char *pstr = buffer; + case TSDB_DATA_TYPE_SMALLINT: + tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; - memset(buffer, 0, *pRemainderBufLen); + case TSDB_DATA_TYPE_BIGINT: + tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; - int64_t headLen = generateSQLHeadWithoutStb( - tableName, pThreadInfo->db_name, - buffer, *pRemainderBufLen); + case TSDB_DATA_TYPE_BOOL: + tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; - if (headLen <= 0) { - return 0; - } + case TSDB_DATA_TYPE_FLOAT: + tmpP = calloc(1, sizeof(float) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmpP = calloc(1, sizeof(double) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = calloc(1, MAX_SAMPLES * + (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + default: + errorPrint("Unknown data type: %s\n", + (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } + + char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; + int64_t lenOfOneRow = 
(stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; + + for (int i=0; i < MAX_SAMPLES; i++) { + int cursor = 0; + + for (int c = 0; c < columnCount; c++) { + char data_type = (stbInfo)? + stbInfo->columns[c].data_type: + g_args.data_type[c]; + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *tmpStr = calloc(1, index + 1); + if (NULL == tmpStr) { + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + + strncpy(tmpStr, restStr, index); + cursor += index + 1; // skip ',' too + char *tmpP; + + switch(data_type) { + case TSDB_DATA_TYPE_INT: + *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int32_t)*i)) = + atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_FLOAT: + *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(float)*i)) = + (float)atof(tmpStr); + break; + + case TSDB_DATA_TYPE_DOUBLE: + *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(double)*i)) = + atof(tmpStr); + break; + + case TSDB_DATA_TYPE_TINYINT: + *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int8_t)*i)) = + (int8_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int16_t)*i)) = + (int16_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_BIGINT: + *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int64_t)*i)) = + (int64_t)atol(tmpStr); + break; + + case TSDB_DATA_TYPE_BOOL: + *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int8_t)*i)) = + (int8_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int64_t)*i)) = + (int64_t)atol(tmpStr); + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)); + strcpy(tmpP + i* + (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)) + , tmpStr); + break; + + default: + break; + } + + free(tmpStr); + } + } + + return 0; +} + +static int parseSampleToStmtBatchForThread( + threadInfo *pThreadInfo, SSuperTable *stbInfo, + uint32_t timePrec, + uint32_t batch) +{ + uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + + pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch); + assert(pThreadInfo->bind_ts_array); + + pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1)); + assert(pThreadInfo->bindParams); + + pThreadInfo->is_null = malloc(batch); + assert(pThreadInfo->is_null); + + return 0; +} + +static int parseStbSampleToStmtBatchForThread( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, + uint32_t timePrec, + uint32_t batch) +{ + return parseSampleToStmtBatchForThread( + pThreadInfo, stbInfo, timePrec, batch); +} + +static int parseNtbSampleToStmtBatchForThread( + threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch) +{ + return parseSampleToStmtBatchForThread( + pThreadInfo, NULL, timePrec, batch); +} + +#else +static int parseSampleToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) +{ + pThreadInfo->sampleBindArray = + (char *)calloc(1, sizeof(char *) * MAX_SAMPLES); + if 
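/*
 * A sketch of the row tokenizer used in the parsing loop above: scan to
 * the next ',' within the current sample row, copy that token out, and
 * advance the cursor past the comma. Assumes <string.h>; the helper name
 * and the caller-provided scratch buffer are illustrative.
 */
static int next_token(const char *row, int *cursor, char *out, int outLen) {
    const char *restStr = row + *cursor;
    int index = 0;
    while (restStr[index] != '\0' && restStr[index] != ',') {
        index++;
    }
    if (index + 1 > outLen) return -1;     /* token would overflow scratch */

    strncpy(out, restStr, index);
    out[index] = '\0';
    *cursor += index + 1;                  /* skip ',' too */
    return index;
}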
(pThreadInfo->sampleBindArray == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, + (uint64_t)sizeof(char *) * MAX_SAMPLES); + return -1; + } + + int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; + int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; + + for (int i=0; i < MAX_SAMPLES; i++) { + char *bindArray = + calloc(1, sizeof(TAOS_BIND) * (columnCount + 1)); + if (bindArray == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (columnCount + 1)); + return -1; + } + + TAOS_BIND *bind; + int cursor = 0; + + for (int c = 0; c < columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char data_type = (stbInfo)? + stbInfo->columns[c-1].data_type: + g_args.data_type[c-1]; + int32_t dataLen = (stbInfo)? + stbInfo->columns[c-1].dataLen: + g_args.binwidth; + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + data_type, + dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + free(bindArray); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) = + (uintptr_t)bindArray; + } + + return 0; +} + +static int parseStbSampleToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) +{ + return parseSampleToStmt( + pThreadInfo, + stbInfo, timePrec); +} + +static int parseNtbSampleToStmt( + threadInfo *pThreadInfo, + uint32_t timePrec) +{ + return parseSampleToStmt( + pThreadInfo, + NULL, + timePrec); +} + +static int32_t prepareStbStmtBindStartTime( + char *tableName, + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + + verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", + __func__, __LINE__, tableName, *bind_ts); + + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static uint32_t execBindParam( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + 
(pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindStartTime( + tableName, + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} +#endif + +static int32_t prepareStbStmt( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + +#if STMT_BIND_PARAM_BATCH == 1 + return execBindParamBatch( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, + recordFrom, + startTime, + pSamplePos); +#else + return execBindParam( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, + recordFrom, + startTime, + pSamplePos); +#endif +} + +static int32_t generateStbProgressiveData( + SSuperTable *stbInfo, + char *tableName, + int64_t tableSeq, + char *dbName, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(pstr, 0, *pRemainderBufLen); + + int64_t headLen = generateStbSQLHead( + stbInfo, + tableName, tableSeq, dbName, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen; + + return generateStbDataTail(stbInfo, + g_args.reqPerReq, pstr, *pRemainderBufLen, + insertRows, recordFrom, + startTime, + pSamplePos, &dataLen); +} + +static int32_t generateProgressiveDataWithoutStb( + char *tableName, + /* int64_t tableSeq, */ + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(buffer, 0, *pRemainderBufLen); + + int64_t headLen = generateSQLHeadWithoutStb( + tableName, pThreadInfo->db_name, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } pstr += headLen; *pRemainderBufLen -= headLen; - int64_t dataLen; + int64_t dataLen; + + return generateDataTailWithoutStb( + g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom, + startTime, + /*pSamplePos, */&dataLen); +} + +static void printStatPerThread(threadInfo *pThreadInfo) +{ + if (0 == pThreadInfo->totalDelay) + pThreadInfo->totalDelay = 1; + + fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". 
%.2f records/second====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows, + (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)) + ); +} + +#if STMT_BIND_PARAM_BATCH == 1 +// stmt sync write interlace data +static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint64_t timesInterlace = (insertRows / interlaceRows) + 1; + uint32_t precalcBatch = interlaceRows; + + if (precalcBatch > g_args.reqPerReq) + precalcBatch = g_args.reqPerReq; + + if (precalcBatch > MAX_SAMPLES) + precalcBatch = MAX_SAMPLES; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime; + + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + pThreadInfo->samplePos = 0; + + for (int64_t interlace = 0; + interlace < timesInterlace; interlace ++) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + int64_t generated = 0; + int64_t samplePos; + + for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; tableSeq ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + samplePos = pThreadInfo->samplePos; + startTime = pThreadInfo->start_time + + interlace * interlaceRows * timeStampStep; + uint64_t remainRecPerTbl = + insertRows - interlaceRows * interlace; + uint64_t recPerTbl = 0; + + uint64_t remainPerInterlace; + if (remainRecPerTbl > interlaceRows) { + remainPerInterlace = interlaceRows; + } else { + remainPerInterlace = remainRecPerTbl; + } + + while(remainPerInterlace > 0) { + + uint32_t batch; + if (remainPerInterlace > precalcBatch) { + batch = precalcBatch; + } else { + batch = remainPerInterlace; + } + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batch, startTime); + + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, 0, + startTime, + &samplePos); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batch, + insertRows, + interlaceRows * interlace + recPerTbl, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { 
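/*
 * A sketch of the batch sizing in syncWriteInterlaceStmtBatch() above:
 * rows bound per statement batch are the interlace rows clamped by the
 * per-request cap (g_args.reqPerReq), the pre-generated sample count
 * (MAX_SAMPLES), and whatever remains for the current table in this
 * interlace pass. The helper packaging is illustrative.
 */
static uint32_t clamp_stmt_batch(uint32_t interlaceRows, uint32_t reqPerReq,
                                 uint32_t maxSamples,
                                 uint64_t remainPerInterlace) {
    uint32_t batch = interlaceRows;
    if (batch > reqPerReq)          batch = reqPerReq;
    if (batch > maxSamples)         batch = maxSamples;
    if (batch > remainPerInterlace) batch = (uint32_t)remainPerInterlace;
    return batch;
}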
+ errorPrint2("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + recPerTbl += generated; + remainPerInterlace -= generated; + pThreadInfo->totalInsertRows += generated; + + verbosePrint("[%d] %s() LN%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + int64_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (generated != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert() insert %"PRId64", affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + generated, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + startTime += (generated * timeStampStep); + } + } + pThreadInfo->samplePos = samplePos; - return generateDataTailWithoutStb( - g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, - startTime, - /*pSamplePos, */&dataLen); + if (tableSeq == pThreadInfo->start_table_from + + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + + flagSleep = true; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; } +#else +// stmt sync write interlace data +static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); -static void printStatPerThread(threadInfo *pThreadInfo) -{ - fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows, - (pThreadInfo->totalDelay)? 
- (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)): - FLT_MAX); + int64_t insertRows; + uint64_t maxSqlLen; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + maxSqlLen = stbInfo->maxSqlLen; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + maxSqlLen = g_args.max_sql_len; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.reqPerReq / interlaceRows; + } else { + batchPerTblTimes = 1; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + uint32_t recOfBatch = 0; + + int32_t generated; + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batchPerTbl, + insertRows, i, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint2("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * timeStampStep; + + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + int64_t remainRows = insertRows - 
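/*
 * A sketch of the request packing in the non-batch interlace writer
 * above: each request carries batchPerTbl rows from up to
 * reqPerReq / interlaceRows tables, and a table's next time window
 * starts one timestamp step past the rows already generated for it.
 * Helper names are illustrative; the arithmetic mirrors the code above.
 */
static uint32_t batches_per_request(uint32_t interlaceRows,
                                    uint32_t reqPerReq, uint64_t ntables) {
    if (interlaceRows > reqPerReq) interlaceRows = reqPerReq;
    return ((interlaceRows > 0) && (ntables > 1))
            ? reqPerReq / interlaceRows : 1;
}

static int64_t next_window_start(int64_t start_time, int64_t timeStampStep,
                                 uint64_t generatedRecPerTbl) {
    return start_time + (int64_t)generatedRecPerTbl * timeStampStep;
}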
generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + if (recOfBatch == 0) { + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, maxSqlLen / batchPerTbl); + } + goto free_of_interlace_stmt; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; } +#endif + // sync write interlace data -static void* syncWriteInterlace(threadInfo *pThreadInfo) { +static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, __func__, __LINE__); int64_t insertRows; - uint32_t interlaceRows; uint64_t maxSqlLen; - int64_t nTimeStampStep; + int64_t timeStampStep; uint64_t insert_interval; SSuperTable* stbInfo = pThreadInfo->stbInfo; if (stbInfo) { insertRows = stbInfo->insertRows; - - if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { - 
interlaceRows = stbInfo->interlaceRows; - } maxSqlLen = stbInfo->maxSqlLen; - nTimeStampStep = stbInfo->timeStampStep; + timeStampStep = stbInfo->timeStampStep; insert_interval = stbInfo->insertInterval; } else { - insertRows = g_args.num_of_DPT; - interlaceRows = g_args.interlace_rows; + insertRows = g_args.insertRows; maxSqlLen = g_args.max_sql_len; - nTimeStampStep = g_args.timestamp_step; + timeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; } @@ -7452,23 +8926,35 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - if (interlaceRows > insertRows) - interlaceRows = insertRows; - - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; +#if 1 + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; uint32_t batchPerTbl = interlaceRows; uint32_t batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = - g_args.num_of_RPR / interlaceRows; + g_args.reqPerReq / interlaceRows; } else { batchPerTblTimes = 1; } +#else + uint32_t batchPerTbl; + if (interlaceRows > g_args.reqPerReq) + batchPerTbl = g_args.reqPerReq; + else + batchPerTbl = interlaceRows; + + uint32_t batchPerTblTimes; + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + interlaceRows / batchPerTbl; + } else { + batchPerTblTimes = 1; + } +#endif pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", @@ -7501,6 +8987,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { st = taosGetTimestampMs(); flagSleep = false; } + // generate data memset(pThreadInfo->buffer, 0, maxSqlLen); uint64_t remainderBufLen = maxSqlLen; @@ -7514,6 +9001,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint32_t recOfBatch = 0; + int32_t generated; for (uint64_t i = 0; i < batchPerTblTimes; i ++) { char tableName[TSDB_TABLE_NAME_LEN]; @@ -7526,50 +9014,25 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } uint64_t oldRemainderLen = remainderBufLen; - - int32_t generated; - if (stbInfo) { - if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmtWithSample( - pThreadInfo, - tableName, - tableSeq, - batchPerTbl, - insertRows, 0, - startTime, - &(pThreadInfo->samplePos)); - } else { - generated = generateStbInterlaceData( - pThreadInfo, - tableName, batchPerTbl, i, - batchPerTblTimes, - tableSeq, - pstr, - insertRows, - startTime, - &remainderBufLen); - } + + if (stbInfo) { + generated = generateStbInterlaceData( + pThreadInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pstr, + insertRows, + startTime, + &remainderBufLen); } else { - if (g_args.iface == STMT_IFACE) { - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batchPerTbl, startTime); - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - batchPerTbl, - insertRows, i, - startTime); - } else { - generated = generateInterlaceDataWithoutStb( - tableName, batchPerTbl, - tableSeq, - pThreadInfo->db_name, pstr, - insertRows, - startTime, - &remainderBufLen); - } + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + startTime, + &remainderBufLen); } debugPrint("[%d] %s() LN%d, generated records is %d\n", @@ -7598,7 +9061,7 @@ 
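/* [editor's note] A self-contained sketch of the interlace batching
 * arithmetic used by the syncWriteInterlaceStmt()/syncWriteInterlace()
 * functions in this patch. Names and sample values are hypothetical and
 * illustrative only: each request carries at most reqPerReq rows, packed
 * as interlaceRows rows per child table; when one request cannot cover
 * all tables, tableSeq wraps back to start_table_from on the next pass. */
#include <stdint.h>
#include <stdio.h>

static void interlacePlan(uint32_t reqPerReq, uint32_t interlaceRows,
                          uint64_t ntables) {
    if (interlaceRows > reqPerReq)
        interlaceRows = reqPerReq;            /* same clamp as the patch */

    uint32_t batchPerTbl = interlaceRows;     /* rows per table, per request */
    uint32_t batchPerTblTimes =
        ((interlaceRows > 0) && (ntables > 1))
            ? reqPerReq / interlaceRows       /* table slots per request */
            : 1;

    printf("rows/table=%u slots/request=%u rows/request<=%u\n",
           batchPerTbl, batchPerTblTimes, batchPerTbl * batchPerTblTimes);
}

int main(void) {
    /* 100 rows per request, 10 rows interlaced over 8 child tables:
       10 slots per request, so the 8 tables wrap around mid-request */
    interlacePlan(100, 10, 8);
    return 0;
}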
static void* syncWriteInterlace(threadInfo *pThreadInfo) { generatedRecPerTbl += batchPerTbl; startTime = pThreadInfo->start_time - + generatedRecPerTbl * nTimeStampStep; + + generatedRecPerTbl * timeStampStep; flagSleep = true; if (generatedRecPerTbl >= insertRows) @@ -7608,7 +9071,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if ((remainRows > 0) && (batchPerTbl > remainRows)) batchPerTbl = remainRows; - if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) break; } @@ -7616,7 +9079,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, generatedRecPerTbl, insertRows); - if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) break; } @@ -7699,6 +9162,144 @@ free_of_interlace: return NULL; } +static void* syncWriteProgressiveStmt(threadInfo *pThreadInfo) { + debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__); + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + int64_t timeStampStep = + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; + int64_t insertRows = + (stbInfo)?stbInfo->insertRows:g_args.insertRows; + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", + __func__, __LINE__, insertRows); + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + pThreadInfo->samplePos = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { + int64_t start_time = pThreadInfo->start_time; + + for (uint64_t i = 0; i < insertRows;) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + // measure prepare + insert + startTs = taosGetTimestampUs(); + + int32_t generated; + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + (g_args.reqPerReq>stbInfo->insertRows)? 
+ stbInfo->insertRows: + g_args.reqPerReq, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + g_args.reqPerReq, + insertRows, i, + start_time); + } + + verbosePrint("[%d] %s() LN%d generated=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, generated); + + if (generated > 0) + i += generated; + else + goto free_of_stmt_progressive; + + start_time += generated * timeStampStep; + pThreadInfo->totalInsertRows += generated; + + // only measure insert + // startTs = taosGetTimestampUs(); + + int32_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.f ms\n", + __func__, __LINE__, delay/1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (affectedRows < 0) { + errorPrint2("%s() LN%d, affected rows: %d\n", + __func__, __LINE__, affectedRows); + goto free_of_stmt_progressive; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if (i >= insertRows) + break; + } // insertRows + + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) + && (0 == strncasecmp( + stbInfo->dataSource, + "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", + __func__, __LINE__, pThreadInfo->samplePos); + } + } // tableSeq + + if (percentComplete < 100) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + } + +free_of_stmt_progressive: + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; +} // sync insertion progressive data static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); @@ -7708,7 +9309,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int64_t timeStampStep = stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; int64_t insertRows = - (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT; + (stbInfo)?stbInfo->insertRows:g_args.insertRows; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); @@ -7765,11 +9366,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int32_t generated; if (stbInfo) { if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmtWithSample( + generated = prepareStbStmt( pThreadInfo, tableName, tableSeq, - g_args.num_of_RPR, + (g_args.reqPerReq>stbInfo->insertRows)? 
+ stbInfo->insertRows: + g_args.reqPerReq, insertRows, i, start_time, &(pThreadInfo->samplePos)); } else { @@ -7786,7 +9389,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { generated = prepareStmtWithoutStb( pThreadInfo, tableName, - g_args.num_of_RPR, + g_args.reqPerReq, insertRows, i, start_time); } else { @@ -7854,7 +9457,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (i >= insertRows) break; - } // num_of_DPT + } // insertRows if ((g_args.verbose_print) && (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) @@ -7865,8 +9468,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->samplePos); } } // tableSeq - if (percentComplete < 100) + + if (percentComplete < 100) { printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + } free_of_progressive: tmfree(pThreadInfo->buffer); @@ -7881,26 +9486,40 @@ static void* syncWrite(void *sarg) { setThreadName("syncWrite"); - uint32_t interlaceRows; + uint32_t interlaceRows = 0; if (stbInfo) { - if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { + if (stbInfo->interlaceRows < stbInfo->insertRows) interlaceRows = stbInfo->interlaceRows; - } } else { - interlaceRows = g_args.interlace_rows; + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; } if (interlaceRows > 0) { // interlace mode - return syncWriteInterlace(pThreadInfo); + if (stbInfo) { + if (STMT_IFACE == stbInfo->iface) { +#if STMT_BIND_PARAM_BATCH == 1 + return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); +#else + return syncWriteInterlaceStmt(pThreadInfo, interlaceRows); +#endif + } else { + return syncWriteInterlace(pThreadInfo, interlaceRows); + } + } } else { - // progressive mode - return syncWriteProgressive(pThreadInfo); + // progressive mode + if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) + || (STMT_IFACE == g_args.iface)) { + return syncWriteProgressiveStmt(pThreadInfo); + } else { + return syncWriteProgressive(pThreadInfo); + } } + + return NULL; } static void callBack(void *param, TAOS_RES *res, int code) { @@ -7919,11 +9538,11 @@ static void callBack(void *param, TAOS_RES *res, int code) { char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", + pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES", pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { - if (pThreadInfo->counter >= g_args.num_of_RPR) { + if (pThreadInfo->counter >= g_args.reqPerReq) { pThreadInfo->start_table_from++; pThreadInfo->counter = 0; } @@ -7934,7 +9553,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { return; } - for (int i = 0; i < g_args.num_of_RPR; i++) { + for (int i = 0; i < g_args.reqPerReq; i++) { int rand_num = taosRandom() % 100; if (0 != pThreadInfo->stbInfo->disorderRatio && rand_num < pThreadInfo->stbInfo->disorderRatio) { @@ -8014,81 +9633,6 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } -static int parseSampleFileToStmt( - threadInfo *pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec) -{ - pThreadInfo->sampleBindArray = - calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); - if (pThreadInfo->sampleBindArray == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", - 
__func__, __LINE__,
- (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE);
- return -1;
- }
-
- for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) {
- char *bindArray =
- calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
- if (bindArray == NULL) {
- errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
- __func__, __LINE__, (stbInfo->columnCount + 1));
- return -1;
- }
-
- TAOS_BIND *bind;
- int cursor = 0;
-
- for (int c = 0; c < stbInfo->columnCount + 1; c++) {
- bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c));
-
- if (c == 0) {
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = NULL; //bind_ts;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else {
- char *restStr = stbInfo->sampleDataBuf
- + stbInfo->lenOfOneRow * i + cursor;
- int lengthOfRest = strlen(restStr);
-
- int index = 0;
- for (index = 0; index < lengthOfRest; index ++) {
- if (restStr[index] == ',') {
- break;
- }
- }
-
- char *bindBuffer = calloc(1, index + 1);
- if (bindBuffer == NULL) {
- errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
- __func__, __LINE__, DOUBLE_BUFF_LEN);
- return -1;
- }
-
- strncpy(bindBuffer, restStr, index);
- cursor += index + 1; // skip ',' too
-
- if (-1 == prepareStmtBindArrayByType(
- bind,
- stbInfo->columns[c-1].dataType,
- stbInfo->columns[c-1].dataLen,
- timePrec,
- bindBuffer)) {
- free(bindBuffer);
- return -1;
- }
- free(bindBuffer);
- }
- }
- *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) =
- (uintptr_t)bindArray;
- }
-
- return 0;
-}
-
 static void startMultiThreadInsertData(int threads, char* db_name,
 char* precision, SSuperTable* stbInfo) {
@@ -8106,32 +9650,37 @@
 }
 }

- int64_t start_time;
+ int64_t startTime;
 if (stbInfo) {
 if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) {
- start_time = taosGetTimestamp(timePrec);
+ startTime = taosGetTimestamp(timePrec);
 } else {
 if (TSDB_CODE_SUCCESS != taosParseTime(
 stbInfo->startTimestamp,
- &start_time,
+ &startTime,
 strlen(stbInfo->startTimestamp),
 timePrec, 0)) {
 ERROR_EXIT("failed to parse time!\n");
 }
 }
 } else {
- start_time = DEFAULT_START_TIME;
+ startTime = DEFAULT_START_TIME;
 }
- debugPrint("%s() LN%d, start_time= %"PRId64"\n",
- __func__, __LINE__, start_time);
+ debugPrint("%s() LN%d, startTime= %"PRId64"\n",
+ __func__, __LINE__, startTime);

 // read sample data from file first
+ int ret;
 if (stbInfo) {
- if (0 != prepareSampleDataForSTable(stbInfo)) {
- errorPrint2("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
- exit(EXIT_FAILURE);
- }
+ ret = prepareSampleForStb(stbInfo);
+ } else {
+ ret = prepareSampleForNtb();
+ }
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, prepare sample data for stable failed!\n",
+ __func__, __LINE__);
+ exit(EXIT_FAILURE);
 }

 TAOS* taos0 = taos_connect(
@@ -8162,6 +9711,12 @@
 || ((stbInfo->childTblOffset + stbInfo->childTblLimit) > (stbInfo->childTblCount))) {
+
+ if (stbInfo->childTblCount < stbInfo->childTblOffset) {
+ printf("WARNING: offset will not be used since the child tables count is less than offset!\n");
+
+ stbInfo->childTblOffset = 0;
+ }
 stbInfo->childTblLimit = stbInfo->childTblCount - stbInfo->childTblOffset;
 }
@@ -8200,12 +9755,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
 int64_t childTblCount;
 getChildNameOfSuperTableWithLimitAndOffset(
taos0, - db_name, stbInfo->sTblName, + db_name, stbInfo->stbName, &stbInfo->childTblName, &childTblCount, limit, offset); + ntables = childTblCount; // CBD } else { - ntables = g_args.num_of_tables; + ntables = g_args.ntables; tableFrom = 0; } @@ -8231,16 +9787,34 @@ static void startMultiThreadInsertData(int threads, char* db_name, } pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - assert(pids != NULL); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + assert(pids != NULL); assert(infos != NULL); - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - char *stmtBuffer = calloc(1, BUFFER_SIZE); assert(stmtBuffer); + +#if STMT_BIND_PARAM_BATCH == 1 + uint32_t interlaceRows = 0; + uint32_t batch; + + if (stbInfo) { + if (stbInfo->interlaceRows < stbInfo->insertRows) + interlaceRows = stbInfo->interlaceRows; + } else { + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; + } + + if (interlaceRows > 0) { + batch = interlaceRows; + } else { + batch = (g_args.reqPerReq>g_args.insertRows)? + g_args.insertRows:g_args.reqPerReq; + } + +#endif + if ((g_args.iface == STMT_IFACE) || ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { @@ -8250,7 +9824,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, && (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable)) { pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - stbInfo->sTblName); + stbInfo->stbName); for (int tag = 0; tag < (stbInfo->tagCount - 1); tag ++ ) { pstr += sprintf(pstr, ",?"); @@ -8260,12 +9834,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); } - int columnCount; - if (stbInfo) { - columnCount = stbInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } + int columnCount = (stbInfo)? 
+ stbInfo->columnCount: + g_args.columnCount; for (int col = 0; col < columnCount; col ++) { pstr += sprintf(pstr, ",?"); @@ -8273,6 +9844,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, pstr += sprintf(pstr, ")"); debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); +#if STMT_BIND_PARAM_BATCH == 1 + parseSamplefileToStmtBatch(stbInfo); +#endif } for (int i = 0; i < threads; i++) { @@ -8283,7 +9857,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, pThreadInfo->time_precision = timePrec; pThreadInfo->stbInfo = stbInfo; - pThreadInfo->start_time = start_time; + pThreadInfo->start_time = startTime; pThreadInfo->minDelay = UINT64_MAX; if ((NULL == stbInfo) || @@ -8316,8 +9890,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(EXIT_FAILURE); } - int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); - if (ret != 0) { + if (0 != taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0)) { free(pids); free(infos); free(stmtBuffer); @@ -8328,7 +9901,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, pThreadInfo->bind_ts = malloc(sizeof(int64_t)); if (stbInfo) { - parseSampleFileToStmt(pThreadInfo, stbInfo, timePrec); +#if STMT_BIND_PARAM_BATCH == 1 + parseStbSampleToStmtBatchForThread( + pThreadInfo, stbInfo, timePrec, batch); +#else + parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec); +#endif + } else { +#if STMT_BIND_PARAM_BATCH == 1 + parseNtbSampleToStmtBatchForThread( + pThreadInfo, timePrec, batch); +#else + parseNtbSampleToStmt(pThreadInfo, timePrec); +#endif } } } else { @@ -8373,19 +9958,28 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; + tsem_destroy(&(pThreadInfo->lock_sem)); + taos_close(pThreadInfo->taos); + if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); - tmfree((char *)pThreadInfo->bind_ts); } - tsem_destroy(&(pThreadInfo->lock_sem)); - taos_close(pThreadInfo->taos); + tmfree((char *)pThreadInfo->bind_ts); +#if STMT_BIND_PARAM_BATCH == 1 + tmfree((char *)pThreadInfo->bind_ts_array); + tmfree(pThreadInfo->bindParams); + tmfree(pThreadInfo->is_null); +#else if (pThreadInfo->sampleBindArray) { - for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + for (int k = 0; k < MAX_SAMPLES; k++) { uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( pThreadInfo->sampleBindArray + sizeof(uintptr_t *) * k)); - for (int c = 1; c < pThreadInfo->stbInfo->columnCount + 1; c++) { + int columnCount = (pThreadInfo->stbInfo)? 
+ pThreadInfo->stbInfo->columnCount: + g_args.columnCount; + for (int c = 1; c < columnCount + 1; c++) { TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); if (bind) tmfree(bind->buffer); @@ -8394,6 +9988,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } tmfree(pThreadInfo->sampleBindArray); } +#endif debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", __func__, __LINE__, @@ -8412,7 +10007,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; } - cntDelay -= 1; if (cntDelay == 0) cntDelay = 1; avgDelay = (double)totalDelay / cntDelay; @@ -8427,7 +10021,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->sTblName, + threads, db_name, stbInfo->stbName, (double)(stbInfo->totalInsertRows/tInMs)); if (g_fpOfInsertResult) { @@ -8435,7 +10029,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->sTblName, + threads, db_name, stbInfo->stbName, (double)(stbInfo->totalInsertRows/tInMs)); } } else { @@ -8479,7 +10073,7 @@ static void *readTable(void *sarg) { char *command = calloc(1, BUFFER_SIZE); assert(command); - uint64_t sTime = pThreadInfo->start_time; + uint64_t startTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { @@ -8488,16 +10082,16 @@ static void *readTable(void *sarg) { return NULL; } - int64_t num_of_DPT; + int64_t insertRows; /* if (pThreadInfo->stbInfo) { - num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; + insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; } else { */ - num_of_DPT = g_args.num_of_DPT; + insertRows = g_args.insertRows; // } - int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; - int64_t totalData = num_of_DPT * num_of_tables; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; + int64_t totalData = insertRows * ntables; bool do_aggreFunc = g_Dbs.do_aggreFunc; int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; @@ -8510,9 +10104,9 @@ static void *readTable(void *sarg) { for (int j = 0; j < n; j++) { double totalT = 0; uint64_t count = 0; - for (int64_t i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64, - g_aggreFunc[j], tb_prefix, i, sTime); + for (int64_t i = 0; i < ntables; i++) { + sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64, + g_aggreFunc[j], tb_prefix, i, startTime); double t = taosGetTimestampMs(); TAOS_RES *pSql = taos_query(taos, command); @@ -8539,7 +10133,7 @@ static void *readTable(void *sarg) { fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", g_aggreFunc[j][0] == '*' ? 
" * " : g_aggreFunc[j], totalData, - (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); + (double)(ntables * insertRows) / totalT, totalT * 1000); printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000); } fprintf(fp, "\n"); @@ -8564,9 +10158,9 @@ static void *readMetric(void *sarg) { return NULL; } - int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows; - int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; - int64_t totalData = num_of_DPT * num_of_tables; + int64_t insertRows = pThreadInfo->stbInfo->insertRows; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; + int64_t totalData = insertRows * ntables; bool do_aggreFunc = g_Dbs.do_aggreFunc; int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; @@ -8580,7 +10174,7 @@ static void *readMetric(void *sarg) { char condition[COND_BUF_LEN] = "\0"; char tempS[64] = "\0"; - int64_t m = 10 < num_of_tables ? 10 : num_of_tables; + int64_t m = 10 < ntables ? 10 : ntables; for (int64_t i = 1; i <= m; i++) { if (i == 1) { @@ -8590,7 +10184,7 @@ static void *readMetric(void *sarg) { } strncat(condition, tempS, COND_BUF_LEN - 1); - sprintf(command, "select %s from meters where %s", g_aggreFunc[j], condition); + sprintf(command, "SELECT %s FROM meters WHERE %s", g_aggreFunc[j], condition); printf("Where condition: %s\n", condition); fprintf(fp, "%s\n", command); @@ -8615,7 +10209,7 @@ static void *readMetric(void *sarg) { t = taosGetTimestampMs() - t; fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - num_of_tables * num_of_DPT / (t * 1000.0), t); + ntables * insertRows / (t * 1000.0), t); printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0); taos_free_result(pSql); @@ -8671,7 +10265,7 @@ static int insertTestProcess() { } free(cmdBuffer); - // pretreatement + // pretreatment if (prepareSampleData() != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); @@ -8948,7 +10542,7 @@ static int queryTestProcess() { if (0 != g_queryInfo.superQueryInfo.sqlCount) { getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.sTblName, + g_queryInfo.superQueryInfo.stbName, &g_queryInfo.superQueryInfo.childTblName, &g_queryInfo.superQueryInfo.childTblCount); } @@ -9004,7 +10598,7 @@ static int queryTestProcess() { } } - pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL;// workaround to use separate taos connection; pthread_create(pids + seq, NULL, specifiedTableQuery, pThreadInfo); @@ -9054,7 +10648,7 @@ static int queryTestProcess() { pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); } @@ -9081,7 +10675,7 @@ static int queryTestProcess() { tmfree((char*)pidsOfSub); tmfree((char*)infosOfSub); - // taos_close(taos);// TODO: workaround to use separate taos connection; + // taos_close(taos);// workaround to use separate taos connection; uint64_t endTs = taosGetTimestampMs(); uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + @@ -9103,7 +10697,7 @@ static void stable_sub_callback( if (param) fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. 
+ // tao_unsubscribe() will free result. } static void specified_sub_callback( @@ -9116,7 +10710,7 @@ static void specified_sub_callback( if (param) fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. + // tao_unsubscribe() will free result. } static TAOS_SUB* subscribeImpl( @@ -9441,12 +11035,12 @@ static int subscribeTestProcess() { if (0 != g_queryInfo.superQueryInfo.sqlCount) { getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.sTblName, + g_queryInfo.superQueryInfo.stbName, &g_queryInfo.superQueryInfo.childTblName, &g_queryInfo.superQueryInfo.childTblCount); } - taos_close(taos); // TODO: workaround to use separate taos connection; + taos_close(taos); // workaround to use separate taos connection; pthread_t *pids = NULL; threadInfo *infos = NULL; @@ -9456,12 +11050,12 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", + debugPrint("%s() LN%d, specified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint2("%s() LN%d, sepcified query sqlCount %d.\n", + errorPrint2("%s() LN%d, specified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(EXIT_FAILURE); @@ -9488,7 +11082,7 @@ static int subscribeTestProcess() { threadInfo *pThreadInfo = infos + seq; pThreadInfo->threadID = seq; pThreadInfo->querySeq = i; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo); } } @@ -9545,7 +11139,7 @@ static int subscribeTestProcess() { pThreadInfo->ntables = jend_table_to = jend_table_to + 1; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; pthread_create(pidsOfStable + seq, NULL, superSubscribe, pThreadInfo); } @@ -9618,8 +11212,8 @@ static void setParaFromArg() { g_Dbs.port = g_args.port; } - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountForCreateTbl = g_args.num_of_threads; + g_Dbs.threadCount = g_args.nthreads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.dbCount = 1; g_Dbs.db[0].drop = true; @@ -9636,22 +11230,23 @@ static void setParaFromArg() { g_Dbs.do_aggreFunc = true; char dataString[TSDB_MAX_BYTES_PER_ROW]; - char **data_type = g_args.datatype; + char *data_type = g_args.data_type; + char **dataType = g_args.dataType; memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); - if (strcasecmp(data_type[0], "BINARY") == 0 - || strcasecmp(data_type[0], "BOOL") == 0 - || strcasecmp(data_type[0], "NCHAR") == 0 ) { + if ((data_type[0] == TSDB_DATA_TYPE_BINARY) + || (data_type[0] == TSDB_DATA_TYPE_BOOL) + || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { g_Dbs.do_aggreFunc = false; } if (g_args.use_metric) { g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountForCreateTbl = g_args.num_of_threads; + tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN); + g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables; + g_Dbs.threadCount = g_args.nthreads; + 
g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.asyncMode = g_args.async_mode; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; @@ -9671,26 +11266,28 @@ static void setParaFromArg() { "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; - g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; + g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows; g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; g_Dbs.db[0].superTbls[0].columnCount = 0; for (int i = 0; i < MAX_NUM_COLUMNS; i++) { - if (data_type[i] == NULL) { + if (data_type[i] == TSDB_DATA_TYPE_NULL) { break; } + g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i]; tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1)); + dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth; g_Dbs.db[0].superTbls[0].columnCount++; } - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; + if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) { + g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount; } else { for (int i = g_Dbs.db[0].superTbls[0].columnCount; - i < g_args.num_of_CPR; i++) { + i < g_args.columnCount; i++) { + g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT; tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; @@ -9707,7 +11304,7 @@ static void setParaFromArg() { g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth; g_Dbs.db[0].superTbls[0].tagCount = 2; } else { - g_Dbs.threadCountForCreateTbl = g_args.num_of_threads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.db[0].superTbls[0].tagCount = 0; } } @@ -9840,8 +11437,8 @@ static void queryResult() { tstrncpy(pThreadInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); } else { - pThreadInfo->ntables = g_args.num_of_tables; - pThreadInfo->end_table_to = g_args.num_of_tables -1; + pThreadInfo->ntables = g_args.ntables; + pThreadInfo->end_table_to = g_args.ntables -1; tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index ae2193a82eb447f0e948abc1757c21cab46ccf34..fe7616fa174f5af707892cf3d251689a60111ed6 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -1209,14 +1209,14 @@ _dump_db_point: fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name); - int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int32_t totalNumOfThread = 1; // 0: all normal table into .tables.tmp.0 int normalTblFd = -1; int32_t retCode; int superTblCnt = 0 ; for (int i = 1; g_args.arg_list[i]; i++) { if (taosGetTableRecordInfo(g_args.arg_list[i], &tableRecordInfo, taos) < 0) { - errorPrint("input the invalide table %s\n", + errorPrint("input the invalid table %s\n", g_args.arg_list[i]); continue; } @@ -1341,11 +1341,10 @@ static int taosGetTableDes( return count; } - // if chidl-table have tag, using select tagName from table to get tagValue + // if child-table have tag, using select tagName from table to get tagValue for (int i = 0 ; i < count; i++) { if (strcmp(stableDes->cols[i].note, "TAG") != 0) continue; - sprintf(sqlstr, "select %s from %s.%s", stableDes->cols[i].field, dbName, table); @@ -2443,7 +2442,7 @@ 
static int taosGetFilesNum(const char *directoryName, } if (fileNum <= 0) { - errorPrint("directory:%s is empry\n", directoryName); + errorPrint("directory:%s is empty\n", directoryName); exit(-1); } @@ -2620,9 +2619,9 @@ static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, memcpy(cmd + cmd_len, line, read_len); cmd[read_len + cmd_len]= '\0'; if (queryDbImpl(taos, cmd)) { - errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n", + errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", __func__, __LINE__, lineNo, fileName); - fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName); } memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 68529ab8a240c2313ae9417bef9f4112759b0c9f..a6158906a7cc77b57244594fe51881e5df0b68c8 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code), pStable->numOfTags); - + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1321,6 +1326,9 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1376,6 +1384,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1444,6 +1455,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1489,6 +1503,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c index 6f296c9fef4f8db5249e90892214c3b928873939..f152e36d7b9f41e5ddf97db3f5d0c4cf2d714632 100644 --- a/src/os/src/darwin/darwinSystem.c +++ b/src/os/src/darwin/darwinSystem.c @@ -31,7 +31,6 @@ void taosCloseDll(void *handle) { int taosSetConsoleEcho(bool on) { 
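/* [editor's note] The darwinSystem.c hunk above removes the "#if 0" guard
 * so the termios-based echo toggle actually executes on macOS. For
 * reference, a minimal standalone equivalent built on the same ECHOFLAGS
 * mask; a sketch, not the patched function itself. */
#include <stdbool.h>
#include <termios.h>
#include <unistd.h>

#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL)

static int setConsoleEcho(bool on) {
    struct termios term;
    if (tcgetattr(STDIN_FILENO, &term) == -1)
        return -1;                       /* stdin is not a terminal */
    if (on)
        term.c_lflag |= ECHOFLAGS;       /* restore echo */
    else
        term.c_lflag &= ~ECHOFLAGS;      /* e.g. while reading a password */
    return (tcsetattr(STDIN_FILENO, TCSAFLUSH, &term) == -1) ? -1 : 0;
}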
-#if 0 #define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) int err; struct termios term; @@ -52,7 +51,6 @@ int taosSetConsoleEcho(bool on) return -1; } -#endif return 0; } diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 99a5b770aa140df0d0c5787091cb8950ae6dd25f..6c567e23bc817957d7f376ef101f8e5ca88559e6 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -147,6 +147,7 @@ typedef struct HttpContext { int32_t state; uint8_t reqType; uint8_t parsed; + uint8_t error; char ipstr[22]; char user[TSDB_USER_LEN]; // parsed from auth token or login message char pass[HTTP_PASSWORD_LEN]; diff --git a/src/plugins/http/inc/httpUtil.h b/src/plugins/http/inc/httpUtil.h index 54c95b6980f8241c3ea6c8e563e0e42c7c737286..21690ebca96d35423e126a9e747d8ce6bb5a43a0 100644 --- a/src/plugins/http/inc/httpUtil.h +++ b/src/plugins/http/inc/httpUtil.h @@ -17,6 +17,7 @@ #define TDENGINE_HTTP_UTIL_H bool httpCheckUsedbSql(char *sql); +bool httpCheckAlterSql(char *sql); void httpTimeToString(int32_t t, char *buf, int32_t buflen); bool httpUrlMatch(HttpContext *pContext, int32_t pos, char *cmp); diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 51adef11b9af3ebb83537024edbb3ba369aaeb03..11945453c56ab7fdd1fc8b0c4f2510bbbdda1a6e 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -188,11 +188,12 @@ void httpCloseContextByApp(HttpContext *pContext) { pContext->parsed = false; bool keepAlive = true; - if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) { + if (pContext->error == true) { + keepAlive = false; + } else if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) { keepAlive = false; } else if (parser && parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) { keepAlive = false; - } else { } if (keepAlive) { diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c index d51c774ff269d5790868727941a632d133dd6733..9719d93824b50064ec1cf23677c641428434592c 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -35,6 +35,7 @@ bool httpProcessData(HttpContext* pContext) { if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_HANDLING)) { httpTrace("context:%p, fd:%d, state:%s not in ready state, stop process request", pContext, pContext->fd, httpContextStateStr(pContext->state)); + pContext->error = true; httpCloseContextByApp(pContext); return false; } diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c index 3c72b795eef69186ef4e6308937678589224c60d..86e0f2f40beffdf7d035ba3329d51bb69c2cf796 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -272,26 +272,35 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { switch (timePrecision) { case TSDB_TIME_PRECISION_MILLI: { + mod = ((t) % 1000 + 1000) % 1000; + if (t < 0 && mod != 0) { + t -= 1000; + } quot = t / 1000; fractionLen = 5; format = ".%03" PRId64; - mod = t % 1000; break; } case TSDB_TIME_PRECISION_MICRO: { + mod = ((t) % 1000000 + 1000000) % 1000000; + if (t < 0 && mod != 0) { + t -= 1000000; + } quot = t / 1000000; fractionLen = 8; format = ".%06" PRId64; - mod = t % 1000000; break; } case TSDB_TIME_PRECISION_NANO: { + mod = ((t) % 1000000000 + 1000000000) % 1000000000; + if (t < 0 && mod != 0) { + t -= 1000000000; + } quot = t / 
1000000000; fractionLen = 11; format = ".%09" PRId64; - mod = t % 1000000000; break; } @@ -319,26 +328,35 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { switch (timePrecision) { case TSDB_TIME_PRECISION_MILLI: { + mod = ((t) % 1000 + 1000) % 1000; + if (t < 0 && mod != 0) { + t -= 1000; + } quot = t / 1000; fractionLen = 5; format = ".%03" PRId64; - mod = t % 1000; break; } case TSDB_TIME_PRECISION_MICRO: { + mod = ((t) % 1000000 + 1000000) % 1000000; + if (t < 0 && mod != 0) { + t -= 1000000; + } quot = t / 1000000; fractionLen = 8; format = ".%06" PRId64; - mod = t % 1000000; break; } case TSDB_TIME_PRECISION_NANO: { + mod = ((t) % 1000000000 + 1000000000) % 1000000000; + if (t < 0 && mod != 0) { + t -= 1000000000; + } quot = t / 1000000000; fractionLen = 11; format = ".%09" PRId64; - mod = t % 1000000000; break; } diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index 02f21037b8592cc847f02f1b2fbe3c01acd508d8..7066f19769754e78dffeed6a40b672584c0310f1 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -663,7 +663,7 @@ static int32_t httpParserOnTarget(HttpParser *parser, HTTP_PARSER_STATE state, c HttpContext *pContext = parser->pContext; int32_t ok = 0; do { - if (!isspace(c) && c != '\r' && c != '\n') { + if (!isspace(c)) { if (httpAppendString(&parser->str, &c, 1)) { httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c); ok = -1; diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index 79e728dd456fb8a340e50f9d7e9cbd3c409614db..1d05b455cb5c66e4f492140e1f337210da04caef 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -147,6 +147,8 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) { httpCode = pContext->parser->httpCode; } + pContext->error = true; + char *httpCodeStr = httpGetStatusDesc(httpCode); httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo & 0XFFFF, tstrerror(errNo)); } diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index 47f2d4ff5bcc513aafb8ea8f4e2a85db5a35b12a..13596b0e8a4ea4d183cc4bf75917fd08a9dd7290 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tglobal.h" +#include "tsclient.h" #include "httpLog.h" #include "httpJson.h" #include "httpRestHandle.h" @@ -62,13 +63,21 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) httpJsonItemToken(jsonBuf); httpJsonToken(jsonBuf, JsonArrStt); + SSqlObj *pObj = (SSqlObj *) result; + bool isAlterSql = (pObj->sqlstr == NULL) ? 
false : httpCheckAlterSql(pObj->sqlstr); + if (num_fields == 0) { httpJsonItemToken(jsonBuf); httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); } else { - for (int32_t i = 0; i < num_fields; ++i) { + if (isAlterSql == true) { httpJsonItemToken(jsonBuf); - httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + for (int32_t i = 0; i < num_fields; ++i) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + } } } @@ -99,8 +108,14 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) httpJsonItemToken(jsonBuf); httpJsonToken(jsonBuf, JsonArrStt); - httpJsonItemToken(jsonBuf); - httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + if (isAlterSql == true) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + } + httpJsonItemToken(jsonBuf); httpJsonInt(jsonBuf, fields[i].type); httpJsonItemToken(jsonBuf); diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index f02859f165499b0c69b095599dd47890e644c604..13a0835c3960333c6d12aa443025de5fb95d565e 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -191,8 +191,6 @@ static void httpProcessHttpData(void *param) { if (httpReadData(pContext)) { (*(pThread->processData))(pContext); atomic_fetch_add_32(&pServer->requestNum, 1); - } else { - httpReleaseContext(pContext/*, false*/); } } } @@ -402,13 +400,17 @@ static bool httpReadData(HttpContext *pContext) { } else if (nread < 0) { if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno); - return false; // later again + continue; // later again } else { httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno); + taosCloseSocket(pContext->fd); + httpReleaseContext(pContext/*, false */); return false; } } else { httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread); + taosCloseSocket(pContext->fd); + httpReleaseContext(pContext/*, false */); return false; } } diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 0dd451f72dbd78233ac8f73d552b6815e3a3fab8..602767a6563b3ca3430501c0dbcee65333f1d44b 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -405,7 +405,6 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) { if (pContext->session == NULL) { httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL); - httpCloseContextByApp(pContext); } else { httpExecCmd(pContext); } diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index ade50bdad6bf6b0a7a2d43bb354851d90686be49..f30ac7326eef20f4abf5558b288f16f6ee313b42 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -21,6 +21,7 @@ #include "httpResp.h" #include "httpSql.h" #include "httpUtil.h" +#include "ttoken.h" bool httpCheckUsedbSql(char *sql) { if (strstr(sql, "use ") != NULL) { @@ -29,6 +30,17 @@ bool httpCheckUsedbSql(char *sql) { return false; } +bool httpCheckAlterSql(char *sql) { + int32_t index = 0; + + do { + SStrToken t0 = tStrGetToken(sql, 
&index, false); + if (t0.type != TK_LP) { + return t0.type == TK_ALTER; + } + } while (1); +} + void httpTimeToString(int32_t t, char *buf, int32_t buflen) { memset(buf, 0, (size_t)buflen); char ts[32] = {0}; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 31db6492f69c35904970cc5f48cc4a10c9fecd39..19ca8e7ed8867f32c9625fe66ad4c04d64bc98e7 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -86,11 +86,18 @@ typedef struct SResultRow { char *key; // start key of current result row } SResultRow; +typedef struct SResultRowCell { + uint64_t groupId; + SResultRow *pRow; +} SResultRowCell; + typedef struct SGroupResInfo { int32_t totalGroup; int32_t currentGroup; int32_t index; SArray* pRows; // SArray + bool ordered; + int32_t position; } SGroupResInfo; /** @@ -284,8 +291,9 @@ typedef struct SQueryRuntimeEnv { SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file SHashObj* pResultRowHashTable; // quick locate the window object for each result SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not + SArray* pResultRowArrayList; // The array list that contains the Result rows char* keyBuf; // window key buffer - SResultRowPool* pool; // window result object pool + SResultRowPool* pool; // The window result objects pool, all the resultRow Objects are allocated and managed by this object. char** prevRow; SArray* prevResult; // intermediate result, SArray diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 8466b7262ab43e49496b078dd3ca25985fe6605a..336e8620f210351471bddb9c94d56fcaa7f8a0fc 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -11,7 +11,7 @@ %left OR. %left AND. %right NOT. -%left EQ NE ISNULL NOTNULL IS LIKE GLOB BETWEEN IN. +%left EQ NE ISNULL NOTNULL IS LIKE MATCH NMATCH GLOB BETWEEN IN. %left GT GE LT LE. %left BITAND BITOR LSHIFT RSHIFT. %left PLUS MINUS. @@ -751,6 +751,10 @@ expr(A) ::= expr(X) REM expr(Y). {A = tSqlExprCreate(X, Y, TK_REM); } // like expression expr(A) ::= expr(X) LIKE expr(Y). {A = tSqlExprCreate(X, Y, TK_LIKE); } +// match expression +expr(A) ::= expr(X) MATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_MATCH); } +expr(A) ::= expr(X) NMATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_NMATCH); } + //in expression expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSqlExprCreate(X, (tSqlExpr*)Y, TK_IN); } @@ -916,5 +920,5 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); s %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD - LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL + LIKE MATCH NMATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES. 
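/* [editor's note] Worked example of the negative-timestamp handling added
 * to httpJsonTimestamp()/httpJsonUtcTimestamp() above. C's "%" truncates
 * toward zero, so a timestamp before the epoch would get a negative
 * fractional part; the patch computes a non-negative (Euclidean) remainder
 * and floors the quotient instead. Standalone sketch of the millisecond
 * case, with hypothetical names: */
#include <stdio.h>

static void splitMs(long long t, long long *quot, long long *mod) {
    *mod = (t % 1000 + 1000) % 1000;    /* fraction is always 0..999 */
    if (t < 0 && *mod != 0)
        t -= 1000;                      /* floor the division for t < 0 */
    *quot = t / 1000;
}

int main(void) {
    long long q, m;
    splitMs(-1, &q, &m);                      /* 1 ms before the epoch */
    printf("%lld s + %03lld ms\n", q, m);     /* -1 s + 999 ms = -0.001 s */
    splitMs(1, &q, &m);
    printf("%lld s + %03lld ms\n", q, m);     /* 0 s + 001 ms */
    return 0;
}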
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 700cf17fd38f69d4f2363174109c3217891a3e26..54a5423219c8822e42c20a1d9a0d392913f8fdef 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -544,6 +544,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult // add a new result set for a new group taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pResult, POINTER_BYTES); + SResultRowCell cell = {.groupId = tableGroupId, .pRow = pResult}; + taosArrayPush(pRuntimeEnv->pResultRowArrayList, &cell); } else { pResult = *p1; } @@ -2107,9 +2109,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables * 10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); + pRuntimeEnv->pResultRowArrayList = taosArrayInit(numOfTables, sizeof(SResultRowCell)); pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize); pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen); @@ -2384,6 +2387,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); + taosArrayDestroy(pRuntimeEnv->pResultRowArrayList); pRuntimeEnv->prevResult = NULL; } @@ -2398,11 +2402,11 @@ bool isQueryKilled(SQInfo *pQInfo) { // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived // abort current query execution. 
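/* [editor's note] The isQueryKilled() hunk below fixes a unit mismatch:
 * pQInfo->startExecTs is now recorded in milliseconds while
 * taosGetTimestampSec() returns seconds, hence the division by 1000
 * before subtracting. Distilled form of the guard, with hypothetical
 * parameter names: */
#include <stdbool.h>
#include <stdint.h>

static inline bool queryIdleTooLong(int64_t startExecTsMs, int64_t nowSec,
                                    int64_t maxIdleSec) {
    /* convert ms -> s so both operands share one unit */
    return (nowSec - startExecTsMs / 1000) > maxIdleSec;
}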
- if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs) > getMaximumIdleDurationSec()) && + if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs/1000) > getMaximumIdleDurationSec()) && (!needBuildResAfterQueryComplete(pQInfo))) { assert(pQInfo->startExecTs != 0); - qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d sec, abort current query execution, start:%" PRId64 + qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d ms, abort current query execution, start:%" PRId64 ", current:%d", pQInfo->qId, 1, pQInfo->startExecTs, taosGetTimestampSec()); return true; } @@ -4808,7 +4812,6 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; pQueryAttr->tsdb = tsdb; - if (tsdb != NULL) { int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery); if (code != TSDB_CODE_SUCCESS) { @@ -6379,6 +6382,7 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { if (!pRuntimeEnv->pQueryAttr->stableQuery) { sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); } + toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) { @@ -6388,6 +6392,19 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { return pInfo->binfo.pRes; } +static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo *pInfo, SQueryRuntimeEnv* pRuntimeEnv, bool* newgroup) { + pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; + int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; + taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); + + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); + + doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p); + pInfo->existNewGroupBlock = NULL; + *newgroup = true; +} + static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRuntimeEnv *pRuntimeEnv, bool *newgroup) { if (taosFillHasMoreResults(pInfo->pFillInfo)) { *newgroup = false; @@ -6399,16 +6416,7 @@ static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRunt // handle the cached new group data block if (pInfo->existNewGroupBlock) { - pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; - int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; - taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); - - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); - - doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p); - pInfo->existNewGroupBlock = NULL; - *newgroup = true; + doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup); } } @@ -6427,26 +6435,6 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult && pInfo->pRes->info.rows > 0)) { return pInfo->pRes; } -// if (taosFillHasMoreResults(pInfo->pFillInfo)) { -// *newgroup = 
false; -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity); -// return pInfo->pRes; -// } -// -// // handle the cached new group data block -// if (pInfo->existNewGroupBlock) { -// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; -// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; -// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); -// -// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); -// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); -// -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); -// pInfo->existNewGroupBlock = NULL; -// *newgroup = true; -// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; -// } while(1) { publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); @@ -6493,46 +6481,13 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL) { return pInfo->pRes; } - -// if (taosFillHasMoreResults(pInfo->pFillInfo)) { -// *newgroup = false; -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity); -// return pInfo->pRes; -// } -// -// // handle the cached new group data block -// if (pInfo->existNewGroupBlock) { -// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; -// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; -// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); -// -// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); -// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); -// -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); -// pInfo->existNewGroupBlock = NULL; -// *newgroup = true; -// -// if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) { -// return pInfo->pRes; -// } -// -//// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; -// } - } else if (pInfo->existNewGroupBlock) { // try next group - pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; - int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey; - taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); - - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); - - doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p); - pInfo->existNewGroupBlock = NULL; - *newgroup = true; + assert(pBlock != NULL); + doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup); - return (pInfo->pRes->info.rows > 0) ? 
+      if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) {
+        return pInfo->pRes;
+      }
     } else {
       return NULL;
     }
@@ -7649,8 +7604,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
       pMsg += sizeof(SSqlExpr);
 
       for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
-        pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType);
-        pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen);
+        pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
+        pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
 
         if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
           pExprMsg->param[j].pz = pMsg;
@@ -7697,8 +7652,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
       pMsg += sizeof(SSqlExpr);
 
       for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
-        pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType);
-        pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen);
+        pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
+        pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
 
         if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
           pExprMsg->param[j].pz = pMsg;
@@ -8458,6 +8413,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
   }
 
   pQInfo->qId = qId;
+  pQInfo->startExecTs = 0;
 
   pQInfo->runtimeEnv.pUdfInfo = pUdfInfo;
@@ -8696,7 +8652,6 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
   SArray* prevResult = NULL;
   if (prevResultLen > 0) {
     prevResult = interResFromBinary(param->prevResult, prevResultLen);
-
     pRuntimeEnv->prevResult = prevResult;
   }
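The qFilter.c changes that follow wire the new MATCH/NMATCH operators into the
filter layer: gDataCompare gains two comparators at slots 19 and 20, and
filterDoCompare treats a return value of 0 as "row passes" for both operators.
A minimal sketch of what such regex comparators can look like, assuming POSIX
regcomp/regexec underneath; the sketch* names are hypothetical stand-ins, and
the real compareStrRegexCompMatch/compareStrRegexCompNMatch live elsewhere in
the tree and may cache compiled patterns or handle NCHAR conversion:

    #include <regex.h>

    // returns 0 when the value matches the pattern, non-zero otherwise, so a
    // "ret == 0" check in the caller passes exactly on a match
    static int sketchRegexCompMatch(const void *pLeft, const void *pRight) {
      regex_t re;
      if (regcomp(&re, (const char *)pRight, REG_EXTENDED | REG_NOSUB) != 0) {
        return 1;  // invalid pattern: treat as "no match"
      }
      int rc = regexec(&re, (const char *)pLeft, 0, NULL, 0);
      regfree(&re);
      return (rc == 0) ? 0 : 1;
    }

    // NMATCH passes exactly when MATCH fails; it also reports 0 on "pass",
    // because filterDoCompare applies the same "ret == 0" test to both
    static int sketchRegexCompNMatch(const void *pLeft, const void *pRight) {
      return (sketchRegexCompMatch(pLeft, pRight) == 0) ? 1 : 0;
    }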
diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c
index 72f8376af6d5b7d25ba154a95e034be4eb0e6f66..5e8ff126d1510cd32446d695d5c8d698aa32d1b9 100644
--- a/src/query/src/qFilter.c
+++ b/src/query/src/qFilter.c
@@ -28,6 +28,8 @@ OptrStr gOptrStr[] = {
   {TSDB_RELATION_GREATER_EQUAL, ">="},
   {TSDB_RELATION_NOT_EQUAL, "!="},
   {TSDB_RELATION_LIKE, "like"},
+  {TSDB_RELATION_MATCH, "match"},
+  {TSDB_RELATION_NMATCH, "nmatch"},
   {TSDB_RELATION_ISNULL, "is null"},
   {TSDB_RELATION_NOTNULL, "not null"},
   {TSDB_RELATION_IN, "in"},
@@ -156,7 +158,7 @@ int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) {
 __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val, compareInt64Val, compareFloatVal,
   compareDoubleVal, compareLenPrefixedStr, compareStrPatternComp, compareFindItemInSet, compareWStrPatternComp,
   compareLenPrefixedWStr, compareUint8Val, compareUint16Val, compareUint32Val, compareUint64Val,
-  setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8
+  setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexCompMatch, compareStrRegexCompNMatch
 };
 
 int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
@@ -195,7 +197,11 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
     case TSDB_DATA_TYPE_FLOAT:  comparFn = 4; break;
     case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break;
     case TSDB_DATA_TYPE_BINARY: {
-      if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
+      if (optr == TSDB_RELATION_MATCH) {
+        comparFn = 19;
+      } else if (optr == TSDB_RELATION_NMATCH) {
+        comparFn = 20;
+      } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
         comparFn = 7;
       } else if (optr == TSDB_RELATION_IN) {
         comparFn = 8;
@@ -207,7 +213,11 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
     }
 
     case TSDB_DATA_TYPE_NCHAR: {
-      if (optr == TSDB_RELATION_LIKE) {
+      if (optr == TSDB_RELATION_MATCH) {
+        comparFn = 19;
+      } else if (optr == TSDB_RELATION_NMATCH) {
+        comparFn = 20;
+      } else if (optr == TSDB_RELATION_LIKE) {
        comparFn = 9;
       } else if (optr == TSDB_RELATION_IN) {
         comparFn = 8;
@@ -1871,6 +1881,12 @@ bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right)
     case TSDB_RELATION_LIKE: {
       return ret == 0;
     }
+    case TSDB_RELATION_MATCH: {
+      return ret == 0;
+    }
+    case TSDB_RELATION_NMATCH: {
+      return ret == 0;
+    }
     case TSDB_RELATION_IN: {
       return ret == 1;
     }
@@ -2641,7 +2657,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t
     }
 
     if (cunit->optr == TSDB_RELATION_ISNULL || cunit->optr == TSDB_RELATION_NOTNULL
-     || cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE
+     || cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE
+     || cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH
      || cunit->optr == TSDB_RELATION_NOT_EQUAL) {
       continue;
     }
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 4caf351799adbf000265566fb22617067efb725d..bc27e094db3dcb85ffa73810e922d73cd42ab3a0 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -436,13 +436,13 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *
   }
 
   STableQueryInfo** pList = supporter->pTableQueryInfo;
-
-  SResultRowInfo *pWindowResInfo1 = &(pList[left]->resInfo);
-  SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos);
+  SResultRow* pWindowRes1 = pList[left]->resInfo.pResult[leftPos];
   TSKEY leftTimestamp = pWindowRes1->win.skey;
 
-  SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo);
-  SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos);
+  SResultRow* pWindowRes2 = pList[right]->resInfo.pResult[rightPos];
   TSKEY rightTimestamp = pWindowRes2->win.skey;
 
   if (leftTimestamp == rightTimestamp) {
@@ -456,7 +456,77 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *
   }
 }
 
-static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList,
+// compare result-row cells by group id first, then by ascending window start key
+int32_t tsAscOrder(const void* p1, const void* p2) {
+  SResultRowCell* pc1 = (SResultRowCell*) p1;
+  SResultRowCell* pc2 = (SResultRowCell*) p2;
+
+  if (pc1->groupId == pc2->groupId) {
+    if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
+      return 0;
+    } else {
+      return (pc1->pRow->win.skey < pc2->pRow->win.skey) ? -1 : 1;
+    }
+  } else {
+    return (pc1->groupId < pc2->groupId) ? -1 : 1;
+  }
+}
+
+// compare result-row cells by group id first, then by descending window start key
+int32_t tsDescOrder(const void* p1, const void* p2) {
+  SResultRowCell* pc1 = (SResultRowCell*) p1;
+  SResultRowCell* pc2 = (SResultRowCell*) p2;
+
+  if (pc1->groupId == pc2->groupId) {
+    if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
+      return 0;
+    } else {
+      return (pc1->pRow->win.skey < pc2->pRow->win.skey) ? 1 : -1;
+    }
+  } else {
+    return (pc1->groupId < pc2->groupId) ? -1 : 1;
+  }
+}
+
+void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) {
+  __compar_fn_t fn = NULL;
+  if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) {
+    fn = tsAscOrder;
+  } else {
+    fn = tsDescOrder;
+  }
+
+  taosArraySort(pRuntimeEnv->pResultRowArrayList, fn);
+}
+
+static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, uint64_t groupId, int32_t* rowCellInfoOffset) {
+  if (!pGroupResInfo->ordered) {
+    orderTheResultRows(pRuntimeEnv);
+    pGroupResInfo->ordered = true;
+  }
+
+  if (pGroupResInfo->pRows == NULL) {
+    pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES);
+  }
+
+  size_t len = taosArrayGetSize(pRuntimeEnv->pResultRowArrayList);
+  for(; pGroupResInfo->position < len; ++pGroupResInfo->position) {
+    SResultRowCell* pResultRowCell = taosArrayGet(pRuntimeEnv->pResultRowArrayList, pGroupResInfo->position);
+    if (pResultRowCell->groupId != groupId) {
+      break;
+    }
+
+    int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pResultRowCell->pRow, rowCellInfoOffset);
+    if (num <= 0) {
+      continue;
+    }
+
+    taosArrayPush(pGroupResInfo->pRows, &pResultRowCell->pRow);
+    pResultRowCell->pRow->numOfRows = (uint32_t) num;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+static UNUSED_FUNC int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList,
     int32_t* rowCellInfoOffset) {
   bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr);
@@ -562,12 +632,7 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu
   int64_t st = taosGetTimestampUs();
 
   while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) {
-    SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup);
-
-    int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset);
-    if (ret != TSDB_CODE_SUCCESS) {
-      return ret;
-    }
+    mergeIntoGroupResultImplRv(pRuntimeEnv, pGroupResInfo, pGroupResInfo->currentGroup, offset);
 
     // this group generates at least one result, return results
     if (taosArrayGetSize(pGroupResInfo->pRows) > 0) {
@@ -583,7 +648,6 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu
   qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us",
          GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime);
 
-//  pQInfo->summary.firstStageMergeTime += elapsedTime;
 
   return TSDB_CODE_SUCCESS;
 }
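The qUtil.c rework above replaces the old per-table merge with a single global
ordering pass: orderTheResultRows() sorts every SResultRowCell once by
(groupId, window start key), and mergeIntoGroupResultImplRv() then sweeps a
persistent cursor (pGroupResInfo->position) forward, emitting rows until it
reaches the first cell of a different group. A worked sketch of the same idea
on plain arrays; the Cell type and helper names are hypothetical stand-ins:

    #include <stdlib.h>

    typedef struct { unsigned long groupId; long skey; } Cell;  // stand-in for SResultRowCell

    static int cellAsc(const void *a, const void *b) {
      const Cell *c1 = a, *c2 = b;
      if (c1->groupId != c2->groupId) return (c1->groupId < c2->groupId) ? -1 : 1;
      if (c1->skey == c2->skey) return 0;
      return (c1->skey < c2->skey) ? -1 : 1;
    }

    // one call per group; *position persists across calls, exactly like
    // pGroupResInfo->position, so each cell is visited only once overall
    static size_t sweepGroup(const Cell *cells, size_t n, size_t *position,
                             unsigned long groupId) {
      size_t emitted = 0;
      for (; *position < n; ++(*position)) {
        if (cells[*position].groupId != groupId) break;  // next group begins
        ++emitted;  // a real implementation would push the row here
      }
      return emitted;
    }

    // usage: qsort(cells, n, sizeof(Cell), cellAsc) once, then call
    // sweepGroup(cells, n, &pos, g) for g = 0, 1, ... with pos starting at 0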
-1:1; + } +} + +void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { + __compar_fn_t fn = NULL; + if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) { + fn = tsAscOrder; + } else { + fn = tsDescOrder; + } + + taosArraySort(pRuntimeEnv->pResultRowArrayList, fn); +} + +static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, uint64_t groupId, int32_t* rowCellInfoOffset) { + if (!pGroupResInfo->ordered) { + orderTheResultRows(pRuntimeEnv); + pGroupResInfo->ordered = true; + } + + if (pGroupResInfo->pRows == NULL) { + pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES); + } + + size_t len = taosArrayGetSize(pRuntimeEnv->pResultRowArrayList); + for(; pGroupResInfo->position < len; ++pGroupResInfo->position) { + SResultRowCell* pResultRowCell = taosArrayGet(pRuntimeEnv->pResultRowArrayList, pGroupResInfo->position); + if (pResultRowCell->groupId != groupId) { + break; + } + + int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pResultRowCell->pRow, rowCellInfoOffset); + if (num <= 0) { + continue; + } + + taosArrayPush(pGroupResInfo->pRows, &pResultRowCell->pRow); + pResultRowCell->pRow->numOfRows = (uint32_t) num; + } + + return TSDB_CODE_SUCCESS; +} + +static UNUSED_FUNC int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, int32_t* rowCellInfoOffset) { bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr); @@ -562,12 +632,7 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu int64_t st = taosGetTimestampUs(); while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { - SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup); - - int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } + mergeIntoGroupResultImplRv(pRuntimeEnv, pGroupResInfo, pGroupResInfo->currentGroup, offset); // this group generates at least one result, return results if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { @@ -583,7 +648,6 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime); -// pQInfo->summary.firstStageMergeTime += elapsedTime; return TSDB_CODE_SUCCESS; } diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index d56c12ab8735d0683db146f7000429d4d554dda5..1460fbdc0fd0324da28bf1161c34c564584258cd 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -35,7 +35,7 @@ typedef struct SQueryMgmt { bool closed; } SQueryMgmt; -static void queryMgmtKillQueryFn(void* handle) { +static void queryMgmtKillQueryFn(void* handle, void* param1) { void** fp = (void**)handle; qKillQuery(*fp); } @@ -215,6 +215,51 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } +#ifdef TEST_IMPL +// wait moment +int waitMoment(SQInfo* pQInfo){ + if(pQInfo->sql) { + int ms = 0; + char* pcnt = strstr(pQInfo->sql, " count(*)"); + if(pcnt) return 0; + + char* pos = strstr(pQInfo->sql, " t_"); + if(pos){ + pos += 3; + ms = atoi(pos); + while(*pos >= '0' && *pos <= '9'){ + pos ++; + } + char unit_char = *pos; + if(unit_char == 'h'){ + ms *= 3600*1000; + } else if(unit_char == 'm'){ + ms *= 60*1000; + } else if(unit_char == 's'){ + ms *= 1000; + } + } + if(ms == 0) return 0; + printf("test 
+    printf("test wait sleep %dms. sql=%s ...\n", ms, pQInfo->sql);
+
+    if(ms < 1000) {
+      taosMsleep(ms);
+    } else {
+      int used_ms = 0;
+      while(used_ms < ms) {
+        taosMsleep(1000);
+        used_ms += 1000;
+        if(isQueryKilled(pQInfo)){
+          printf("test check query is canceled, sleep break. %s\n", pQInfo->sql);
+          break;
+        }
+      }
+    }
+  }
+  return 1;
+}
+#endif
+
 bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
   SQInfo *pQInfo = (SQInfo *)qinfo;
   assert(pQInfo && pQInfo->signature == pQInfo);
@@ -228,7 +273,8 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
   }
 
   *qId = pQInfo->qId;
-  pQInfo->startExecTs = taosGetTimestampSec();
+  if(pQInfo->startExecTs == 0)  // record only the first execution time, in ms
+    pQInfo->startExecTs = taosGetTimestampMs();
 
   if (isQueryKilled(pQInfo)) {
     qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId);
@@ -259,7 +305,9 @@
   int64_t st = taosGetTimestampUs();
   pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup);
   pQInfo->summary.elapsedTime += (taosGetTimestampUs() - st);
-
+#ifdef TEST_IMPL
+  waitMoment(pQInfo);
+#endif
   publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC);
   pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv);
 
@@ -479,7 +527,7 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) {
   pQueryMgmt->closed = true;
   pthread_mutex_unlock(&pQueryMgmt->lock);
 
-  taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn);
+  taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn, NULL);
 }
 
 void qQueryMgmtReOpen(void *pQMgmt) {
@@ -574,3 +622,148 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) {
   taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle);
   return 0;
 }
+
+// kill a query by its qid and wait (at most waitMs * waitCount ms) for it to stop
+int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount) {
+  int32_t error = TSDB_CODE_SUCCESS;
+  void** handle = qAcquireQInfo(pMgmt, qId);
+  if(handle == NULL) return terrno;
+
+  SQInfo* pQInfo = (SQInfo*)(*handle);
+  if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
+    return TSDB_CODE_QRY_INVALID_QHANDLE;
+  }
+  qWarn("QId:0x%"PRIx64" is killed (no memory commit).", pQInfo->qId);
+  setQueryKilled(pQInfo);
+
+  // wait for the query to stop
+  int32_t loop = 0;
+  while (pQInfo->owner != 0) {
+    taosMsleep(waitMs);
+    if(loop++ > waitCount){
+      error = TSDB_CODE_FAILED;
+      break;
+    }
+  }
+
+  qReleaseQInfo(pMgmt, (void **)&handle, true);
+  return error;
+}
+
+// local struct
+typedef struct {
+  int64_t qId;
+  int64_t startExecTs;
+} SLongQuery;
+
+// sort callback: ascending by start time, so the longest-running queries come first
+static int compareLongQuery(const void* p1, const void* p2) {
+  SLongQuery* plq1 = *(SLongQuery**)p1;
+  SLongQuery* plq2 = *(SLongQuery**)p2;
+  if(plq1->startExecTs == plq2->startExecTs) {
+    return 0;
+  } else if(plq1->startExecTs > plq2->startExecTs) {
+    return 1;
+  } else {
+    return -1;
+  }
+}
+
+// callback for taosCacheRefresh
+static void cbFoundItem(void* handle, void* param1) {
+  SQInfo* qInfo = *(SQInfo**) handle;
+  if(qInfo == NULL) return;
+  SArray* qids = (SArray*) param1;
+  if(qids == NULL) return;
+
+  bool usedMem = true;
+  bool usedIMem = true;
+  SMemTable* mem = qInfo->query.memRef.snapshot.omem;
+  SMemTable* imem = qInfo->query.memRef.snapshot.imem;
+  if(mem == NULL || T_REF_VAL_GET(mem) == 0)
+    usedMem = false;
+  if(imem == NULL || T_REF_VAL_GET(imem) == 0)
+    usedIMem = false;
+
+  if(!usedMem && !usedIMem)
+    return;
+
+  // the query still pins a memtable; remember it as a kill candidate
+  SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery));
+  if(plq == NULL) return;
+  plq->qId = qInfo->qId;
+  plq->startExecTs = qInfo->startExecTs;
+  taosArrayPush(qids, &plq);
+}
+
+// obtain the list of long-running queries, longest-running first
+void* qObtainLongQuery(void* param){
+  SQueryMgmt* qMgmt = (SQueryMgmt*)param;
+  if(qMgmt == NULL || qMgmt->qinfoPool == NULL)
+    return NULL;
+  SArray* qids = taosArrayInit(4, POINTER_BYTES);
+  if(qids == NULL) return NULL;
+  // visit every cached query and collect the candidates
+  taosCacheRefresh(qMgmt->qinfoPool, cbFoundItem, qids);
+
+  size_t cnt = taosArrayGetSize(qids);
+  if(cnt == 0) {
+    taosArrayDestroy(qids);
+    return NULL;
+  }
+  if(cnt > 1)
+    taosArraySort(qids, compareLongQuery);
+
+  return qids;
+}
+
+// try to fix the "tsdb has no block to commit" case by killing queries
+// that have been running for longer than longQueryMs
+bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) {
+  SQueryMgmt *pQueryMgmt = pMgmt;
+  bool fixed = false;
+
+  // qid top list
+  SArray *qids = (SArray*)qObtainLongQuery(pQueryMgmt);
+  if(qids == NULL) return false;
+
+  // kill queries, oldest first
+  int64_t now = taosGetTimestampMs();
+  size_t cnt = taosArrayGetSize(qids);
+  size_t i;
+  SLongQuery* plq;
+  for(i = 0; i < cnt; i++) {
+    plq = (SLongQuery*)taosArrayGetP(qids, i);
+    if(plq->startExecTs > now) continue;
+    if(now - plq->startExecTs >= longQueryMs) {
+      qKillQueryByQId(pMgmt, plq->qId, 500, 10); // wait up to 500 ms * 10
+      if(tsdbNoProblem(pRepo)) {
+        fixed = true;
+        qWarn("QId:0x%"PRIx64" fixed problem after killing this query.", plq->qId);
+        break;
+      }
+    }
+  }
+
+  // free qids
+  for(i = 0; i < cnt; i++) {
+    free(taosArrayGetP(qids, i));
+  }
+  taosArrayDestroy(qids);
+  return fixed;
+}
+
+// solve "tsdb has no block to commit": escalate from 10 min to 2 min to 30 s
+bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) {
+  qWarn("pRepo=%p start solving problem.", pRepo);
+  if(qFixedNoBlock(pRepo, pMgmt, 10*60*1000)) {
+    return true;
+  }
+  if(qFixedNoBlock(pRepo, pMgmt, 2*60*1000)){
+    return true;
+  }
+  if(qFixedNoBlock(pRepo, pMgmt, 30*1000)){
+    return true;
+  }
+  qWarn("pRepo=%p solve problem failed.", pRepo);
+  return false;
+}
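Taken together, qKillQueryByQId(), qObtainLongQuery(), qFixedNoBlock() and
qSolveCommitNoBlock() form an escalation ladder: first only queries older than
ten minutes are killed, and if the tsdb still has no block to commit, the
threshold drops to two minutes and finally to thirty seconds. A minimal sketch
of a call site, assuming a commit path that has detected the stuck state;
sketchCommitBlocked() is hypothetical, while tsdbNoProblem() and
qSolveCommitNoBlock() are the functions used and introduced in this patch:

    // hypothetical call site inside the tsdb commit path
    static void sketchCommitBlocked(void *pRepo, void *pQueryMgmt) {
      if (tsdbNoProblem(pRepo)) {
        return;  // a block is available; nothing to fix
      }
      // kill long-running queries, oldest first, until the commit can proceed
      if (!qSolveCommitNoBlock(pRepo, pQueryMgmt)) {
        // even the 30-second pass did not free a block; retry later
      }
    }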
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index 16bb4e49a80fa7a263b7c6e0446cb4aa328343bb..e89b6232f7e42b764df7660f06dcd207bfe6e4de 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -23,7 +23,9 @@
 ** input grammar file:
 */
 #include 
+#include 
 /************ Begin %include sections from the grammar ************************/
+#line 23 "sql.y"
 #include 
 #include 
@@ -36,6 +38,7 @@
 #include "ttokendef.h"
 #include "tutil.h"
 #include "tvariant.h"
+#line 42 "sql.c"
 /**************** End of %include directives **********************************/
 /* These constants specify the various numeric values for terminal symbols
 ** in a format understandable to "makeheaders". This section is blank unless
@@ -76,8 +79,10 @@
 ** zero the stack is dynamically sized using realloc()
 ** ParseARG_SDECL     A static variable declaration for the %extra_argument
 ** ParseARG_PDECL     A parameter declaration for the %extra_argument
+** ParseARG_PARAM     Code to pass %extra_argument as a subroutine parameter
 ** ParseARG_STORE     Code to store %extra_argument into yypParser
 ** ParseARG_FETCH     Code to extract %extra_argument from yypParser
+** ParseCTX_*         As ParseARG_ except for %extra_context
 ** YYERRORSYMBOL      is the code number of the error symbol. If not
 ** defined, then do no error processing.
 ** YYNSTATE           the combined number of states.
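The hunk above documents the new ParseCTX_* macro family that newer lemon
templates emit next to ParseARG_*: %extra_argument is passed on every Parse()
call, while %extra_context is stored once at allocation time. TDengine's
grammar only declares %extra_argument (the SSqlInfo pointer), so the
ParseCTX_* macros expand to nothing in this file. A minimal driver sketch,
assuming the lemon entry points generated here and TK_SELECT from
ttokendef.h:

    SSqlInfo info = {0};
    char sel[] = "select";
    SStrToken tok = { .n = 6, .type = TK_SELECT, .z = sel };

    void *parser = ParseAlloc(malloc);     // ParseInit() runs inside
    Parse(parser, TK_SELECT, tok, &info);  // &info travels via ParseARG_STORE
    /* ... feed the remaining tokens of the statement ... */
    Parse(parser, 0, tok, &info);          // major token 0 means end of input
    ParseFree(parser, free);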
@@ -97,7 +102,7 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 279 +#define YYNOCODE 278 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { @@ -126,21 +131,29 @@ typedef union { #endif #define ParseARG_SDECL SSqlInfo* pInfo; #define ParseARG_PDECL ,SSqlInfo* pInfo -#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo -#define ParseARG_STORE yypParser->pInfo = pInfo +#define ParseARG_PARAM ,pInfo +#define ParseARG_FETCH SSqlInfo* pInfo=yypParser->pInfo; +#define ParseARG_STORE yypParser->pInfo=pInfo; +#define ParseCTX_SDECL +#define ParseCTX_PDECL +#define ParseCTX_PARAM +#define ParseCTX_FETCH +#define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 364 -#define YYNRULE 292 -#define YYNTOKEN 196 -#define YY_MAX_SHIFT 363 -#define YY_MIN_SHIFTREDUCE 572 -#define YY_MAX_SHIFTREDUCE 863 -#define YY_ERROR_ACTION 864 -#define YY_ACCEPT_ACTION 865 -#define YY_NO_ACTION 866 -#define YY_MIN_REDUCE 867 -#define YY_MAX_REDUCE 1158 +#define YYNSTATE 368 +#define YYNRULE 294 +#define YYNRULE_WITH_ACTION 294 +#define YYNTOKEN 197 +#define YY_MAX_SHIFT 367 +#define YY_MIN_SHIFTREDUCE 576 +#define YY_MAX_SHIFTREDUCE 869 +#define YY_ERROR_ACTION 870 +#define YY_ACCEPT_ACTION 871 +#define YY_NO_ACTION 872 +#define YY_MIN_REDUCE 873 +#define YY_MAX_REDUCE 1166 /************* End control #defines *******************************************/ +#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. @@ -205,164 +218,166 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (759) +#define YY_ACTTAB_COUNT (773) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 171, 624, 248, 624, 624, 23, 362, 231, 162, 625, - /* 10 */ 247, 625, 625, 57, 58, 206, 61, 62, 282, 1045, - /* 20 */ 251, 51, 252, 60, 320, 65, 63, 66, 64, 993, - /* 30 */ 209, 991, 992, 56, 55, 162, 994, 54, 53, 52, - /* 40 */ 995, 1134, 996, 997, 155, 660, 79, 573, 574, 575, - /* 50 */ 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, - /* 60 */ 586, 153, 209, 232, 57, 58, 207, 61, 62, 1009, - /* 70 */ 209, 251, 51, 1135, 60, 320, 65, 63, 66, 64, - /* 80 */ 1008, 1135, 209, 1083, 56, 55, 80, 1042, 54, 53, - /* 90 */ 52, 57, 58, 1135, 61, 62, 237, 318, 251, 51, - /* 100 */ 1023, 60, 320, 65, 63, 66, 64, 708, 294, 86, - /* 110 */ 91, 56, 55, 280, 279, 54, 53, 52, 57, 59, - /* 120 */ 243, 61, 62, 350, 1023, 251, 51, 95, 60, 320, - /* 130 */ 65, 63, 66, 64, 802, 340, 339, 245, 56, 55, - /* 140 */ 213, 1023, 54, 53, 52, 58, 45, 61, 62, 767, - /* 150 */ 768, 251, 51, 318, 60, 320, 65, 63, 66, 64, - /* 160 */ 1006, 1007, 35, 1010, 56, 55, 865, 363, 54, 53, - /* 170 */ 52, 44, 316, 357, 356, 315, 314, 313, 355, 312, - /* 180 */ 311, 310, 354, 309, 353, 352, 985, 973, 974, 975, - /* 190 */ 976, 977, 978, 979, 980, 981, 982, 983, 984, 986, - /* 200 */ 987, 61, 62, 24, 1131, 251, 51, 624, 60, 320, - /* 210 */ 65, 63, 66, 64, 162, 625, 1036, 98, 56, 55, - /* 220 */ 212, 1036, 54, 53, 52, 56, 55, 218, 38, 54, - /* 230 */ 53, 52, 273, 137, 136, 135, 217, 234, 250, 817, - /* 240 */ 325, 86, 806, 1017, 809, 16, 812, 15, 250, 817, - /* 250 */ 123, 265, 806, 1011, 809, 748, 812, 5, 41, 180, - /* 260 */ 269, 268, 350, 1130, 179, 104, 109, 100, 108, 916, - /* 270 */ 229, 230, 1129, 233, 321, 227, 190, 
1020, 45, 1036, - /* 280 */ 229, 230, 38, 305, 65, 63, 66, 64, 29, 1084, - /* 290 */ 244, 292, 56, 55, 260, 235, 54, 53, 52, 254, - /* 300 */ 272, 732, 78, 162, 729, 176, 730, 259, 731, 225, - /* 310 */ 260, 54, 53, 52, 228, 200, 198, 196, 808, 67, - /* 320 */ 811, 177, 195, 141, 140, 139, 138, 241, 807, 67, - /* 330 */ 810, 1020, 256, 257, 121, 115, 126, 152, 150, 149, - /* 340 */ 752, 125, 260, 131, 134, 124, 38, 38, 38, 361, - /* 350 */ 360, 146, 128, 1021, 818, 813, 44, 38, 357, 356, - /* 360 */ 38, 814, 38, 355, 818, 813, 1022, 354, 93, 353, - /* 370 */ 352, 814, 784, 38, 38, 255, 38, 253, 92, 328, - /* 380 */ 327, 38, 81, 261, 926, 258, 322, 335, 334, 745, - /* 390 */ 14, 190, 242, 329, 94, 1019, 1020, 1020, 71, 358, - /* 400 */ 954, 917, 330, 3, 191, 331, 1020, 332, 190, 1020, - /* 410 */ 34, 1020, 1, 178, 274, 9, 733, 734, 336, 337, - /* 420 */ 83, 338, 1020, 1020, 97, 1020, 342, 84, 39, 783, - /* 430 */ 1020, 764, 774, 74, 775, 718, 297, 815, 720, 299, - /* 440 */ 72, 719, 838, 300, 819, 157, 68, 816, 26, 39, - /* 450 */ 39, 804, 68, 96, 249, 68, 25, 276, 25, 114, - /* 460 */ 623, 113, 77, 18, 276, 17, 737, 735, 738, 736, - /* 470 */ 20, 210, 19, 75, 25, 120, 6, 119, 22, 211, - /* 480 */ 21, 133, 132, 214, 1154, 208, 215, 805, 821, 216, - /* 490 */ 220, 221, 222, 219, 707, 205, 1094, 1146, 48, 1093, - /* 500 */ 239, 1090, 1089, 240, 341, 270, 154, 1076, 1044, 1055, - /* 510 */ 1052, 1053, 151, 1037, 277, 1057, 156, 1075, 161, 288, - /* 520 */ 172, 173, 275, 1018, 281, 1016, 174, 168, 175, 165, - /* 530 */ 1034, 931, 763, 302, 303, 169, 167, 304, 307, 308, - /* 540 */ 46, 203, 42, 236, 319, 925, 326, 283, 285, 1153, - /* 550 */ 76, 73, 163, 111, 295, 164, 1152, 50, 293, 1149, - /* 560 */ 166, 181, 333, 1145, 117, 1144, 1141, 182, 951, 43, - /* 570 */ 291, 40, 47, 204, 913, 127, 289, 911, 129, 284, - /* 580 */ 130, 909, 908, 262, 193, 194, 905, 904, 903, 902, - /* 590 */ 901, 900, 899, 287, 197, 199, 896, 894, 892, 890, - /* 600 */ 201, 887, 306, 202, 883, 49, 351, 82, 87, 343, - /* 610 */ 286, 1077, 122, 344, 345, 346, 226, 246, 301, 347, - /* 620 */ 348, 349, 359, 863, 263, 264, 862, 223, 105, 930, - /* 630 */ 929, 224, 106, 266, 267, 861, 844, 271, 907, 843, - /* 640 */ 906, 296, 142, 276, 143, 185, 184, 952, 183, 186, - /* 650 */ 187, 189, 188, 144, 898, 897, 953, 145, 989, 889, - /* 660 */ 888, 10, 85, 740, 33, 170, 4, 30, 2, 278, - /* 670 */ 88, 999, 765, 158, 160, 776, 159, 238, 770, 89, - /* 680 */ 31, 772, 90, 290, 11, 32, 12, 13, 27, 298, - /* 690 */ 28, 97, 99, 102, 36, 101, 638, 37, 103, 673, - /* 700 */ 671, 670, 669, 667, 666, 665, 662, 628, 317, 107, - /* 710 */ 7, 323, 324, 822, 110, 820, 8, 112, 69, 70, - /* 720 */ 710, 39, 709, 116, 706, 118, 654, 652, 644, 650, - /* 730 */ 646, 648, 642, 640, 676, 675, 674, 672, 668, 664, - /* 740 */ 663, 192, 626, 590, 867, 866, 866, 866, 866, 866, - /* 750 */ 866, 866, 866, 866, 866, 866, 866, 147, 148, + /* 0 */ 23, 628, 366, 235, 1051, 208, 241, 712, 211, 629, + /* 10 */ 1029, 871, 367, 59, 60, 173, 63, 64, 1042, 1142, + /* 20 */ 255, 53, 52, 51, 628, 62, 324, 67, 65, 68, + /* 30 */ 66, 157, 629, 286, 238, 58, 57, 344, 343, 56, + /* 40 */ 55, 54, 59, 60, 247, 63, 64, 252, 1029, 255, + /* 50 */ 53, 52, 51, 209, 62, 324, 67, 65, 68, 66, + /* 60 */ 999, 1042, 997, 998, 58, 57, 664, 1000, 56, 55, + /* 70 */ 54, 1001, 1048, 1002, 1003, 58, 57, 277, 1015, 56, + /* 80 */ 55, 54, 59, 60, 164, 63, 64, 38, 82, 255, + /* 90 */ 53, 52, 51, 88, 62, 324, 67, 65, 68, 66, + /* 100 */ 284, 283, 249, 752, 58, 57, 1029, 211, 56, 55, + /* 110 */ 
54, 38, 59, 61, 806, 63, 64, 1042, 1143, 255, + /* 120 */ 53, 52, 51, 628, 62, 324, 67, 65, 68, 66, + /* 130 */ 45, 629, 237, 239, 58, 57, 1026, 164, 56, 55, + /* 140 */ 54, 60, 1023, 63, 64, 771, 772, 255, 53, 52, + /* 150 */ 51, 95, 62, 324, 67, 65, 68, 66, 38, 1090, + /* 160 */ 1025, 296, 58, 57, 322, 83, 56, 55, 54, 577, + /* 170 */ 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, + /* 180 */ 588, 589, 590, 155, 322, 236, 63, 64, 756, 248, + /* 190 */ 255, 53, 52, 51, 628, 62, 324, 67, 65, 68, + /* 200 */ 66, 251, 629, 245, 354, 58, 57, 1026, 215, 56, + /* 210 */ 55, 54, 1089, 44, 320, 361, 360, 319, 318, 317, + /* 220 */ 359, 316, 315, 314, 358, 313, 357, 356, 808, 38, + /* 230 */ 1, 180, 24, 991, 979, 980, 981, 982, 983, 984, + /* 240 */ 985, 986, 987, 988, 989, 990, 992, 993, 256, 214, + /* 250 */ 38, 254, 821, 922, 100, 810, 222, 813, 164, 816, + /* 260 */ 192, 211, 139, 138, 137, 221, 809, 254, 821, 329, + /* 270 */ 88, 810, 1143, 813, 246, 816, 1028, 29, 1026, 67, + /* 280 */ 65, 68, 66, 38, 1162, 233, 234, 58, 57, 325, + /* 290 */ 1017, 56, 55, 54, 38, 333, 56, 55, 54, 1026, + /* 300 */ 269, 233, 234, 258, 5, 41, 182, 45, 211, 273, + /* 310 */ 272, 181, 106, 111, 102, 110, 164, 73, 736, 1143, + /* 320 */ 932, 733, 812, 734, 815, 735, 263, 192, 334, 276, + /* 330 */ 309, 80, 1026, 94, 69, 123, 117, 128, 229, 335, + /* 340 */ 362, 960, 127, 1026, 133, 136, 126, 202, 200, 198, + /* 350 */ 69, 260, 261, 130, 197, 143, 142, 141, 140, 74, + /* 360 */ 44, 97, 361, 360, 788, 923, 38, 359, 38, 822, + /* 370 */ 817, 358, 192, 357, 356, 38, 818, 38, 38, 259, + /* 380 */ 811, 257, 814, 332, 331, 822, 817, 264, 125, 298, + /* 390 */ 264, 93, 818, 326, 1012, 1013, 35, 1016, 178, 14, + /* 400 */ 354, 179, 265, 96, 262, 264, 339, 338, 154, 152, + /* 410 */ 151, 336, 749, 340, 81, 1026, 1027, 1026, 3, 193, + /* 420 */ 341, 787, 342, 346, 1026, 278, 1026, 1026, 365, 364, + /* 430 */ 148, 85, 86, 99, 76, 737, 738, 768, 9, 39, + /* 440 */ 778, 779, 722, 819, 301, 724, 216, 303, 1014, 723, + /* 450 */ 34, 159, 844, 823, 70, 26, 39, 253, 39, 70, + /* 460 */ 79, 98, 627, 70, 135, 134, 25, 25, 280, 280, + /* 470 */ 16, 116, 15, 115, 77, 18, 25, 17, 741, 6, + /* 480 */ 742, 274, 739, 304, 740, 20, 122, 19, 121, 22, + /* 490 */ 217, 21, 711, 1100, 1137, 1136, 1135, 825, 231, 156, + /* 500 */ 232, 820, 212, 213, 218, 210, 1099, 219, 220, 224, + /* 510 */ 225, 226, 223, 207, 1154, 243, 1096, 1095, 244, 345, + /* 520 */ 1050, 1061, 1043, 48, 1058, 1059, 1063, 153, 281, 158, + /* 530 */ 163, 292, 1024, 175, 1082, 174, 1081, 279, 84, 285, + /* 540 */ 1022, 310, 176, 240, 177, 171, 167, 937, 306, 307, + /* 550 */ 308, 767, 311, 312, 1040, 165, 166, 46, 287, 289, + /* 560 */ 297, 299, 205, 168, 42, 78, 75, 50, 323, 931, + /* 570 */ 330, 1161, 113, 1160, 295, 169, 293, 291, 1157, 183, + /* 580 */ 337, 1153, 119, 288, 1152, 1149, 184, 957, 43, 40, + /* 590 */ 47, 206, 919, 129, 49, 917, 131, 132, 915, 914, + /* 600 */ 266, 195, 196, 911, 910, 909, 908, 907, 906, 905, + /* 610 */ 199, 201, 902, 900, 898, 896, 203, 893, 204, 889, + /* 620 */ 355, 124, 89, 290, 1083, 347, 348, 349, 350, 351, + /* 630 */ 352, 353, 363, 869, 230, 250, 305, 267, 268, 868, + /* 640 */ 270, 227, 228, 271, 867, 850, 107, 936, 935, 108, + /* 650 */ 849, 275, 280, 300, 10, 282, 744, 87, 30, 90, + /* 660 */ 913, 912, 904, 186, 958, 190, 185, 187, 144, 191, + /* 670 */ 189, 188, 145, 146, 147, 903, 995, 895, 4, 894, + /* 680 */ 959, 769, 160, 33, 780, 170, 172, 2, 161, 162, + /* 690 */ 774, 91, 242, 776, 92, 1005, 294, 11, 12, 31, + /* 700 
*/ 32, 13, 27, 302, 28, 99, 101, 104, 36, 103, + /* 710 */ 642, 37, 105, 677, 675, 674, 673, 671, 670, 669, + /* 720 */ 666, 321, 109, 632, 7, 826, 824, 8, 327, 328, + /* 730 */ 112, 114, 71, 72, 118, 714, 39, 120, 713, 710, + /* 740 */ 658, 656, 648, 654, 650, 652, 646, 644, 680, 679, + /* 750 */ 678, 676, 672, 668, 667, 194, 630, 594, 873, 872, + /* 760 */ 872, 872, 872, 872, 872, 872, 872, 872, 872, 872, + /* 770 */ 872, 149, 150, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 253, 1, 206, 1, 1, 266, 199, 200, 199, 9, - /* 10 */ 206, 9, 9, 13, 14, 266, 16, 17, 271, 199, - /* 20 */ 20, 21, 206, 23, 24, 25, 26, 27, 28, 223, - /* 30 */ 266, 225, 226, 33, 34, 199, 230, 37, 38, 39, - /* 40 */ 234, 277, 236, 237, 199, 5, 207, 45, 46, 47, - /* 50 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 60 */ 58, 59, 266, 61, 13, 14, 266, 16, 17, 0, - /* 70 */ 266, 20, 21, 277, 23, 24, 25, 26, 27, 28, - /* 80 */ 241, 277, 266, 274, 33, 34, 86, 267, 37, 38, - /* 90 */ 39, 13, 14, 277, 16, 17, 245, 84, 20, 21, - /* 100 */ 249, 23, 24, 25, 26, 27, 28, 5, 272, 82, - /* 110 */ 274, 33, 34, 268, 269, 37, 38, 39, 13, 14, - /* 120 */ 245, 16, 17, 90, 249, 20, 21, 207, 23, 24, - /* 130 */ 25, 26, 27, 28, 83, 33, 34, 245, 33, 34, - /* 140 */ 266, 249, 37, 38, 39, 14, 119, 16, 17, 125, - /* 150 */ 126, 20, 21, 84, 23, 24, 25, 26, 27, 28, - /* 160 */ 240, 241, 242, 243, 33, 34, 197, 198, 37, 38, - /* 170 */ 39, 98, 99, 100, 101, 102, 103, 104, 105, 106, - /* 180 */ 107, 108, 109, 110, 111, 112, 223, 224, 225, 226, - /* 190 */ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, - /* 200 */ 237, 16, 17, 44, 266, 20, 21, 1, 23, 24, - /* 210 */ 25, 26, 27, 28, 199, 9, 247, 207, 33, 34, - /* 220 */ 61, 247, 37, 38, 39, 33, 34, 68, 199, 37, - /* 230 */ 38, 39, 263, 74, 75, 76, 77, 263, 1, 2, - /* 240 */ 81, 82, 5, 199, 7, 145, 9, 147, 1, 2, - /* 250 */ 78, 142, 5, 243, 7, 37, 9, 62, 63, 64, - /* 260 */ 151, 152, 90, 266, 69, 70, 71, 72, 73, 205, - /* 270 */ 33, 34, 266, 244, 37, 266, 212, 248, 119, 247, - /* 280 */ 33, 34, 199, 88, 25, 26, 27, 28, 82, 274, - /* 290 */ 246, 276, 33, 34, 199, 263, 37, 38, 39, 68, - /* 300 */ 141, 2, 143, 199, 5, 210, 7, 68, 9, 150, - /* 310 */ 199, 37, 38, 39, 266, 62, 63, 64, 5, 82, - /* 320 */ 7, 210, 69, 70, 71, 72, 73, 244, 5, 82, - /* 330 */ 7, 248, 33, 34, 62, 63, 64, 62, 63, 64, - /* 340 */ 122, 69, 199, 71, 72, 73, 199, 199, 199, 65, - /* 350 */ 66, 67, 80, 210, 117, 118, 98, 199, 100, 101, - /* 360 */ 199, 124, 199, 105, 117, 118, 249, 109, 250, 111, - /* 370 */ 112, 124, 76, 199, 199, 144, 199, 146, 274, 148, - /* 380 */ 149, 199, 264, 144, 205, 146, 15, 148, 149, 97, - /* 390 */ 82, 212, 244, 244, 86, 248, 248, 248, 97, 221, - /* 400 */ 222, 205, 244, 203, 204, 244, 248, 244, 212, 248, - /* 410 */ 82, 248, 208, 209, 83, 123, 117, 118, 244, 244, - /* 420 */ 83, 244, 248, 248, 116, 248, 244, 83, 97, 133, - /* 430 */ 248, 83, 83, 97, 83, 83, 83, 124, 83, 83, - /* 440 */ 139, 83, 83, 115, 83, 97, 97, 124, 97, 97, - /* 450 */ 97, 1, 97, 97, 60, 97, 97, 120, 97, 145, - /* 460 */ 83, 147, 82, 145, 120, 147, 5, 5, 7, 7, - /* 470 */ 145, 266, 147, 137, 97, 145, 82, 147, 145, 266, - /* 480 */ 147, 78, 79, 266, 249, 266, 266, 37, 117, 266, - /* 490 */ 266, 266, 266, 266, 114, 266, 239, 249, 265, 239, - /* 500 */ 239, 239, 239, 239, 239, 199, 199, 275, 199, 199, - /* 510 */ 199, 199, 60, 247, 247, 199, 199, 275, 199, 199, - /* 520 */ 251, 199, 201, 247, 270, 199, 199, 256, 199, 259, - /* 530 */ 262, 199, 124, 199, 199, 255, 257, 199, 199, 199, - /* 540 */ 199, 199, 199, 270, 199, 
199, 199, 270, 270, 199, - /* 550 */ 136, 138, 261, 199, 131, 260, 199, 135, 134, 199, - /* 560 */ 258, 199, 199, 199, 199, 199, 199, 199, 199, 199, - /* 570 */ 129, 199, 199, 199, 199, 199, 128, 199, 199, 130, - /* 580 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, - /* 590 */ 199, 199, 199, 127, 199, 199, 199, 199, 199, 199, - /* 600 */ 199, 199, 89, 199, 199, 140, 113, 201, 201, 95, - /* 610 */ 201, 201, 96, 51, 92, 94, 201, 201, 201, 55, - /* 620 */ 93, 91, 84, 5, 153, 5, 5, 201, 207, 211, - /* 630 */ 211, 201, 207, 153, 5, 5, 100, 142, 201, 99, - /* 640 */ 201, 115, 202, 120, 202, 214, 218, 220, 219, 217, - /* 650 */ 215, 213, 216, 202, 201, 201, 222, 202, 238, 201, - /* 660 */ 201, 82, 121, 83, 252, 254, 203, 82, 208, 97, - /* 670 */ 97, 238, 83, 82, 97, 83, 82, 1, 83, 82, - /* 680 */ 97, 83, 82, 82, 132, 97, 132, 82, 82, 115, - /* 690 */ 82, 116, 78, 70, 87, 86, 5, 87, 86, 9, - /* 700 */ 5, 5, 5, 5, 5, 5, 5, 85, 15, 78, - /* 710 */ 82, 24, 59, 117, 147, 83, 82, 147, 16, 16, - /* 720 */ 5, 97, 5, 147, 83, 147, 5, 5, 5, 5, - /* 730 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 740 */ 5, 97, 85, 60, 0, 278, 278, 278, 278, 278, - /* 750 */ 278, 278, 278, 278, 278, 278, 278, 21, 21, 278, + /* 0 */ 266, 1, 199, 200, 199, 266, 245, 5, 266, 9, + /* 10 */ 249, 197, 198, 13, 14, 253, 16, 17, 247, 277, + /* 20 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29, + /* 30 */ 30, 199, 9, 271, 263, 35, 36, 35, 36, 39, + /* 40 */ 40, 41, 13, 14, 245, 16, 17, 206, 249, 20, + /* 50 */ 21, 22, 23, 266, 25, 26, 27, 28, 29, 30, + /* 60 */ 223, 247, 225, 226, 35, 36, 5, 230, 39, 40, + /* 70 */ 41, 234, 267, 236, 237, 35, 36, 263, 0, 39, + /* 80 */ 40, 41, 13, 14, 199, 16, 17, 199, 88, 20, + /* 90 */ 21, 22, 23, 84, 25, 26, 27, 28, 29, 30, + /* 100 */ 268, 269, 245, 39, 35, 36, 249, 266, 39, 40, + /* 110 */ 41, 199, 13, 14, 85, 16, 17, 247, 277, 20, + /* 120 */ 21, 22, 23, 1, 25, 26, 27, 28, 29, 30, + /* 130 */ 121, 9, 244, 263, 35, 36, 248, 199, 39, 40, + /* 140 */ 41, 14, 199, 16, 17, 127, 128, 20, 21, 22, + /* 150 */ 23, 250, 25, 26, 27, 28, 29, 30, 199, 274, + /* 160 */ 248, 276, 35, 36, 86, 264, 39, 40, 41, 47, + /* 170 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 180 */ 58, 59, 60, 61, 86, 63, 16, 17, 124, 246, + /* 190 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29, + /* 200 */ 30, 206, 9, 244, 92, 35, 36, 248, 266, 39, + /* 210 */ 40, 41, 274, 100, 101, 102, 103, 104, 105, 106, + /* 220 */ 107, 108, 109, 110, 111, 112, 113, 114, 1, 199, + /* 230 */ 208, 209, 46, 223, 224, 225, 226, 227, 228, 229, + /* 240 */ 230, 231, 232, 233, 234, 235, 236, 237, 206, 63, + /* 250 */ 199, 1, 2, 205, 207, 5, 70, 7, 199, 9, + /* 260 */ 212, 266, 76, 77, 78, 79, 39, 1, 2, 83, + /* 270 */ 84, 5, 277, 7, 244, 9, 249, 84, 248, 27, + /* 280 */ 28, 29, 30, 199, 249, 35, 36, 35, 36, 39, + /* 290 */ 243, 39, 40, 41, 199, 244, 39, 40, 41, 248, + /* 300 */ 144, 35, 36, 70, 64, 65, 66, 121, 266, 153, + /* 310 */ 154, 71, 72, 73, 74, 75, 199, 99, 2, 277, + /* 320 */ 205, 5, 5, 7, 7, 9, 70, 212, 244, 143, + /* 330 */ 90, 145, 248, 274, 84, 64, 65, 66, 152, 244, + /* 340 */ 221, 222, 71, 248, 73, 74, 75, 64, 65, 66, + /* 350 */ 84, 35, 36, 82, 71, 72, 73, 74, 75, 141, + /* 360 */ 100, 207, 102, 103, 78, 205, 199, 107, 199, 119, + /* 370 */ 120, 111, 212, 113, 114, 199, 126, 199, 199, 146, + /* 380 */ 5, 148, 7, 150, 151, 119, 120, 199, 80, 272, + /* 390 */ 199, 274, 126, 15, 240, 241, 242, 243, 210, 84, + /* 400 */ 92, 210, 146, 88, 148, 199, 150, 151, 64, 65, + /* 410 */ 66, 244, 99, 244, 207, 248, 210, 248, 203, 204, + /* 420 */ 244, 135, 
244, 244, 248, 85, 248, 248, 67, 68, + /* 430 */ 69, 85, 85, 118, 99, 119, 120, 85, 125, 99, + /* 440 */ 85, 85, 85, 126, 85, 85, 266, 85, 241, 85, + /* 450 */ 84, 99, 85, 85, 99, 99, 99, 62, 99, 99, + /* 460 */ 84, 99, 85, 99, 80, 81, 99, 99, 122, 122, + /* 470 */ 147, 147, 149, 149, 139, 147, 99, 149, 5, 84, + /* 480 */ 7, 199, 5, 117, 7, 147, 147, 149, 149, 147, + /* 490 */ 266, 149, 116, 239, 266, 266, 266, 119, 266, 199, + /* 500 */ 266, 126, 266, 266, 266, 266, 239, 266, 266, 266, + /* 510 */ 266, 266, 266, 266, 249, 239, 239, 239, 239, 239, + /* 520 */ 199, 199, 247, 265, 199, 199, 199, 62, 247, 199, + /* 530 */ 199, 199, 247, 199, 275, 251, 275, 201, 201, 270, + /* 540 */ 199, 91, 199, 270, 199, 255, 259, 199, 199, 199, + /* 550 */ 199, 126, 199, 199, 262, 261, 260, 199, 270, 270, + /* 560 */ 136, 133, 199, 258, 199, 138, 140, 137, 199, 199, + /* 570 */ 199, 199, 199, 199, 131, 257, 130, 129, 199, 199, + /* 580 */ 199, 199, 199, 132, 199, 199, 199, 199, 199, 199, + /* 590 */ 199, 199, 199, 199, 142, 199, 199, 199, 199, 199, + /* 600 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, + /* 610 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, + /* 620 */ 115, 98, 201, 201, 201, 97, 53, 94, 96, 57, + /* 630 */ 95, 93, 86, 5, 201, 201, 201, 155, 5, 5, + /* 640 */ 155, 201, 201, 5, 5, 102, 207, 211, 211, 207, + /* 650 */ 101, 144, 122, 117, 84, 99, 85, 123, 84, 99, + /* 660 */ 201, 201, 201, 218, 220, 216, 219, 214, 202, 213, + /* 670 */ 215, 217, 202, 202, 202, 201, 238, 201, 203, 201, + /* 680 */ 222, 85, 84, 252, 85, 256, 254, 208, 84, 99, + /* 690 */ 85, 84, 1, 85, 84, 238, 84, 134, 134, 99, + /* 700 */ 99, 84, 84, 117, 84, 118, 80, 72, 89, 88, + /* 710 */ 5, 89, 88, 9, 5, 5, 5, 5, 5, 5, + /* 720 */ 5, 15, 80, 87, 84, 119, 85, 84, 26, 61, + /* 730 */ 149, 149, 16, 16, 149, 5, 99, 149, 5, 85, + /* 740 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 750 */ 5, 5, 5, 5, 5, 99, 87, 62, 0, 278, /* 760 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, - /* 770 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, + /* 770 */ 278, 21, 21, 278, 278, 278, 278, 278, 278, 278, /* 780 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, /* 790 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, /* 800 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, @@ -380,114 +395,115 @@ static const YYCODETYPE yy_lookahead[] = { /* 920 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, /* 930 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, /* 940 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, - /* 950 */ 278, 278, 278, 278, 278, + /* 950 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, + /* 960 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, }; -#define YY_SHIFT_COUNT (363) +#define YY_SHIFT_COUNT (367) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (744) +#define YY_SHIFT_MAX (758) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 159, 73, 73, 258, 258, 13, 237, 247, 247, 206, - /* 10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 20 */ 3, 3, 3, 0, 2, 247, 299, 299, 299, 27, - /* 30 */ 27, 3, 3, 24, 3, 69, 3, 3, 3, 3, - /* 40 */ 172, 13, 33, 33, 40, 759, 759, 759, 247, 247, - /* 50 */ 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, - /* 60 */ 247, 247, 247, 247, 247, 247, 247, 247, 299, 299, - /* 70 */ 299, 102, 102, 102, 102, 102, 102, 102, 3, 3, - /* 80 */ 3, 218, 3, 3, 3, 27, 27, 3, 3, 3, - /* 90 */ 3, 296, 296, 292, 27, 3, 3, 3, 3, 3, - /* 100 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 120 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 130 */ 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, - /* 140 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 150 */ 3, 3, 3, 3, 452, 452, 452, 408, 408, 408, - /* 160 */ 408, 452, 452, 414, 413, 423, 422, 424, 441, 448, - /* 170 */ 466, 449, 465, 452, 452, 452, 513, 513, 493, 13, - /* 180 */ 13, 452, 452, 516, 514, 562, 522, 521, 564, 527, - /* 190 */ 530, 493, 40, 452, 452, 538, 538, 452, 538, 452, - /* 200 */ 538, 452, 452, 759, 759, 51, 78, 78, 105, 78, - /* 210 */ 131, 185, 195, 259, 259, 259, 259, 272, 253, 192, - /* 220 */ 192, 192, 192, 231, 239, 109, 308, 274, 274, 313, - /* 230 */ 323, 284, 275, 331, 337, 344, 348, 349, 351, 301, - /* 240 */ 336, 352, 353, 355, 356, 358, 328, 359, 361, 450, - /* 250 */ 394, 371, 377, 100, 314, 318, 461, 462, 325, 330, - /* 260 */ 380, 333, 403, 618, 471, 620, 621, 480, 629, 630, - /* 270 */ 536, 540, 495, 523, 526, 579, 541, 580, 585, 572, - /* 280 */ 573, 589, 591, 592, 594, 595, 577, 597, 598, 600, - /* 290 */ 676, 601, 583, 552, 588, 554, 605, 526, 606, 574, - /* 300 */ 608, 575, 614, 607, 609, 623, 691, 610, 612, 690, - /* 310 */ 695, 696, 697, 698, 699, 700, 701, 622, 693, 631, - /* 320 */ 628, 632, 596, 634, 687, 653, 702, 567, 570, 624, - /* 330 */ 624, 624, 624, 703, 576, 578, 624, 624, 624, 715, - /* 340 */ 717, 641, 624, 721, 722, 723, 724, 725, 726, 727, - /* 350 */ 728, 729, 730, 731, 732, 733, 734, 735, 644, 657, - /* 360 */ 736, 737, 683, 744, + /* 0 */ 186, 113, 113, 260, 260, 98, 250, 266, 266, 193, + /* 10 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + /* 20 */ 23, 23, 23, 0, 122, 266, 316, 316, 316, 9, + /* 30 */ 9, 23, 23, 18, 23, 78, 23, 23, 23, 23, + /* 40 */ 308, 98, 112, 112, 61, 773, 773, 773, 266, 266, + /* 50 */ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, + /* 60 */ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, + /* 70 */ 316, 316, 316, 2, 2, 2, 2, 2, 2, 2, + /* 80 */ 23, 23, 23, 64, 23, 23, 23, 9, 9, 23, + /* 90 */ 23, 23, 23, 286, 286, 313, 9, 23, 23, 23, + /* 100 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + /* 110 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + /* 120 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + /* 130 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + /* 140 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + /* 150 */ 23, 23, 23, 23, 23, 23, 465, 465, 465, 425, + /* 160 */ 425, 425, 425, 465, 465, 427, 426, 428, 430, 424, + /* 170 */ 443, 446, 448, 451, 452, 465, 465, 465, 450, 450, + /* 180 */ 505, 98, 98, 465, 465, 523, 528, 573, 533, 532, + /* 190 */ 572, 535, 538, 505, 61, 465, 465, 546, 546, 465, + /* 200 */ 546, 465, 546, 465, 465, 773, 773, 29, 69, 69, + /* 210 */ 99, 69, 127, 170, 240, 252, 252, 252, 252, 252, + /* 220 */ 252, 271, 283, 40, 40, 40, 40, 233, 256, 156, + /* 230 */ 315, 257, 257, 317, 375, 361, 344, 340, 346, 347, + /* 240 */ 352, 355, 356, 218, 335, 357, 359, 360, 362, 364, + /* 250 */ 366, 367, 368, 227, 395, 378, 377, 323, 324, 328, + /* 260 */ 473, 477, 338, 339, 376, 342, 384, 628, 482, 633, + /* 270 */ 634, 485, 638, 639, 543, 549, 507, 530, 536, 570, + /* 280 */ 534, 571, 574, 556, 560, 596, 598, 599, 604, 605, + /* 290 */ 590, 607, 608, 610, 691, 612, 600, 563, 601, 564, + /* 300 */ 617, 536, 618, 586, 620, 587, 626, 619, 621, 635, + /* 310 */ 705, 622, 624, 704, 709, 710, 711, 712, 713, 714, + /* 320 */ 715, 636, 706, 642, 640, 641, 606, 643, 702, 668, + /* 330 */ 716, 581, 582, 637, 637, 637, 637, 717, 585, 588, + /* 340 */ 637, 637, 637, 730, 733, 654, 637, 735, 736, 737, + /* 350 */ 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, + /* 360 */ 748, 749, 656, 669, 750, 751, 695, 758, }; -#define YY_REDUCE_COUNT 
(204) -#define YY_REDUCE_MIN (-261) -#define YY_REDUCE_MAX (463) +#define YY_REDUCE_COUNT (206) +#define YY_REDUCE_MIN (-266) +#define YY_REDUCE_MAX (479) static const short yy_reduce_ofst[] = { - /* 0 */ -31, -37, -37, -194, -194, -80, -204, -196, -184, -155, - /* 10 */ 29, 15, -164, 83, 148, 149, 158, 161, 163, 174, - /* 20 */ 175, 177, 182, -180, -193, -236, -149, -125, -108, -26, - /* 30 */ 32, -191, 104, -253, 44, 10, 95, 111, 143, 147, - /* 40 */ 64, -161, 179, 196, 178, 118, 204, 200, -261, -251, - /* 50 */ -200, -126, -62, -3, 6, 9, 48, 205, 213, 217, - /* 60 */ 219, 220, 223, 224, 225, 226, 227, 229, 117, 235, - /* 70 */ 248, 257, 260, 261, 262, 263, 264, 265, 306, 307, - /* 80 */ 309, 233, 310, 311, 312, 266, 267, 316, 317, 319, - /* 90 */ 320, 232, 242, 269, 276, 322, 326, 327, 329, 332, - /* 100 */ 334, 335, 338, 339, 340, 341, 342, 343, 345, 346, - /* 110 */ 347, 350, 354, 357, 360, 362, 363, 364, 365, 366, - /* 120 */ 367, 368, 369, 370, 372, 373, 374, 375, 376, 378, - /* 130 */ 379, 381, 382, 383, 384, 385, 386, 387, 388, 389, - /* 140 */ 390, 391, 392, 393, 395, 396, 397, 398, 399, 400, - /* 150 */ 401, 402, 404, 405, 321, 406, 407, 254, 273, 277, - /* 160 */ 278, 409, 410, 268, 291, 295, 270, 302, 279, 271, - /* 170 */ 280, 411, 412, 415, 416, 417, 418, 419, 420, 421, - /* 180 */ 425, 426, 430, 427, 429, 428, 431, 432, 435, 436, - /* 190 */ 438, 433, 434, 437, 439, 440, 442, 453, 451, 454, - /* 200 */ 455, 458, 459, 460, 463, + /* 0 */ -186, 10, 10, -163, -163, 154, -159, -5, 42, -168, + /* 10 */ -112, -115, 117, -41, 30, 51, 84, 95, 167, 169, + /* 20 */ 176, 178, 179, -195, -197, -258, -239, -201, -143, -229, + /* 30 */ -130, -62, 59, -238, -57, 47, 188, 191, 206, -88, + /* 40 */ 48, 207, 115, 160, 119, -99, 22, 215, -266, -261, + /* 50 */ -213, -58, 180, 224, 228, 229, 230, 232, 234, 236, + /* 60 */ 237, 238, 239, 241, 242, 243, 244, 245, 246, 247, + /* 70 */ 27, 35, 265, 254, 267, 276, 277, 278, 279, 280, + /* 80 */ 282, 300, 321, 258, 322, 325, 326, 275, 281, 327, + /* 90 */ 330, 331, 332, 259, 261, 284, 285, 334, 341, 343, + /* 100 */ 345, 348, 349, 350, 351, 353, 354, 358, 363, 365, + /* 110 */ 369, 370, 371, 372, 373, 374, 379, 380, 381, 382, + /* 120 */ 383, 385, 386, 387, 388, 389, 390, 391, 392, 393, + /* 130 */ 394, 396, 397, 398, 399, 400, 401, 402, 403, 404, + /* 140 */ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + /* 150 */ 415, 416, 417, 418, 419, 420, 336, 337, 421, 269, + /* 160 */ 273, 288, 289, 422, 423, 292, 294, 296, 287, 305, + /* 170 */ 318, 429, 290, 432, 431, 433, 434, 435, 436, 437, + /* 180 */ 438, 439, 442, 440, 441, 444, 447, 445, 453, 454, + /* 190 */ 455, 449, 456, 457, 458, 459, 460, 466, 470, 461, + /* 200 */ 471, 474, 472, 476, 478, 479, 475, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 864, 988, 927, 998, 914, 924, 1137, 1137, 1137, 864, - /* 10 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 20 */ 864, 864, 864, 1046, 884, 1137, 864, 864, 864, 864, - /* 30 */ 864, 864, 864, 1061, 864, 924, 864, 864, 864, 864, - /* 40 */ 934, 924, 934, 934, 864, 1041, 972, 990, 864, 864, - /* 50 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 60 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 70 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 80 */ 864, 1048, 1054, 1051, 864, 864, 864, 1056, 864, 864, - /* 90 */ 864, 1080, 1080, 1039, 864, 864, 864, 864, 864, 864, - /* 100 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 110 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 120 */ 
864, 864, 864, 864, 864, 864, 864, 912, 864, 910, - /* 130 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 140 */ 864, 864, 864, 864, 864, 864, 895, 864, 864, 864, - /* 150 */ 864, 864, 864, 882, 886, 886, 886, 864, 864, 864, - /* 160 */ 864, 886, 886, 1087, 1091, 1073, 1085, 1081, 1068, 1066, - /* 170 */ 1064, 1072, 1095, 886, 886, 886, 932, 932, 928, 924, - /* 180 */ 924, 886, 886, 950, 948, 946, 938, 944, 940, 942, - /* 190 */ 936, 915, 864, 886, 886, 922, 922, 886, 922, 886, - /* 200 */ 922, 886, 886, 972, 990, 864, 1096, 1086, 864, 1136, - /* 210 */ 1126, 1125, 864, 1132, 1124, 1123, 1122, 864, 864, 1118, - /* 220 */ 1121, 1120, 1119, 864, 864, 864, 864, 1128, 1127, 864, - /* 230 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 1092, - /* 240 */ 1088, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 250 */ 1098, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 260 */ 1000, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 270 */ 864, 864, 864, 1038, 864, 864, 864, 864, 864, 1050, - /* 280 */ 1049, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 290 */ 864, 864, 1082, 864, 1074, 864, 864, 1012, 864, 864, - /* 300 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 310 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, - /* 320 */ 864, 864, 864, 864, 864, 864, 864, 864, 864, 1155, - /* 330 */ 1150, 1151, 1148, 864, 864, 864, 1147, 1142, 1143, 864, - /* 340 */ 864, 864, 1140, 864, 864, 864, 864, 864, 864, 864, - /* 350 */ 864, 864, 864, 864, 864, 864, 864, 864, 956, 864, - /* 360 */ 893, 891, 864, 864, + /* 0 */ 870, 994, 933, 1004, 920, 930, 1145, 1145, 1145, 870, + /* 10 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 20 */ 870, 870, 870, 1052, 890, 1145, 870, 870, 870, 870, + /* 30 */ 870, 870, 870, 1067, 870, 930, 870, 870, 870, 870, + /* 40 */ 940, 930, 940, 940, 870, 1047, 978, 996, 870, 870, + /* 50 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 60 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 70 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 80 */ 870, 870, 870, 1054, 1060, 1057, 870, 870, 870, 1062, + /* 90 */ 870, 870, 870, 1086, 1086, 1045, 870, 870, 870, 870, + /* 100 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 110 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 120 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 918, + /* 130 */ 870, 916, 870, 870, 870, 870, 870, 870, 870, 870, + /* 140 */ 870, 870, 870, 870, 870, 870, 870, 870, 901, 870, + /* 150 */ 870, 870, 870, 870, 870, 888, 892, 892, 892, 870, + /* 160 */ 870, 870, 870, 892, 892, 1093, 1097, 1079, 1091, 1087, + /* 170 */ 1074, 1072, 1070, 1078, 1101, 892, 892, 892, 938, 938, + /* 180 */ 934, 930, 930, 892, 892, 956, 954, 952, 944, 950, + /* 190 */ 946, 948, 942, 921, 870, 892, 892, 928, 928, 892, + /* 200 */ 928, 892, 928, 892, 892, 978, 996, 870, 1102, 1092, + /* 210 */ 870, 1144, 1132, 1131, 870, 1140, 1139, 1138, 1130, 1129, + /* 220 */ 1128, 870, 870, 1124, 1127, 1126, 1125, 870, 870, 870, + /* 230 */ 870, 1134, 1133, 870, 870, 870, 870, 870, 870, 870, + /* 240 */ 870, 870, 870, 1098, 1094, 870, 870, 870, 870, 870, + /* 250 */ 870, 870, 870, 870, 1104, 870, 870, 870, 870, 870, + /* 260 */ 870, 870, 870, 870, 1006, 870, 870, 870, 870, 870, + /* 270 */ 870, 870, 870, 870, 870, 870, 870, 1044, 870, 870, + /* 280 */ 870, 870, 870, 1056, 1055, 870, 870, 870, 870, 870, + /* 290 */ 870, 870, 870, 870, 870, 870, 1088, 870, 1080, 870, + /* 300 */ 870, 1018, 870, 870, 870, 870, 870, 870, 870, 870, + /* 310 */ 870, 870, 870, 870, 870, 
870, 870, 870, 870, 870, + /* 320 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 330 */ 870, 870, 870, 1163, 1158, 1159, 1156, 870, 870, 870, + /* 340 */ 1155, 1150, 1151, 870, 870, 870, 1148, 870, 870, 870, + /* 350 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870, + /* 360 */ 870, 870, 962, 870, 899, 897, 870, 870, }; /********** End of lemon-generated parsing tables *****************************/ @@ -529,6 +545,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* NOTNULL => nothing */ 0, /* IS => nothing */ 1, /* LIKE => ID */ + 1, /* MATCH => ID */ + 1, /* NMATCH => ID */ 1, /* GLOB => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ @@ -682,7 +700,6 @@ static const YYCODETYPE yyFallback[] = { 1, /* IMMEDIATE => ID */ 1, /* INITIALLY => ID */ 1, /* INSTEAD => ID */ - 1, /* MATCH => ID */ 1, /* KEY => ID */ 1, /* OF => ID */ 1, /* RAISE => ID */ @@ -742,6 +759,7 @@ struct yyParser { int yyerrcnt; /* Shifts left before out of the error */ #endif ParseARG_SDECL /* A place to hold %extra_argument */ + ParseCTX_SDECL /* A place to hold %extra_context */ #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ @@ -811,181 +829,181 @@ static const char *const yyTokenName[] = { /* 19 */ "NOTNULL", /* 20 */ "IS", /* 21 */ "LIKE", - /* 22 */ "GLOB", - /* 23 */ "BETWEEN", - /* 24 */ "IN", - /* 25 */ "GT", - /* 26 */ "GE", - /* 27 */ "LT", - /* 28 */ "LE", - /* 29 */ "BITAND", - /* 30 */ "BITOR", - /* 31 */ "LSHIFT", - /* 32 */ "RSHIFT", - /* 33 */ "PLUS", - /* 34 */ "MINUS", - /* 35 */ "DIVIDE", - /* 36 */ "TIMES", - /* 37 */ "STAR", - /* 38 */ "SLASH", - /* 39 */ "REM", - /* 40 */ "CONCAT", - /* 41 */ "UMINUS", - /* 42 */ "UPLUS", - /* 43 */ "BITNOT", - /* 44 */ "SHOW", - /* 45 */ "DATABASES", - /* 46 */ "TOPICS", - /* 47 */ "FUNCTIONS", - /* 48 */ "MNODES", - /* 49 */ "DNODES", - /* 50 */ "ACCOUNTS", - /* 51 */ "USERS", - /* 52 */ "MODULES", - /* 53 */ "QUERIES", - /* 54 */ "CONNECTIONS", - /* 55 */ "STREAMS", - /* 56 */ "VARIABLES", - /* 57 */ "SCORES", - /* 58 */ "GRANTS", - /* 59 */ "VNODES", - /* 60 */ "DOT", - /* 61 */ "CREATE", - /* 62 */ "TABLE", - /* 63 */ "STABLE", - /* 64 */ "DATABASE", - /* 65 */ "TABLES", - /* 66 */ "STABLES", - /* 67 */ "VGROUPS", - /* 68 */ "DROP", - /* 69 */ "TOPIC", - /* 70 */ "FUNCTION", - /* 71 */ "DNODE", - /* 72 */ "USER", - /* 73 */ "ACCOUNT", - /* 74 */ "USE", - /* 75 */ "DESCRIBE", - /* 76 */ "DESC", - /* 77 */ "ALTER", - /* 78 */ "PASS", - /* 79 */ "PRIVILEGE", - /* 80 */ "LOCAL", - /* 81 */ "COMPACT", - /* 82 */ "LP", - /* 83 */ "RP", - /* 84 */ "IF", - /* 85 */ "EXISTS", - /* 86 */ "AS", - /* 87 */ "OUTPUTTYPE", - /* 88 */ "AGGREGATE", - /* 89 */ "BUFSIZE", - /* 90 */ "PPS", - /* 91 */ "TSERIES", - /* 92 */ "DBS", - /* 93 */ "STORAGE", - /* 94 */ "QTIME", - /* 95 */ "CONNS", - /* 96 */ "STATE", - /* 97 */ "COMMA", - /* 98 */ "KEEP", - /* 99 */ "CACHE", - /* 100 */ "REPLICA", - /* 101 */ "QUORUM", - /* 102 */ "DAYS", - /* 103 */ "MINROWS", - /* 104 */ "MAXROWS", - /* 105 */ "BLOCKS", - /* 106 */ "CTIME", - /* 107 */ "WAL", - /* 108 */ "FSYNC", - /* 109 */ "COMP", - /* 110 */ "PRECISION", - /* 111 */ "UPDATE", - /* 112 */ "CACHELAST", - /* 113 */ "PARTITIONS", - /* 114 */ "UNSIGNED", - /* 115 */ "TAGS", - /* 116 */ "USING", - /* 117 */ "NULL", - /* 118 */ "NOW", - /* 119 */ "SELECT", - /* 120 */ "UNION", - /* 121 */ "ALL", - /* 122 */ "DISTINCT", - /* 123 */ "FROM", - /* 124 */ "VARIABLE", - /* 125 */ "INTERVAL", - /* 126 */ "EVERY", - /* 127 */ "SESSION", - /* 128 */ 
"STATE_WINDOW", - /* 129 */ "FILL", - /* 130 */ "SLIDING", - /* 131 */ "ORDER", - /* 132 */ "BY", - /* 133 */ "ASC", - /* 134 */ "GROUP", - /* 135 */ "HAVING", - /* 136 */ "LIMIT", - /* 137 */ "OFFSET", - /* 138 */ "SLIMIT", - /* 139 */ "SOFFSET", - /* 140 */ "WHERE", - /* 141 */ "RESET", - /* 142 */ "QUERY", - /* 143 */ "SYNCDB", - /* 144 */ "ADD", - /* 145 */ "COLUMN", - /* 146 */ "MODIFY", - /* 147 */ "TAG", - /* 148 */ "CHANGE", - /* 149 */ "SET", - /* 150 */ "KILL", - /* 151 */ "CONNECTION", - /* 152 */ "STREAM", - /* 153 */ "COLON", - /* 154 */ "ABORT", - /* 155 */ "AFTER", - /* 156 */ "ATTACH", - /* 157 */ "BEFORE", - /* 158 */ "BEGIN", - /* 159 */ "CASCADE", - /* 160 */ "CLUSTER", - /* 161 */ "CONFLICT", - /* 162 */ "COPY", - /* 163 */ "DEFERRED", - /* 164 */ "DELIMITERS", - /* 165 */ "DETACH", - /* 166 */ "EACH", - /* 167 */ "END", - /* 168 */ "EXPLAIN", - /* 169 */ "FAIL", - /* 170 */ "FOR", - /* 171 */ "IGNORE", - /* 172 */ "IMMEDIATE", - /* 173 */ "INITIALLY", - /* 174 */ "INSTEAD", - /* 175 */ "MATCH", - /* 176 */ "KEY", - /* 177 */ "OF", - /* 178 */ "RAISE", - /* 179 */ "REPLACE", - /* 180 */ "RESTRICT", - /* 181 */ "ROW", - /* 182 */ "STATEMENT", - /* 183 */ "TRIGGER", - /* 184 */ "VIEW", - /* 185 */ "IPTOKEN", - /* 186 */ "SEMI", - /* 187 */ "NONE", - /* 188 */ "PREV", - /* 189 */ "LINEAR", - /* 190 */ "IMPORT", - /* 191 */ "TBNAME", - /* 192 */ "JOIN", - /* 193 */ "INSERT", - /* 194 */ "INTO", - /* 195 */ "VALUES", - /* 196 */ "error", + /* 22 */ "MATCH", + /* 23 */ "NMATCH", + /* 24 */ "GLOB", + /* 25 */ "BETWEEN", + /* 26 */ "IN", + /* 27 */ "GT", + /* 28 */ "GE", + /* 29 */ "LT", + /* 30 */ "LE", + /* 31 */ "BITAND", + /* 32 */ "BITOR", + /* 33 */ "LSHIFT", + /* 34 */ "RSHIFT", + /* 35 */ "PLUS", + /* 36 */ "MINUS", + /* 37 */ "DIVIDE", + /* 38 */ "TIMES", + /* 39 */ "STAR", + /* 40 */ "SLASH", + /* 41 */ "REM", + /* 42 */ "CONCAT", + /* 43 */ "UMINUS", + /* 44 */ "UPLUS", + /* 45 */ "BITNOT", + /* 46 */ "SHOW", + /* 47 */ "DATABASES", + /* 48 */ "TOPICS", + /* 49 */ "FUNCTIONS", + /* 50 */ "MNODES", + /* 51 */ "DNODES", + /* 52 */ "ACCOUNTS", + /* 53 */ "USERS", + /* 54 */ "MODULES", + /* 55 */ "QUERIES", + /* 56 */ "CONNECTIONS", + /* 57 */ "STREAMS", + /* 58 */ "VARIABLES", + /* 59 */ "SCORES", + /* 60 */ "GRANTS", + /* 61 */ "VNODES", + /* 62 */ "DOT", + /* 63 */ "CREATE", + /* 64 */ "TABLE", + /* 65 */ "STABLE", + /* 66 */ "DATABASE", + /* 67 */ "TABLES", + /* 68 */ "STABLES", + /* 69 */ "VGROUPS", + /* 70 */ "DROP", + /* 71 */ "TOPIC", + /* 72 */ "FUNCTION", + /* 73 */ "DNODE", + /* 74 */ "USER", + /* 75 */ "ACCOUNT", + /* 76 */ "USE", + /* 77 */ "DESCRIBE", + /* 78 */ "DESC", + /* 79 */ "ALTER", + /* 80 */ "PASS", + /* 81 */ "PRIVILEGE", + /* 82 */ "LOCAL", + /* 83 */ "COMPACT", + /* 84 */ "LP", + /* 85 */ "RP", + /* 86 */ "IF", + /* 87 */ "EXISTS", + /* 88 */ "AS", + /* 89 */ "OUTPUTTYPE", + /* 90 */ "AGGREGATE", + /* 91 */ "BUFSIZE", + /* 92 */ "PPS", + /* 93 */ "TSERIES", + /* 94 */ "DBS", + /* 95 */ "STORAGE", + /* 96 */ "QTIME", + /* 97 */ "CONNS", + /* 98 */ "STATE", + /* 99 */ "COMMA", + /* 100 */ "KEEP", + /* 101 */ "CACHE", + /* 102 */ "REPLICA", + /* 103 */ "QUORUM", + /* 104 */ "DAYS", + /* 105 */ "MINROWS", + /* 106 */ "MAXROWS", + /* 107 */ "BLOCKS", + /* 108 */ "CTIME", + /* 109 */ "WAL", + /* 110 */ "FSYNC", + /* 111 */ "COMP", + /* 112 */ "PRECISION", + /* 113 */ "UPDATE", + /* 114 */ "CACHELAST", + /* 115 */ "PARTITIONS", + /* 116 */ "UNSIGNED", + /* 117 */ "TAGS", + /* 118 */ "USING", + /* 119 */ "NULL", + /* 120 */ "NOW", + /* 121 */ "SELECT", 
+ /* 122 */ "UNION", + /* 123 */ "ALL", + /* 124 */ "DISTINCT", + /* 125 */ "FROM", + /* 126 */ "VARIABLE", + /* 127 */ "INTERVAL", + /* 128 */ "EVERY", + /* 129 */ "SESSION", + /* 130 */ "STATE_WINDOW", + /* 131 */ "FILL", + /* 132 */ "SLIDING", + /* 133 */ "ORDER", + /* 134 */ "BY", + /* 135 */ "ASC", + /* 136 */ "GROUP", + /* 137 */ "HAVING", + /* 138 */ "LIMIT", + /* 139 */ "OFFSET", + /* 140 */ "SLIMIT", + /* 141 */ "SOFFSET", + /* 142 */ "WHERE", + /* 143 */ "RESET", + /* 144 */ "QUERY", + /* 145 */ "SYNCDB", + /* 146 */ "ADD", + /* 147 */ "COLUMN", + /* 148 */ "MODIFY", + /* 149 */ "TAG", + /* 150 */ "CHANGE", + /* 151 */ "SET", + /* 152 */ "KILL", + /* 153 */ "CONNECTION", + /* 154 */ "STREAM", + /* 155 */ "COLON", + /* 156 */ "ABORT", + /* 157 */ "AFTER", + /* 158 */ "ATTACH", + /* 159 */ "BEFORE", + /* 160 */ "BEGIN", + /* 161 */ "CASCADE", + /* 162 */ "CLUSTER", + /* 163 */ "CONFLICT", + /* 164 */ "COPY", + /* 165 */ "DEFERRED", + /* 166 */ "DELIMITERS", + /* 167 */ "DETACH", + /* 168 */ "EACH", + /* 169 */ "END", + /* 170 */ "EXPLAIN", + /* 171 */ "FAIL", + /* 172 */ "FOR", + /* 173 */ "IGNORE", + /* 174 */ "IMMEDIATE", + /* 175 */ "INITIALLY", + /* 176 */ "INSTEAD", + /* 177 */ "KEY", + /* 178 */ "OF", + /* 179 */ "RAISE", + /* 180 */ "REPLACE", + /* 181 */ "RESTRICT", + /* 182 */ "ROW", + /* 183 */ "STATEMENT", + /* 184 */ "TRIGGER", + /* 185 */ "VIEW", + /* 186 */ "IPTOKEN", + /* 187 */ "SEMI", + /* 188 */ "NONE", + /* 189 */ "PREV", + /* 190 */ "LINEAR", + /* 191 */ "IMPORT", + /* 192 */ "TBNAME", + /* 193 */ "JOIN", + /* 194 */ "INSERT", + /* 195 */ "INTO", + /* 196 */ "VALUES", /* 197 */ "program", /* 198 */ "cmd", /* 199 */ "ids", @@ -1340,32 +1358,34 @@ static const char *const yyRuleName[] = { /* 263 */ "expr ::= expr SLASH expr", /* 264 */ "expr ::= expr REM expr", /* 265 */ "expr ::= expr LIKE expr", - /* 266 */ "expr ::= expr IN LP exprlist RP", - /* 267 */ "exprlist ::= exprlist COMMA expritem", - /* 268 */ "exprlist ::= expritem", - /* 269 */ "expritem ::= expr", - /* 270 */ "expritem ::=", - /* 271 */ "cmd ::= RESET QUERY CACHE", - /* 272 */ "cmd ::= SYNCDB ids REPLICA", - /* 273 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 274 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 275 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", - /* 276 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 277 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 278 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 279 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 280 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", - /* 281 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 282 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 283 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", - /* 284 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 285 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 286 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 287 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", - /* 288 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", - /* 289 */ "cmd ::= KILL CONNECTION INTEGER", - /* 290 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 291 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 266 */ "expr ::= expr MATCH expr", + /* 267 */ "expr ::= expr NMATCH expr", + /* 268 */ "expr ::= expr IN LP exprlist RP", + /* 269 */ "exprlist ::= exprlist COMMA expritem", + 
/* 270 */ "exprlist ::= expritem", + /* 271 */ "expritem ::= expr", + /* 272 */ "expritem ::=", + /* 273 */ "cmd ::= RESET QUERY CACHE", + /* 274 */ "cmd ::= SYNCDB ids REPLICA", + /* 275 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 276 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 277 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", + /* 278 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 279 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 280 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 281 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 282 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", + /* 283 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 284 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 285 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 286 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 287 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 288 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 289 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 290 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 291 */ "cmd ::= KILL CONNECTION INTEGER", + /* 292 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 293 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1414,28 +1434,29 @@ static int yyGrowStack(yyParser *p){ /* Initialize a new parser that has already been allocated. */ -void ParseInit(void *yypParser){ - yyParser *pParser = (yyParser*)yypParser; +void ParseInit(void *yypRawParser ParseCTX_PDECL){ + yyParser *yypParser = (yyParser*)yypRawParser; + ParseCTX_STORE #ifdef YYTRACKMAXSTACKDEPTH - pParser->yyhwm = 0; + yypParser->yyhwm = 0; #endif #if YYSTACKDEPTH<=0 - pParser->yytos = NULL; - pParser->yystack = NULL; - pParser->yystksz = 0; - if( yyGrowStack(pParser) ){ - pParser->yystack = &pParser->yystk0; - pParser->yystksz = 1; + yypParser->yytos = NULL; + yypParser->yystack = NULL; + yypParser->yystksz = 0; + if( yyGrowStack(yypParser) ){ + yypParser->yystack = &yypParser->yystk0; + yypParser->yystksz = 1; } #endif #ifndef YYNOERRORRECOVERY - pParser->yyerrcnt = -1; + yypParser->yyerrcnt = -1; #endif - pParser->yytos = pParser->yystack; - pParser->yystack[0].stateno = 0; - pParser->yystack[0].major = 0; + yypParser->yytos = yypParser->yystack; + yypParser->yystack[0].stateno = 0; + yypParser->yystack[0].major = 0; #if YYSTACKDEPTH>0 - pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1]; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; #endif } @@ -1452,11 +1473,14 @@ void ParseInit(void *yypParser){ ** A pointer to a parser. This pointer is used in subsequent calls ** to Parse and ParseFree. 
*/ -void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ - yyParser *pParser; - pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( pParser ) ParseInit(pParser); - return pParser; +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ + yyParser *yypParser; + yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( yypParser ){ + ParseCTX_STORE + ParseInit(yypParser ParseCTX_PARAM); + } + return (void*)yypParser; } #endif /* Parse_ENGINEALWAYSONSTACK */ @@ -1473,7 +1497,8 @@ static void yy_destructor( YYCODETYPE yymajor, /* Type code for object to destroy */ YYMINORTYPE *yypminor /* The object to be destroyed */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH switch( yymajor ){ /* Here is inserted the actions which take place when a ** terminal or non-terminal is destroyed. This can happen @@ -1490,7 +1515,9 @@ static void yy_destructor( case 250: /* selcollist */ case 264: /* sclp */ { +#line 762 "sql.y" tSqlExprListDestroy((yypminor->yy421)); +#line 1521 "sql.c" } break; case 221: /* intitemlist */ @@ -1504,24 +1531,32 @@ tSqlExprListDestroy((yypminor->yy421)); case 272: /* sortlist */ case 276: /* grouplist */ { +#line 256 "sql.y" taosArrayDestroy((yypminor->yy421)); +#line 1537 "sql.c" } break; case 242: /* create_table_list */ { +#line 364 "sql.y" destroyCreateTableSql((yypminor->yy438)); +#line 1544 "sql.c" } break; case 247: /* select */ { +#line 484 "sql.y" destroySqlNode((yypminor->yy56)); +#line 1551 "sql.c" } break; case 251: /* from */ case 268: /* tablelist */ case 269: /* sub */ { +#line 539 "sql.y" destroyRelationInfo((yypminor->yy8)); +#line 1560 "sql.c" } break; case 252: /* where_opt */ @@ -1529,17 +1564,23 @@ destroyRelationInfo((yypminor->yy8)); case 266: /* expr */ case 277: /* expritem */ { +#line 691 "sql.y" tSqlExprDestroy((yypminor->yy439)); +#line 1570 "sql.c" } break; case 263: /* union */ { +#line 492 "sql.y" destroyAllSqlNode((yypminor->yy421)); +#line 1577 "sql.c" } break; case 273: /* sortitem */ { +#line 624 "sql.y" tVariantDestroy(&(yypminor->yy430)); +#line 1584 "sql.c" } break; /********* End destructor definitions *****************************************/ @@ -1651,13 +1692,12 @@ int ParseCoverage(FILE *out){ ** Find the appropriate action for a parser given the terminal ** look-ahead token iLookAhead. 
 */
-static unsigned int yy_find_shift_action(
-  yyParser *pParser,        /* The parser */
-  YYCODETYPE iLookAhead     /* The look-ahead token */
+static YYACTIONTYPE yy_find_shift_action(
+  YYCODETYPE iLookAhead,    /* The look-ahead token */
+  YYACTIONTYPE stateno      /* Current state number */
 ){
   int i;
-  int stateno = pParser->yytos->stateno;
-
+  if( stateno>YY_MAX_SHIFT ) return stateno;
   assert( stateno <= YY_SHIFT_COUNT );
 #if defined(YYCOVERAGE)
@@ -1665,15 +1705,19 @@ static unsigned int yy_find_shift_action(
 #endif
   do{
     i = yy_shift_ofst[stateno];
-    assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) );
+    assert( i>=0 );
+    assert( i<=YY_ACTTAB_COUNT );
+    assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
     assert( iLookAhead!=YYNOCODE );
     assert( iLookAhead < YYNTOKEN );
     i += iLookAhead;
+    assert( i<(int)YY_NLOOKAHEAD );
     if( yy_lookahead[i]!=iLookAhead ){
 #ifdef YYFALLBACK
       YYCODETYPE iFallback;            /* Fallback token */
-      if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
-             && (iFallback = yyFallback[iLookAhead])!=0 ){
+      assert( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) );
+      iFallback = yyFallback[iLookAhead];
+      if( iFallback!=0 ){
 #ifndef NDEBUG
         if( yyTraceFILE ){
           fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
@@ -1688,15 +1732,8 @@ static unsigned int yy_find_shift_action(
 #ifdef YYWILDCARD
       {
         int j = i - iLookAhead + YYWILDCARD;
-        if( 
-#if YY_SHIFT_MIN+YYWILDCARD<0
-          j>=0 &&
-#endif
-#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
-          j<YY_ACTTAB_COUNT &&
-#endif
-          yy_lookahead[j]==YYWILDCARD && iLookAhead>0
-        ){
+        assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
+        if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
 #ifndef NDEBUG
           if( yyTraceFILE ){
             fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
@@ -1710,6 +1747,7 @@ static unsigned int yy_find_shift_action(
 #endif /* YYWILDCARD */
       return yy_default[stateno];
     }else{
+      assert( i>=0 && i<YY_ACTTAB_COUNT );
       return yy_action[i];
     }
   }while(1);
   yytos = yypParser->yytos;
-  yytos->stateno = (YYACTIONTYPE)yyNewState;
-  yytos->major = (YYCODETYPE)yyMajor;
+  yytos->stateno = yyNewState;
+  yytos->major = yyMajor;
   yytos->minor.yy0 = yyMinor;
   yyTraceShift(yypParser, yyNewState, "Shift");
 }
-/* The following table contains information about every rule that
-** is used during the reduce.
-*/ -static const struct { - YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ - signed char nrhs; /* Negative of the number of RHS symbols in the rule */ -} yyRuleInfo[] = { - { 197, -1 }, /* (0) program ::= cmd */ - { 198, -2 }, /* (1) cmd ::= SHOW DATABASES */ - { 198, -2 }, /* (2) cmd ::= SHOW TOPICS */ - { 198, -2 }, /* (3) cmd ::= SHOW FUNCTIONS */ - { 198, -2 }, /* (4) cmd ::= SHOW MNODES */ - { 198, -2 }, /* (5) cmd ::= SHOW DNODES */ - { 198, -2 }, /* (6) cmd ::= SHOW ACCOUNTS */ - { 198, -2 }, /* (7) cmd ::= SHOW USERS */ - { 198, -2 }, /* (8) cmd ::= SHOW MODULES */ - { 198, -2 }, /* (9) cmd ::= SHOW QUERIES */ - { 198, -2 }, /* (10) cmd ::= SHOW CONNECTIONS */ - { 198, -2 }, /* (11) cmd ::= SHOW STREAMS */ - { 198, -2 }, /* (12) cmd ::= SHOW VARIABLES */ - { 198, -2 }, /* (13) cmd ::= SHOW SCORES */ - { 198, -2 }, /* (14) cmd ::= SHOW GRANTS */ - { 198, -2 }, /* (15) cmd ::= SHOW VNODES */ - { 198, -3 }, /* (16) cmd ::= SHOW VNODES ids */ - { 200, 0 }, /* (17) dbPrefix ::= */ - { 200, -2 }, /* (18) dbPrefix ::= ids DOT */ - { 201, 0 }, /* (19) cpxName ::= */ - { 201, -2 }, /* (20) cpxName ::= DOT ids */ - { 198, -5 }, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ - { 198, -5 }, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ - { 198, -4 }, /* (23) cmd ::= SHOW CREATE DATABASE ids */ - { 198, -3 }, /* (24) cmd ::= SHOW dbPrefix TABLES */ - { 198, -5 }, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 198, -3 }, /* (26) cmd ::= SHOW dbPrefix STABLES */ - { 198, -5 }, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 198, -3 }, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ - { 198, -4 }, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 198, -5 }, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ - { 198, -5 }, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ - { 198, -4 }, /* (32) cmd ::= DROP DATABASE ifexists ids */ - { 198, -4 }, /* (33) cmd ::= DROP TOPIC ifexists ids */ - { 198, -3 }, /* (34) cmd ::= DROP FUNCTION ids */ - { 198, -3 }, /* (35) cmd ::= DROP DNODE ids */ - { 198, -3 }, /* (36) cmd ::= DROP USER ids */ - { 198, -3 }, /* (37) cmd ::= DROP ACCOUNT ids */ - { 198, -2 }, /* (38) cmd ::= USE ids */ - { 198, -3 }, /* (39) cmd ::= DESCRIBE ids cpxName */ - { 198, -3 }, /* (40) cmd ::= DESC ids cpxName */ - { 198, -5 }, /* (41) cmd ::= ALTER USER ids PASS ids */ - { 198, -5 }, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 198, -4 }, /* (43) cmd ::= ALTER DNODE ids ids */ - { 198, -5 }, /* (44) cmd ::= ALTER DNODE ids ids ids */ - { 198, -3 }, /* (45) cmd ::= ALTER LOCAL ids */ - { 198, -4 }, /* (46) cmd ::= ALTER LOCAL ids ids */ - { 198, -4 }, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 198, -4 }, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ - { 198, -4 }, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 198, -6 }, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 198, -6 }, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ - { 199, -1 }, /* (52) ids ::= ID */ - { 199, -1 }, /* (53) ids ::= STRING */ - { 202, -2 }, /* (54) ifexists ::= IF EXISTS */ - { 202, 0 }, /* (55) ifexists ::= */ - { 207, -3 }, /* (56) ifnotexists ::= IF NOT EXISTS */ - { 207, 0 }, /* (57) ifnotexists ::= */ - { 198, -3 }, /* (58) cmd ::= CREATE DNODE ids */ - { 198, -6 }, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 198, -5 }, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 198, -5 }, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - { 198, -8 }, /* (62) cmd ::= CREATE FUNCTION ids AS ids 
OUTPUTTYPE typename bufsize */ - { 198, -9 }, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - { 198, -5 }, /* (64) cmd ::= CREATE USER ids PASS ids */ - { 211, 0 }, /* (65) bufsize ::= */ - { 211, -2 }, /* (66) bufsize ::= BUFSIZE INTEGER */ - { 212, 0 }, /* (67) pps ::= */ - { 212, -2 }, /* (68) pps ::= PPS INTEGER */ - { 213, 0 }, /* (69) tseries ::= */ - { 213, -2 }, /* (70) tseries ::= TSERIES INTEGER */ - { 214, 0 }, /* (71) dbs ::= */ - { 214, -2 }, /* (72) dbs ::= DBS INTEGER */ - { 215, 0 }, /* (73) streams ::= */ - { 215, -2 }, /* (74) streams ::= STREAMS INTEGER */ - { 216, 0 }, /* (75) storage ::= */ - { 216, -2 }, /* (76) storage ::= STORAGE INTEGER */ - { 217, 0 }, /* (77) qtime ::= */ - { 217, -2 }, /* (78) qtime ::= QTIME INTEGER */ - { 218, 0 }, /* (79) users ::= */ - { 218, -2 }, /* (80) users ::= USERS INTEGER */ - { 219, 0 }, /* (81) conns ::= */ - { 219, -2 }, /* (82) conns ::= CONNS INTEGER */ - { 220, 0 }, /* (83) state ::= */ - { 220, -2 }, /* (84) state ::= STATE ids */ - { 205, -9 }, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 221, -3 }, /* (86) intitemlist ::= intitemlist COMMA intitem */ - { 221, -1 }, /* (87) intitemlist ::= intitem */ - { 222, -1 }, /* (88) intitem ::= INTEGER */ - { 223, -2 }, /* (89) keep ::= KEEP intitemlist */ - { 224, -2 }, /* (90) cache ::= CACHE INTEGER */ - { 225, -2 }, /* (91) replica ::= REPLICA INTEGER */ - { 226, -2 }, /* (92) quorum ::= QUORUM INTEGER */ - { 227, -2 }, /* (93) days ::= DAYS INTEGER */ - { 228, -2 }, /* (94) minrows ::= MINROWS INTEGER */ - { 229, -2 }, /* (95) maxrows ::= MAXROWS INTEGER */ - { 230, -2 }, /* (96) blocks ::= BLOCKS INTEGER */ - { 231, -2 }, /* (97) ctime ::= CTIME INTEGER */ - { 232, -2 }, /* (98) wal ::= WAL INTEGER */ - { 233, -2 }, /* (99) fsync ::= FSYNC INTEGER */ - { 234, -2 }, /* (100) comp ::= COMP INTEGER */ - { 235, -2 }, /* (101) prec ::= PRECISION STRING */ - { 236, -2 }, /* (102) update ::= UPDATE INTEGER */ - { 237, -2 }, /* (103) cachelast ::= CACHELAST INTEGER */ - { 238, -2 }, /* (104) partitions ::= PARTITIONS INTEGER */ - { 208, 0 }, /* (105) db_optr ::= */ - { 208, -2 }, /* (106) db_optr ::= db_optr cache */ - { 208, -2 }, /* (107) db_optr ::= db_optr replica */ - { 208, -2 }, /* (108) db_optr ::= db_optr quorum */ - { 208, -2 }, /* (109) db_optr ::= db_optr days */ - { 208, -2 }, /* (110) db_optr ::= db_optr minrows */ - { 208, -2 }, /* (111) db_optr ::= db_optr maxrows */ - { 208, -2 }, /* (112) db_optr ::= db_optr blocks */ - { 208, -2 }, /* (113) db_optr ::= db_optr ctime */ - { 208, -2 }, /* (114) db_optr ::= db_optr wal */ - { 208, -2 }, /* (115) db_optr ::= db_optr fsync */ - { 208, -2 }, /* (116) db_optr ::= db_optr comp */ - { 208, -2 }, /* (117) db_optr ::= db_optr prec */ - { 208, -2 }, /* (118) db_optr ::= db_optr keep */ - { 208, -2 }, /* (119) db_optr ::= db_optr update */ - { 208, -2 }, /* (120) db_optr ::= db_optr cachelast */ - { 209, -1 }, /* (121) topic_optr ::= db_optr */ - { 209, -2 }, /* (122) topic_optr ::= topic_optr partitions */ - { 203, 0 }, /* (123) alter_db_optr ::= */ - { 203, -2 }, /* (124) alter_db_optr ::= alter_db_optr replica */ - { 203, -2 }, /* (125) alter_db_optr ::= alter_db_optr quorum */ - { 203, -2 }, /* (126) alter_db_optr ::= alter_db_optr keep */ - { 203, -2 }, /* (127) alter_db_optr ::= alter_db_optr blocks */ - { 203, -2 }, /* (128) alter_db_optr ::= alter_db_optr comp */ - { 203, -2 }, /* (129) alter_db_optr ::= alter_db_optr update */ - { 203, -2 }, /* 
(130) alter_db_optr ::= alter_db_optr cachelast */ - { 204, -1 }, /* (131) alter_topic_optr ::= alter_db_optr */ - { 204, -2 }, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ - { 210, -1 }, /* (133) typename ::= ids */ - { 210, -4 }, /* (134) typename ::= ids LP signed RP */ - { 210, -2 }, /* (135) typename ::= ids UNSIGNED */ - { 239, -1 }, /* (136) signed ::= INTEGER */ - { 239, -2 }, /* (137) signed ::= PLUS INTEGER */ - { 239, -2 }, /* (138) signed ::= MINUS INTEGER */ - { 198, -3 }, /* (139) cmd ::= CREATE TABLE create_table_args */ - { 198, -3 }, /* (140) cmd ::= CREATE TABLE create_stable_args */ - { 198, -3 }, /* (141) cmd ::= CREATE STABLE create_stable_args */ - { 198, -3 }, /* (142) cmd ::= CREATE TABLE create_table_list */ - { 242, -1 }, /* (143) create_table_list ::= create_from_stable */ - { 242, -2 }, /* (144) create_table_list ::= create_table_list create_from_stable */ - { 240, -6 }, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - { 241, -10 }, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - { 243, -10 }, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 243, -13 }, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - { 246, -3 }, /* (149) tagNamelist ::= tagNamelist COMMA ids */ - { 246, -1 }, /* (150) tagNamelist ::= ids */ - { 240, -5 }, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */ - { 244, -3 }, /* (152) columnlist ::= columnlist COMMA column */ - { 244, -1 }, /* (153) columnlist ::= column */ - { 248, -2 }, /* (154) column ::= ids typename */ - { 245, -3 }, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ - { 245, -1 }, /* (156) tagitemlist ::= tagitem */ - { 249, -1 }, /* (157) tagitem ::= INTEGER */ - { 249, -1 }, /* (158) tagitem ::= FLOAT */ - { 249, -1 }, /* (159) tagitem ::= STRING */ - { 249, -1 }, /* (160) tagitem ::= BOOL */ - { 249, -1 }, /* (161) tagitem ::= NULL */ - { 249, -1 }, /* (162) tagitem ::= NOW */ - { 249, -2 }, /* (163) tagitem ::= MINUS INTEGER */ - { 249, -2 }, /* (164) tagitem ::= MINUS FLOAT */ - { 249, -2 }, /* (165) tagitem ::= PLUS INTEGER */ - { 249, -2 }, /* (166) tagitem ::= PLUS FLOAT */ - { 247, -14 }, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ - { 247, -3 }, /* (168) select ::= LP select RP */ - { 263, -1 }, /* (169) union ::= select */ - { 263, -4 }, /* (170) union ::= union UNION ALL select */ - { 198, -1 }, /* (171) cmd ::= union */ - { 247, -2 }, /* (172) select ::= SELECT selcollist */ - { 264, -2 }, /* (173) sclp ::= selcollist COMMA */ - { 264, 0 }, /* (174) sclp ::= */ - { 250, -4 }, /* (175) selcollist ::= sclp distinct expr as */ - { 250, -2 }, /* (176) selcollist ::= sclp STAR */ - { 267, -2 }, /* (177) as ::= AS ids */ - { 267, -1 }, /* (178) as ::= ids */ - { 267, 0 }, /* (179) as ::= */ - { 265, -1 }, /* (180) distinct ::= DISTINCT */ - { 265, 0 }, /* (181) distinct ::= */ - { 251, -2 }, /* (182) from ::= FROM tablelist */ - { 251, -2 }, /* (183) from ::= FROM sub */ - { 269, -3 }, /* (184) sub ::= LP union RP */ - { 269, -4 }, /* (185) sub ::= LP union RP ids */ - { 269, -6 }, /* (186) sub ::= sub COMMA LP union RP ids */ - { 268, -2 }, /* (187) tablelist ::= ids cpxName */ - { 268, -3 }, /* (188) tablelist ::= ids cpxName ids */ - { 268, -4 
}, /* (189) tablelist ::= tablelist COMMA ids cpxName */ - { 268, -5 }, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ - { 270, -1 }, /* (191) tmvar ::= VARIABLE */ - { 253, -4 }, /* (192) interval_option ::= intervalKey LP tmvar RP */ - { 253, -6 }, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ - { 253, 0 }, /* (194) interval_option ::= */ - { 271, -1 }, /* (195) intervalKey ::= INTERVAL */ - { 271, -1 }, /* (196) intervalKey ::= EVERY */ - { 255, 0 }, /* (197) session_option ::= */ - { 255, -7 }, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - { 256, 0 }, /* (199) windowstate_option ::= */ - { 256, -4 }, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */ - { 257, 0 }, /* (201) fill_opt ::= */ - { 257, -6 }, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 257, -4 }, /* (203) fill_opt ::= FILL LP ID RP */ - { 254, -4 }, /* (204) sliding_opt ::= SLIDING LP tmvar RP */ - { 254, 0 }, /* (205) sliding_opt ::= */ - { 260, 0 }, /* (206) orderby_opt ::= */ - { 260, -3 }, /* (207) orderby_opt ::= ORDER BY sortlist */ - { 272, -4 }, /* (208) sortlist ::= sortlist COMMA item sortorder */ - { 272, -2 }, /* (209) sortlist ::= item sortorder */ - { 274, -2 }, /* (210) item ::= ids cpxName */ - { 275, -1 }, /* (211) sortorder ::= ASC */ - { 275, -1 }, /* (212) sortorder ::= DESC */ - { 275, 0 }, /* (213) sortorder ::= */ - { 258, 0 }, /* (214) groupby_opt ::= */ - { 258, -3 }, /* (215) groupby_opt ::= GROUP BY grouplist */ - { 276, -3 }, /* (216) grouplist ::= grouplist COMMA item */ - { 276, -1 }, /* (217) grouplist ::= item */ - { 259, 0 }, /* (218) having_opt ::= */ - { 259, -2 }, /* (219) having_opt ::= HAVING expr */ - { 262, 0 }, /* (220) limit_opt ::= */ - { 262, -2 }, /* (221) limit_opt ::= LIMIT signed */ - { 262, -4 }, /* (222) limit_opt ::= LIMIT signed OFFSET signed */ - { 262, -4 }, /* (223) limit_opt ::= LIMIT signed COMMA signed */ - { 261, 0 }, /* (224) slimit_opt ::= */ - { 261, -2 }, /* (225) slimit_opt ::= SLIMIT signed */ - { 261, -4 }, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 261, -4 }, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */ - { 252, 0 }, /* (228) where_opt ::= */ - { 252, -2 }, /* (229) where_opt ::= WHERE expr */ - { 266, -3 }, /* (230) expr ::= LP expr RP */ - { 266, -1 }, /* (231) expr ::= ID */ - { 266, -3 }, /* (232) expr ::= ID DOT ID */ - { 266, -3 }, /* (233) expr ::= ID DOT STAR */ - { 266, -1 }, /* (234) expr ::= INTEGER */ - { 266, -2 }, /* (235) expr ::= MINUS INTEGER */ - { 266, -2 }, /* (236) expr ::= PLUS INTEGER */ - { 266, -1 }, /* (237) expr ::= FLOAT */ - { 266, -2 }, /* (238) expr ::= MINUS FLOAT */ - { 266, -2 }, /* (239) expr ::= PLUS FLOAT */ - { 266, -1 }, /* (240) expr ::= STRING */ - { 266, -1 }, /* (241) expr ::= NOW */ - { 266, -1 }, /* (242) expr ::= VARIABLE */ - { 266, -2 }, /* (243) expr ::= PLUS VARIABLE */ - { 266, -2 }, /* (244) expr ::= MINUS VARIABLE */ - { 266, -1 }, /* (245) expr ::= BOOL */ - { 266, -1 }, /* (246) expr ::= NULL */ - { 266, -4 }, /* (247) expr ::= ID LP exprlist RP */ - { 266, -4 }, /* (248) expr ::= ID LP STAR RP */ - { 266, -3 }, /* (249) expr ::= expr IS NULL */ - { 266, -4 }, /* (250) expr ::= expr IS NOT NULL */ - { 266, -3 }, /* (251) expr ::= expr LT expr */ - { 266, -3 }, /* (252) expr ::= expr GT expr */ - { 266, -3 }, /* (253) expr ::= expr LE expr */ - { 266, -3 }, /* (254) expr ::= expr GE expr */ - { 266, -3 }, /* (255) expr ::= expr NE expr */ - { 266, -3 }, /* (256) expr ::= expr EQ expr */ - { 266, -5 
}, /* (257) expr ::= expr BETWEEN expr AND expr */ - { 266, -3 }, /* (258) expr ::= expr AND expr */ - { 266, -3 }, /* (259) expr ::= expr OR expr */ - { 266, -3 }, /* (260) expr ::= expr PLUS expr */ - { 266, -3 }, /* (261) expr ::= expr MINUS expr */ - { 266, -3 }, /* (262) expr ::= expr STAR expr */ - { 266, -3 }, /* (263) expr ::= expr SLASH expr */ - { 266, -3 }, /* (264) expr ::= expr REM expr */ - { 266, -3 }, /* (265) expr ::= expr LIKE expr */ - { 266, -5 }, /* (266) expr ::= expr IN LP exprlist RP */ - { 206, -3 }, /* (267) exprlist ::= exprlist COMMA expritem */ - { 206, -1 }, /* (268) exprlist ::= expritem */ - { 277, -1 }, /* (269) expritem ::= expr */ - { 277, 0 }, /* (270) expritem ::= */ - { 198, -3 }, /* (271) cmd ::= RESET QUERY CACHE */ - { 198, -3 }, /* (272) cmd ::= SYNCDB ids REPLICA */ - { 198, -7 }, /* (273) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 198, -7 }, /* (274) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 198, -7 }, /* (275) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - { 198, -7 }, /* (276) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 198, -7 }, /* (277) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 198, -8 }, /* (278) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 198, -9 }, /* (279) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 198, -7 }, /* (280) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - { 198, -7 }, /* (281) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 198, -7 }, /* (282) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 198, -7 }, /* (283) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - { 198, -7 }, /* (284) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 198, -7 }, /* (285) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 198, -8 }, /* (286) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 198, -9 }, /* (287) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - { 198, -7 }, /* (288) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - { 198, -3 }, /* (289) cmd ::= KILL CONNECTION INTEGER */ - { 198, -5 }, /* (290) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 198, -5 }, /* (291) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side +** of that rule */ +static const YYCODETYPE yyRuleInfoLhs[] = { + 197, /* (0) program ::= cmd */ + 198, /* (1) cmd ::= SHOW DATABASES */ + 198, /* (2) cmd ::= SHOW TOPICS */ + 198, /* (3) cmd ::= SHOW FUNCTIONS */ + 198, /* (4) cmd ::= SHOW MNODES */ + 198, /* (5) cmd ::= SHOW DNODES */ + 198, /* (6) cmd ::= SHOW ACCOUNTS */ + 198, /* (7) cmd ::= SHOW USERS */ + 198, /* (8) cmd ::= SHOW MODULES */ + 198, /* (9) cmd ::= SHOW QUERIES */ + 198, /* (10) cmd ::= SHOW CONNECTIONS */ + 198, /* (11) cmd ::= SHOW STREAMS */ + 198, /* (12) cmd ::= SHOW VARIABLES */ + 198, /* (13) cmd ::= SHOW SCORES */ + 198, /* (14) cmd ::= SHOW GRANTS */ + 198, /* (15) cmd ::= SHOW VNODES */ + 198, /* (16) cmd ::= SHOW VNODES ids */ + 200, /* (17) dbPrefix ::= */ + 200, /* (18) dbPrefix ::= ids DOT */ + 201, /* (19) cpxName ::= */ + 201, /* (20) cpxName ::= DOT ids */ + 198, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ + 198, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ + 198, /* (23) cmd ::= SHOW CREATE DATABASE ids */ + 198, /* (24) cmd ::= SHOW dbPrefix TABLES */ + 198, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + 198, /* (26) cmd ::= SHOW dbPrefix STABLES */ + 198, /* (27) cmd 
::= SHOW dbPrefix STABLES LIKE ids */ + 198, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ + 198, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ + 198, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ + 198, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ + 198, /* (32) cmd ::= DROP DATABASE ifexists ids */ + 198, /* (33) cmd ::= DROP TOPIC ifexists ids */ + 198, /* (34) cmd ::= DROP FUNCTION ids */ + 198, /* (35) cmd ::= DROP DNODE ids */ + 198, /* (36) cmd ::= DROP USER ids */ + 198, /* (37) cmd ::= DROP ACCOUNT ids */ + 198, /* (38) cmd ::= USE ids */ + 198, /* (39) cmd ::= DESCRIBE ids cpxName */ + 198, /* (40) cmd ::= DESC ids cpxName */ + 198, /* (41) cmd ::= ALTER USER ids PASS ids */ + 198, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ + 198, /* (43) cmd ::= ALTER DNODE ids ids */ + 198, /* (44) cmd ::= ALTER DNODE ids ids ids */ + 198, /* (45) cmd ::= ALTER LOCAL ids */ + 198, /* (46) cmd ::= ALTER LOCAL ids ids */ + 198, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ + 198, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ + 198, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ + 198, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + 198, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ + 199, /* (52) ids ::= ID */ + 199, /* (53) ids ::= STRING */ + 202, /* (54) ifexists ::= IF EXISTS */ + 202, /* (55) ifexists ::= */ + 207, /* (56) ifnotexists ::= IF NOT EXISTS */ + 207, /* (57) ifnotexists ::= */ + 198, /* (58) cmd ::= CREATE DNODE ids */ + 198, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + 198, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + 198, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + 198, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 198, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 198, /* (64) cmd ::= CREATE USER ids PASS ids */ + 211, /* (65) bufsize ::= */ + 211, /* (66) bufsize ::= BUFSIZE INTEGER */ + 212, /* (67) pps ::= */ + 212, /* (68) pps ::= PPS INTEGER */ + 213, /* (69) tseries ::= */ + 213, /* (70) tseries ::= TSERIES INTEGER */ + 214, /* (71) dbs ::= */ + 214, /* (72) dbs ::= DBS INTEGER */ + 215, /* (73) streams ::= */ + 215, /* (74) streams ::= STREAMS INTEGER */ + 216, /* (75) storage ::= */ + 216, /* (76) storage ::= STORAGE INTEGER */ + 217, /* (77) qtime ::= */ + 217, /* (78) qtime ::= QTIME INTEGER */ + 218, /* (79) users ::= */ + 218, /* (80) users ::= USERS INTEGER */ + 219, /* (81) conns ::= */ + 219, /* (82) conns ::= CONNS INTEGER */ + 220, /* (83) state ::= */ + 220, /* (84) state ::= STATE ids */ + 205, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + 221, /* (86) intitemlist ::= intitemlist COMMA intitem */ + 221, /* (87) intitemlist ::= intitem */ + 222, /* (88) intitem ::= INTEGER */ + 223, /* (89) keep ::= KEEP intitemlist */ + 224, /* (90) cache ::= CACHE INTEGER */ + 225, /* (91) replica ::= REPLICA INTEGER */ + 226, /* (92) quorum ::= QUORUM INTEGER */ + 227, /* (93) days ::= DAYS INTEGER */ + 228, /* (94) minrows ::= MINROWS INTEGER */ + 229, /* (95) maxrows ::= MAXROWS INTEGER */ + 230, /* (96) blocks ::= BLOCKS INTEGER */ + 231, /* (97) ctime ::= CTIME INTEGER */ + 232, /* (98) wal ::= WAL INTEGER */ + 233, /* (99) fsync ::= FSYNC INTEGER */ + 234, /* (100) comp ::= COMP INTEGER */ + 235, /* (101) prec ::= PRECISION STRING */ + 236, /* (102) update ::= UPDATE INTEGER */ + 237, /* (103) cachelast ::= CACHELAST INTEGER */ + 238, /* (104) partitions ::= PARTITIONS INTEGER 
*/ + 208, /* (105) db_optr ::= */ + 208, /* (106) db_optr ::= db_optr cache */ + 208, /* (107) db_optr ::= db_optr replica */ + 208, /* (108) db_optr ::= db_optr quorum */ + 208, /* (109) db_optr ::= db_optr days */ + 208, /* (110) db_optr ::= db_optr minrows */ + 208, /* (111) db_optr ::= db_optr maxrows */ + 208, /* (112) db_optr ::= db_optr blocks */ + 208, /* (113) db_optr ::= db_optr ctime */ + 208, /* (114) db_optr ::= db_optr wal */ + 208, /* (115) db_optr ::= db_optr fsync */ + 208, /* (116) db_optr ::= db_optr comp */ + 208, /* (117) db_optr ::= db_optr prec */ + 208, /* (118) db_optr ::= db_optr keep */ + 208, /* (119) db_optr ::= db_optr update */ + 208, /* (120) db_optr ::= db_optr cachelast */ + 209, /* (121) topic_optr ::= db_optr */ + 209, /* (122) topic_optr ::= topic_optr partitions */ + 203, /* (123) alter_db_optr ::= */ + 203, /* (124) alter_db_optr ::= alter_db_optr replica */ + 203, /* (125) alter_db_optr ::= alter_db_optr quorum */ + 203, /* (126) alter_db_optr ::= alter_db_optr keep */ + 203, /* (127) alter_db_optr ::= alter_db_optr blocks */ + 203, /* (128) alter_db_optr ::= alter_db_optr comp */ + 203, /* (129) alter_db_optr ::= alter_db_optr update */ + 203, /* (130) alter_db_optr ::= alter_db_optr cachelast */ + 204, /* (131) alter_topic_optr ::= alter_db_optr */ + 204, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ + 210, /* (133) typename ::= ids */ + 210, /* (134) typename ::= ids LP signed RP */ + 210, /* (135) typename ::= ids UNSIGNED */ + 239, /* (136) signed ::= INTEGER */ + 239, /* (137) signed ::= PLUS INTEGER */ + 239, /* (138) signed ::= MINUS INTEGER */ + 198, /* (139) cmd ::= CREATE TABLE create_table_args */ + 198, /* (140) cmd ::= CREATE TABLE create_stable_args */ + 198, /* (141) cmd ::= CREATE STABLE create_stable_args */ + 198, /* (142) cmd ::= CREATE TABLE create_table_list */ + 242, /* (143) create_table_list ::= create_from_stable */ + 242, /* (144) create_table_list ::= create_table_list create_from_stable */ + 240, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + 241, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + 243, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + 243, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + 246, /* (149) tagNamelist ::= tagNamelist COMMA ids */ + 246, /* (150) tagNamelist ::= ids */ + 240, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */ + 244, /* (152) columnlist ::= columnlist COMMA column */ + 244, /* (153) columnlist ::= column */ + 248, /* (154) column ::= ids typename */ + 245, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ + 245, /* (156) tagitemlist ::= tagitem */ + 249, /* (157) tagitem ::= INTEGER */ + 249, /* (158) tagitem ::= FLOAT */ + 249, /* (159) tagitem ::= STRING */ + 249, /* (160) tagitem ::= BOOL */ + 249, /* (161) tagitem ::= NULL */ + 249, /* (162) tagitem ::= NOW */ + 249, /* (163) tagitem ::= MINUS INTEGER */ + 249, /* (164) tagitem ::= MINUS FLOAT */ + 249, /* (165) tagitem ::= PLUS INTEGER */ + 249, /* (166) tagitem ::= PLUS FLOAT */ + 247, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + 247, /* (168) select ::= LP select RP */ + 263, /* (169) union ::= select */ + 263, /* (170) union ::= union UNION ALL 
select */ + 198, /* (171) cmd ::= union */ + 247, /* (172) select ::= SELECT selcollist */ + 264, /* (173) sclp ::= selcollist COMMA */ + 264, /* (174) sclp ::= */ + 250, /* (175) selcollist ::= sclp distinct expr as */ + 250, /* (176) selcollist ::= sclp STAR */ + 267, /* (177) as ::= AS ids */ + 267, /* (178) as ::= ids */ + 267, /* (179) as ::= */ + 265, /* (180) distinct ::= DISTINCT */ + 265, /* (181) distinct ::= */ + 251, /* (182) from ::= FROM tablelist */ + 251, /* (183) from ::= FROM sub */ + 269, /* (184) sub ::= LP union RP */ + 269, /* (185) sub ::= LP union RP ids */ + 269, /* (186) sub ::= sub COMMA LP union RP ids */ + 268, /* (187) tablelist ::= ids cpxName */ + 268, /* (188) tablelist ::= ids cpxName ids */ + 268, /* (189) tablelist ::= tablelist COMMA ids cpxName */ + 268, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ + 270, /* (191) tmvar ::= VARIABLE */ + 253, /* (192) interval_option ::= intervalKey LP tmvar RP */ + 253, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 253, /* (194) interval_option ::= */ + 271, /* (195) intervalKey ::= INTERVAL */ + 271, /* (196) intervalKey ::= EVERY */ + 255, /* (197) session_option ::= */ + 255, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 256, /* (199) windowstate_option ::= */ + 256, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */ + 257, /* (201) fill_opt ::= */ + 257, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + 257, /* (203) fill_opt ::= FILL LP ID RP */ + 254, /* (204) sliding_opt ::= SLIDING LP tmvar RP */ + 254, /* (205) sliding_opt ::= */ + 260, /* (206) orderby_opt ::= */ + 260, /* (207) orderby_opt ::= ORDER BY sortlist */ + 272, /* (208) sortlist ::= sortlist COMMA item sortorder */ + 272, /* (209) sortlist ::= item sortorder */ + 274, /* (210) item ::= ids cpxName */ + 275, /* (211) sortorder ::= ASC */ + 275, /* (212) sortorder ::= DESC */ + 275, /* (213) sortorder ::= */ + 258, /* (214) groupby_opt ::= */ + 258, /* (215) groupby_opt ::= GROUP BY grouplist */ + 276, /* (216) grouplist ::= grouplist COMMA item */ + 276, /* (217) grouplist ::= item */ + 259, /* (218) having_opt ::= */ + 259, /* (219) having_opt ::= HAVING expr */ + 262, /* (220) limit_opt ::= */ + 262, /* (221) limit_opt ::= LIMIT signed */ + 262, /* (222) limit_opt ::= LIMIT signed OFFSET signed */ + 262, /* (223) limit_opt ::= LIMIT signed COMMA signed */ + 261, /* (224) slimit_opt ::= */ + 261, /* (225) slimit_opt ::= SLIMIT signed */ + 261, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */ + 261, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */ + 252, /* (228) where_opt ::= */ + 252, /* (229) where_opt ::= WHERE expr */ + 266, /* (230) expr ::= LP expr RP */ + 266, /* (231) expr ::= ID */ + 266, /* (232) expr ::= ID DOT ID */ + 266, /* (233) expr ::= ID DOT STAR */ + 266, /* (234) expr ::= INTEGER */ + 266, /* (235) expr ::= MINUS INTEGER */ + 266, /* (236) expr ::= PLUS INTEGER */ + 266, /* (237) expr ::= FLOAT */ + 266, /* (238) expr ::= MINUS FLOAT */ + 266, /* (239) expr ::= PLUS FLOAT */ + 266, /* (240) expr ::= STRING */ + 266, /* (241) expr ::= NOW */ + 266, /* (242) expr ::= VARIABLE */ + 266, /* (243) expr ::= PLUS VARIABLE */ + 266, /* (244) expr ::= MINUS VARIABLE */ + 266, /* (245) expr ::= BOOL */ + 266, /* (246) expr ::= NULL */ + 266, /* (247) expr ::= ID LP exprlist RP */ + 266, /* (248) expr ::= ID LP STAR RP */ + 266, /* (249) expr ::= expr IS NULL */ + 266, /* (250) expr ::= expr IS NOT NULL */ + 266, /* (251) expr ::= expr LT expr */ + 
266, /* (252) expr ::= expr GT expr */ + 266, /* (253) expr ::= expr LE expr */ + 266, /* (254) expr ::= expr GE expr */ + 266, /* (255) expr ::= expr NE expr */ + 266, /* (256) expr ::= expr EQ expr */ + 266, /* (257) expr ::= expr BETWEEN expr AND expr */ + 266, /* (258) expr ::= expr AND expr */ + 266, /* (259) expr ::= expr OR expr */ + 266, /* (260) expr ::= expr PLUS expr */ + 266, /* (261) expr ::= expr MINUS expr */ + 266, /* (262) expr ::= expr STAR expr */ + 266, /* (263) expr ::= expr SLASH expr */ + 266, /* (264) expr ::= expr REM expr */ + 266, /* (265) expr ::= expr LIKE expr */ + 266, /* (266) expr ::= expr MATCH expr */ + 266, /* (267) expr ::= expr NMATCH expr */ + 266, /* (268) expr ::= expr IN LP exprlist RP */ + 206, /* (269) exprlist ::= exprlist COMMA expritem */ + 206, /* (270) exprlist ::= expritem */ + 277, /* (271) expritem ::= expr */ + 277, /* (272) expritem ::= */ + 198, /* (273) cmd ::= RESET QUERY CACHE */ + 198, /* (274) cmd ::= SYNCDB ids REPLICA */ + 198, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 198, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 198, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + 198, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 198, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 198, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 198, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 198, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + 198, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 198, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 198, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + 198, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 198, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 198, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 198, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + 198, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + 198, /* (291) cmd ::= KILL CONNECTION INTEGER */ + 198, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 198, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +}; + +/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number +** of symbols on the right-hand side of that rule. 
*/ +static const signed char yyRuleInfoNRhs[] = { + -1, /* (0) program ::= cmd */ + -2, /* (1) cmd ::= SHOW DATABASES */ + -2, /* (2) cmd ::= SHOW TOPICS */ + -2, /* (3) cmd ::= SHOW FUNCTIONS */ + -2, /* (4) cmd ::= SHOW MNODES */ + -2, /* (5) cmd ::= SHOW DNODES */ + -2, /* (6) cmd ::= SHOW ACCOUNTS */ + -2, /* (7) cmd ::= SHOW USERS */ + -2, /* (8) cmd ::= SHOW MODULES */ + -2, /* (9) cmd ::= SHOW QUERIES */ + -2, /* (10) cmd ::= SHOW CONNECTIONS */ + -2, /* (11) cmd ::= SHOW STREAMS */ + -2, /* (12) cmd ::= SHOW VARIABLES */ + -2, /* (13) cmd ::= SHOW SCORES */ + -2, /* (14) cmd ::= SHOW GRANTS */ + -2, /* (15) cmd ::= SHOW VNODES */ + -3, /* (16) cmd ::= SHOW VNODES ids */ + 0, /* (17) dbPrefix ::= */ + -2, /* (18) dbPrefix ::= ids DOT */ + 0, /* (19) cpxName ::= */ + -2, /* (20) cpxName ::= DOT ids */ + -5, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ + -5, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ + -4, /* (23) cmd ::= SHOW CREATE DATABASE ids */ + -3, /* (24) cmd ::= SHOW dbPrefix TABLES */ + -5, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + -3, /* (26) cmd ::= SHOW dbPrefix STABLES */ + -5, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + -3, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ + -4, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ + -5, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ + -5, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ + -4, /* (32) cmd ::= DROP DATABASE ifexists ids */ + -4, /* (33) cmd ::= DROP TOPIC ifexists ids */ + -3, /* (34) cmd ::= DROP FUNCTION ids */ + -3, /* (35) cmd ::= DROP DNODE ids */ + -3, /* (36) cmd ::= DROP USER ids */ + -3, /* (37) cmd ::= DROP ACCOUNT ids */ + -2, /* (38) cmd ::= USE ids */ + -3, /* (39) cmd ::= DESCRIBE ids cpxName */ + -3, /* (40) cmd ::= DESC ids cpxName */ + -5, /* (41) cmd ::= ALTER USER ids PASS ids */ + -5, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ + -4, /* (43) cmd ::= ALTER DNODE ids ids */ + -5, /* (44) cmd ::= ALTER DNODE ids ids ids */ + -3, /* (45) cmd ::= ALTER LOCAL ids */ + -4, /* (46) cmd ::= ALTER LOCAL ids ids */ + -4, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ + -4, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ + -4, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ + -6, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + -6, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ + -1, /* (52) ids ::= ID */ + -1, /* (53) ids ::= STRING */ + -2, /* (54) ifexists ::= IF EXISTS */ + 0, /* (55) ifexists ::= */ + -3, /* (56) ifnotexists ::= IF NOT EXISTS */ + 0, /* (57) ifnotexists ::= */ + -3, /* (58) cmd ::= CREATE DNODE ids */ + -6, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + -5, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + -5, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + -8, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + -9, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + -5, /* (64) cmd ::= CREATE USER ids PASS ids */ + 0, /* (65) bufsize ::= */ + -2, /* (66) bufsize ::= BUFSIZE INTEGER */ + 0, /* (67) pps ::= */ + -2, /* (68) pps ::= PPS INTEGER */ + 0, /* (69) tseries ::= */ + -2, /* (70) tseries ::= TSERIES INTEGER */ + 0, /* (71) dbs ::= */ + -2, /* (72) dbs ::= DBS INTEGER */ + 0, /* (73) streams ::= */ + -2, /* (74) streams ::= STREAMS INTEGER */ + 0, /* (75) storage ::= */ + -2, /* (76) storage ::= STORAGE INTEGER */ + 0, /* (77) qtime ::= */ + -2, /* (78) qtime ::= QTIME INTEGER */ + 0, /* (79) users ::= */ + -2, /* (80) users ::= USERS 
INTEGER */ + 0, /* (81) conns ::= */ + -2, /* (82) conns ::= CONNS INTEGER */ + 0, /* (83) state ::= */ + -2, /* (84) state ::= STATE ids */ + -9, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + -3, /* (86) intitemlist ::= intitemlist COMMA intitem */ + -1, /* (87) intitemlist ::= intitem */ + -1, /* (88) intitem ::= INTEGER */ + -2, /* (89) keep ::= KEEP intitemlist */ + -2, /* (90) cache ::= CACHE INTEGER */ + -2, /* (91) replica ::= REPLICA INTEGER */ + -2, /* (92) quorum ::= QUORUM INTEGER */ + -2, /* (93) days ::= DAYS INTEGER */ + -2, /* (94) minrows ::= MINROWS INTEGER */ + -2, /* (95) maxrows ::= MAXROWS INTEGER */ + -2, /* (96) blocks ::= BLOCKS INTEGER */ + -2, /* (97) ctime ::= CTIME INTEGER */ + -2, /* (98) wal ::= WAL INTEGER */ + -2, /* (99) fsync ::= FSYNC INTEGER */ + -2, /* (100) comp ::= COMP INTEGER */ + -2, /* (101) prec ::= PRECISION STRING */ + -2, /* (102) update ::= UPDATE INTEGER */ + -2, /* (103) cachelast ::= CACHELAST INTEGER */ + -2, /* (104) partitions ::= PARTITIONS INTEGER */ + 0, /* (105) db_optr ::= */ + -2, /* (106) db_optr ::= db_optr cache */ + -2, /* (107) db_optr ::= db_optr replica */ + -2, /* (108) db_optr ::= db_optr quorum */ + -2, /* (109) db_optr ::= db_optr days */ + -2, /* (110) db_optr ::= db_optr minrows */ + -2, /* (111) db_optr ::= db_optr maxrows */ + -2, /* (112) db_optr ::= db_optr blocks */ + -2, /* (113) db_optr ::= db_optr ctime */ + -2, /* (114) db_optr ::= db_optr wal */ + -2, /* (115) db_optr ::= db_optr fsync */ + -2, /* (116) db_optr ::= db_optr comp */ + -2, /* (117) db_optr ::= db_optr prec */ + -2, /* (118) db_optr ::= db_optr keep */ + -2, /* (119) db_optr ::= db_optr update */ + -2, /* (120) db_optr ::= db_optr cachelast */ + -1, /* (121) topic_optr ::= db_optr */ + -2, /* (122) topic_optr ::= topic_optr partitions */ + 0, /* (123) alter_db_optr ::= */ + -2, /* (124) alter_db_optr ::= alter_db_optr replica */ + -2, /* (125) alter_db_optr ::= alter_db_optr quorum */ + -2, /* (126) alter_db_optr ::= alter_db_optr keep */ + -2, /* (127) alter_db_optr ::= alter_db_optr blocks */ + -2, /* (128) alter_db_optr ::= alter_db_optr comp */ + -2, /* (129) alter_db_optr ::= alter_db_optr update */ + -2, /* (130) alter_db_optr ::= alter_db_optr cachelast */ + -1, /* (131) alter_topic_optr ::= alter_db_optr */ + -2, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ + -1, /* (133) typename ::= ids */ + -4, /* (134) typename ::= ids LP signed RP */ + -2, /* (135) typename ::= ids UNSIGNED */ + -1, /* (136) signed ::= INTEGER */ + -2, /* (137) signed ::= PLUS INTEGER */ + -2, /* (138) signed ::= MINUS INTEGER */ + -3, /* (139) cmd ::= CREATE TABLE create_table_args */ + -3, /* (140) cmd ::= CREATE TABLE create_stable_args */ + -3, /* (141) cmd ::= CREATE STABLE create_stable_args */ + -3, /* (142) cmd ::= CREATE TABLE create_table_list */ + -1, /* (143) create_table_list ::= create_from_stable */ + -2, /* (144) create_table_list ::= create_table_list create_from_stable */ + -6, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + -10, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + -10, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + -13, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + -3, /* (149) tagNamelist ::= tagNamelist COMMA ids */ + -1, /* (150) tagNamelist ::= ids */ + -5, /* (151) 
create_table_args ::= ifnotexists ids cpxName AS select */ + -3, /* (152) columnlist ::= columnlist COMMA column */ + -1, /* (153) columnlist ::= column */ + -2, /* (154) column ::= ids typename */ + -3, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ + -1, /* (156) tagitemlist ::= tagitem */ + -1, /* (157) tagitem ::= INTEGER */ + -1, /* (158) tagitem ::= FLOAT */ + -1, /* (159) tagitem ::= STRING */ + -1, /* (160) tagitem ::= BOOL */ + -1, /* (161) tagitem ::= NULL */ + -1, /* (162) tagitem ::= NOW */ + -2, /* (163) tagitem ::= MINUS INTEGER */ + -2, /* (164) tagitem ::= MINUS FLOAT */ + -2, /* (165) tagitem ::= PLUS INTEGER */ + -2, /* (166) tagitem ::= PLUS FLOAT */ + -14, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + -3, /* (168) select ::= LP select RP */ + -1, /* (169) union ::= select */ + -4, /* (170) union ::= union UNION ALL select */ + -1, /* (171) cmd ::= union */ + -2, /* (172) select ::= SELECT selcollist */ + -2, /* (173) sclp ::= selcollist COMMA */ + 0, /* (174) sclp ::= */ + -4, /* (175) selcollist ::= sclp distinct expr as */ + -2, /* (176) selcollist ::= sclp STAR */ + -2, /* (177) as ::= AS ids */ + -1, /* (178) as ::= ids */ + 0, /* (179) as ::= */ + -1, /* (180) distinct ::= DISTINCT */ + 0, /* (181) distinct ::= */ + -2, /* (182) from ::= FROM tablelist */ + -2, /* (183) from ::= FROM sub */ + -3, /* (184) sub ::= LP union RP */ + -4, /* (185) sub ::= LP union RP ids */ + -6, /* (186) sub ::= sub COMMA LP union RP ids */ + -2, /* (187) tablelist ::= ids cpxName */ + -3, /* (188) tablelist ::= ids cpxName ids */ + -4, /* (189) tablelist ::= tablelist COMMA ids cpxName */ + -5, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ + -1, /* (191) tmvar ::= VARIABLE */ + -4, /* (192) interval_option ::= intervalKey LP tmvar RP */ + -6, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 0, /* (194) interval_option ::= */ + -1, /* (195) intervalKey ::= INTERVAL */ + -1, /* (196) intervalKey ::= EVERY */ + 0, /* (197) session_option ::= */ + -7, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 0, /* (199) windowstate_option ::= */ + -4, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */ + 0, /* (201) fill_opt ::= */ + -6, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + -4, /* (203) fill_opt ::= FILL LP ID RP */ + -4, /* (204) sliding_opt ::= SLIDING LP tmvar RP */ + 0, /* (205) sliding_opt ::= */ + 0, /* (206) orderby_opt ::= */ + -3, /* (207) orderby_opt ::= ORDER BY sortlist */ + -4, /* (208) sortlist ::= sortlist COMMA item sortorder */ + -2, /* (209) sortlist ::= item sortorder */ + -2, /* (210) item ::= ids cpxName */ + -1, /* (211) sortorder ::= ASC */ + -1, /* (212) sortorder ::= DESC */ + 0, /* (213) sortorder ::= */ + 0, /* (214) groupby_opt ::= */ + -3, /* (215) groupby_opt ::= GROUP BY grouplist */ + -3, /* (216) grouplist ::= grouplist COMMA item */ + -1, /* (217) grouplist ::= item */ + 0, /* (218) having_opt ::= */ + -2, /* (219) having_opt ::= HAVING expr */ + 0, /* (220) limit_opt ::= */ + -2, /* (221) limit_opt ::= LIMIT signed */ + -4, /* (222) limit_opt ::= LIMIT signed OFFSET signed */ + -4, /* (223) limit_opt ::= LIMIT signed COMMA signed */ + 0, /* (224) slimit_opt ::= */ + -2, /* (225) slimit_opt ::= SLIMIT signed */ + -4, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */ + -4, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */ + 0, /* 
(228) where_opt ::= */ + -2, /* (229) where_opt ::= WHERE expr */ + -3, /* (230) expr ::= LP expr RP */ + -1, /* (231) expr ::= ID */ + -3, /* (232) expr ::= ID DOT ID */ + -3, /* (233) expr ::= ID DOT STAR */ + -1, /* (234) expr ::= INTEGER */ + -2, /* (235) expr ::= MINUS INTEGER */ + -2, /* (236) expr ::= PLUS INTEGER */ + -1, /* (237) expr ::= FLOAT */ + -2, /* (238) expr ::= MINUS FLOAT */ + -2, /* (239) expr ::= PLUS FLOAT */ + -1, /* (240) expr ::= STRING */ + -1, /* (241) expr ::= NOW */ + -1, /* (242) expr ::= VARIABLE */ + -2, /* (243) expr ::= PLUS VARIABLE */ + -2, /* (244) expr ::= MINUS VARIABLE */ + -1, /* (245) expr ::= BOOL */ + -1, /* (246) expr ::= NULL */ + -4, /* (247) expr ::= ID LP exprlist RP */ + -4, /* (248) expr ::= ID LP STAR RP */ + -3, /* (249) expr ::= expr IS NULL */ + -4, /* (250) expr ::= expr IS NOT NULL */ + -3, /* (251) expr ::= expr LT expr */ + -3, /* (252) expr ::= expr GT expr */ + -3, /* (253) expr ::= expr LE expr */ + -3, /* (254) expr ::= expr GE expr */ + -3, /* (255) expr ::= expr NE expr */ + -3, /* (256) expr ::= expr EQ expr */ + -5, /* (257) expr ::= expr BETWEEN expr AND expr */ + -3, /* (258) expr ::= expr AND expr */ + -3, /* (259) expr ::= expr OR expr */ + -3, /* (260) expr ::= expr PLUS expr */ + -3, /* (261) expr ::= expr MINUS expr */ + -3, /* (262) expr ::= expr STAR expr */ + -3, /* (263) expr ::= expr SLASH expr */ + -3, /* (264) expr ::= expr REM expr */ + -3, /* (265) expr ::= expr LIKE expr */ + -3, /* (266) expr ::= expr MATCH expr */ + -3, /* (267) expr ::= expr NMATCH expr */ + -5, /* (268) expr ::= expr IN LP exprlist RP */ + -3, /* (269) exprlist ::= exprlist COMMA expritem */ + -1, /* (270) exprlist ::= expritem */ + -1, /* (271) expritem ::= expr */ + 0, /* (272) expritem ::= */ + -3, /* (273) cmd ::= RESET QUERY CACHE */ + -3, /* (274) cmd ::= SYNCDB ids REPLICA */ + -7, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + -7, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + -7, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + -3, /* (291) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2139,30 +2476,34 @@ static void yy_accept(yyParser*); /* Forward Declaration */ ** only called from one place, optimizing compilers will in-line it, which ** means that the extra parameters have no performance impact. 
 */
-static void yy_reduce(
+static YYACTIONTYPE yy_reduce(
   yyParser *yypParser,         /* The parser */
   unsigned int yyruleno,       /* Number of the rule by which to reduce */
   int yyLookahead,             /* Lookahead token, or YYNOCODE if none */
   ParseTOKENTYPE yyLookaheadToken  /* Value of the lookahead token */
+  ParseCTX_PDECL                   /* %extra_context */
 ){
   int yygoto;                     /* The next state */
-  int yyact;                      /* The next action */
+  YYACTIONTYPE yyact;             /* The next action */
   yyStackEntry *yymsp;            /* The top of the parser's stack */
   int yysize;                     /* Amount to pop the stack */
-  ParseARG_FETCH;
+  ParseARG_FETCH
   (void)yyLookahead;
   (void)yyLookaheadToken;
   yymsp = yypParser->yytos;
 #ifndef NDEBUG
   if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
-    yysize = yyRuleInfo[yyruleno].nrhs;
+    yysize = yyRuleInfoNRhs[yyruleno];
     if( yysize ){
-      fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n",
+      fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
         yyTracePrompt,
-        yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno);
+        yyruleno, yyRuleName[yyruleno],
+        yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action",
+        yymsp[yysize].stateno);
     }
   }
 #endif /* NDEBUG */
 
   /* Check that the stack is large enough to grow by a single entry
   ** if the RHS of the rule is empty.  This ensures that there is room
   ** enough on the stack to push the LHS value */
-  if( yyRuleInfo[yyruleno].nrhs==0 ){
+  if( yyRuleInfoNRhs[yyruleno]==0 ){
 #ifdef YYTRACKMAXSTACKDEPTH
     if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
       yypParser->yyhwm++;
@@ -2180,13 +2521,19 @@ static void yy_reduce(
 #if YYSTACKDEPTH>0
   if( yypParser->yytos>=yypParser->yystackEnd ){
     yyStackOverflow(yypParser);
-    return;
+    /* The call to yyStackOverflow() above pops the stack until it is
+    ** empty, causing the main parser loop to exit.  So the return value
+    ** is never used and does not matter. */
+    return 0;
   }
 #else
   if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
     if( yyGrowStack(yypParser) ){
       yyStackOverflow(yypParser);
-      return;
+      /* The call to yyStackOverflow() above pops the stack until it is
+      ** empty, causing the main parser loop to exit. So the return value
+      ** is never used and does not matter.
*/ + return 0; } yymsp = yypParser->yytos; } @@ -2208,227 +2555,347 @@ static void yy_reduce( case 139: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==139); case 140: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==140); case 141: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==141); +#line 63 "sql.y" {} +#line 2561 "sql.c" break; case 1: /* cmd ::= SHOW DATABASES */ +#line 66 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} +#line 2566 "sql.c" break; case 2: /* cmd ::= SHOW TOPICS */ +#line 67 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TP, 0, 0);} +#line 2571 "sql.c" break; case 3: /* cmd ::= SHOW FUNCTIONS */ +#line 68 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_FUNCTION, 0, 0);} +#line 2576 "sql.c" break; case 4: /* cmd ::= SHOW MNODES */ +#line 69 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} +#line 2581 "sql.c" break; case 5: /* cmd ::= SHOW DNODES */ +#line 70 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} +#line 2586 "sql.c" break; case 6: /* cmd ::= SHOW ACCOUNTS */ +#line 71 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} +#line 2591 "sql.c" break; case 7: /* cmd ::= SHOW USERS */ +#line 72 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} +#line 2596 "sql.c" break; case 8: /* cmd ::= SHOW MODULES */ +#line 74 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } +#line 2601 "sql.c" break; case 9: /* cmd ::= SHOW QUERIES */ +#line 75 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } +#line 2606 "sql.c" break; case 10: /* cmd ::= SHOW CONNECTIONS */ +#line 76 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} +#line 2611 "sql.c" break; case 11: /* cmd ::= SHOW STREAMS */ +#line 77 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } +#line 2616 "sql.c" break; case 12: /* cmd ::= SHOW VARIABLES */ +#line 78 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VARIABLES, 0, 0); } +#line 2621 "sql.c" break; case 13: /* cmd ::= SHOW SCORES */ +#line 79 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } +#line 2626 "sql.c" break; case 14: /* cmd ::= SHOW GRANTS */ +#line 80 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } +#line 2631 "sql.c" break; case 15: /* cmd ::= SHOW VNODES */ +#line 82 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } +#line 2636 "sql.c" break; case 16: /* cmd ::= SHOW VNODES ids */ +#line 83 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); } +#line 2641 "sql.c" break; case 17: /* dbPrefix ::= */ +#line 87 "sql.y" {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;} +#line 2646 "sql.c" break; case 18: /* dbPrefix ::= ids DOT */ +#line 88 "sql.y" {yylhsminor.yy0 = yymsp[-1].minor.yy0; } +#line 2651 "sql.c" yymsp[-1].minor.yy0 = yylhsminor.yy0; break; case 19: /* cpxName ::= */ +#line 91 "sql.y" {yymsp[1].minor.yy0.n = 0; } +#line 2657 "sql.c" break; case 20: /* cpxName ::= DOT ids */ +#line 92 "sql.y" {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; } +#line 2662 "sql.c" break; case 21: /* cmd ::= SHOW CREATE TABLE ids cpxName */ +#line 94 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0); } +#line 2670 "sql.c" break; case 22: /* cmd ::= SHOW CREATE STABLE ids cpxName */ +#line 98 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, 
&yymsp[-1].minor.yy0); } +#line 2678 "sql.c" break; case 23: /* cmd ::= SHOW CREATE DATABASE ids */ +#line 103 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0); } +#line 2685 "sql.c" break; case 24: /* cmd ::= SHOW dbPrefix TABLES */ +#line 107 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } +#line 2692 "sql.c" break; case 25: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ +#line 111 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } +#line 2699 "sql.c" break; case 26: /* cmd ::= SHOW dbPrefix STABLES */ +#line 115 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0); } +#line 2706 "sql.c" break; case 27: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ +#line 119 "sql.y" { SStrToken token; tSetDbName(&token, &yymsp[-3].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } +#line 2715 "sql.c" break; case 28: /* cmd ::= SHOW dbPrefix VGROUPS */ +#line 125 "sql.y" { SStrToken token; tSetDbName(&token, &yymsp[-1].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } +#line 2724 "sql.c" break; case 29: /* cmd ::= SHOW dbPrefix VGROUPS ids */ +#line 131 "sql.y" { SStrToken token; tSetDbName(&token, &yymsp[-2].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } +#line 2733 "sql.c" break; case 30: /* cmd ::= DROP TABLE ifexists ids cpxName */ +#line 138 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1); } +#line 2741 "sql.c" break; case 31: /* cmd ::= DROP STABLE ifexists ids cpxName */ +#line 144 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE); } +#line 2749 "sql.c" break; case 32: /* cmd ::= DROP DATABASE ifexists ids */ +#line 149 "sql.y" { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); } +#line 2754 "sql.c" break; case 33: /* cmd ::= DROP TOPIC ifexists ids */ +#line 150 "sql.y" { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); } +#line 2759 "sql.c" break; case 34: /* cmd ::= DROP FUNCTION ids */ +#line 151 "sql.y" { setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &yymsp[0].minor.yy0); } +#line 2764 "sql.c" break; case 35: /* cmd ::= DROP DNODE ids */ +#line 153 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } +#line 2769 "sql.c" break; case 36: /* cmd ::= DROP USER ids */ +#line 154 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } +#line 2774 "sql.c" break; case 37: /* cmd ::= DROP ACCOUNT ids */ +#line 155 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } +#line 2779 "sql.c" break; case 38: /* cmd ::= USE ids */ +#line 158 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} +#line 2784 "sql.c" break; case 39: /* cmd ::= DESCRIBE ids cpxName */ case 40: /* cmd ::= DESC ids cpxName */ yytestcase(yyruleno==40); +#line 161 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } +#line 2793 "sql.c" break; case 41: /* cmd ::= ALTER USER ids PASS ids */ +#line 170 "sql.y" { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, 
&yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } +#line 2798 "sql.c" break; case 42: /* cmd ::= ALTER USER ids PRIVILEGE ids */ +#line 171 "sql.y" { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} +#line 2803 "sql.c" break; case 43: /* cmd ::= ALTER DNODE ids ids */ +#line 172 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 2808 "sql.c" break; case 44: /* cmd ::= ALTER DNODE ids ids ids */ +#line 173 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 2813 "sql.c" break; case 45: /* cmd ::= ALTER LOCAL ids */ +#line 174 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } +#line 2818 "sql.c" break; case 46: /* cmd ::= ALTER LOCAL ids ids */ +#line 175 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 2823 "sql.c" break; case 47: /* cmd ::= ALTER DATABASE ids alter_db_optr */ case 48: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==48); +#line 176 "sql.y" { SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy90, &t);} +#line 2829 "sql.c" break; case 49: /* cmd ::= ALTER ACCOUNT ids acct_optr */ +#line 179 "sql.y" { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy171);} +#line 2834 "sql.c" break; case 50: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ +#line 180 "sql.y" { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy171);} +#line 2839 "sql.c" break; case 51: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ +#line 184 "sql.y" { setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy421);} +#line 2844 "sql.c" break; case 52: /* ids ::= ID */ case 53: /* ids ::= STRING */ yytestcase(yyruleno==53); +#line 190 "sql.y" {yylhsminor.yy0 = yymsp[0].minor.yy0; } +#line 2850 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 54: /* ifexists ::= IF EXISTS */ +#line 194 "sql.y" { yymsp[-1].minor.yy0.n = 1;} +#line 2856 "sql.c" break; case 55: /* ifexists ::= */ case 57: /* ifnotexists ::= */ yytestcase(yyruleno==57); case 181: /* distinct ::= */ yytestcase(yyruleno==181); +#line 195 "sql.y" { yymsp[1].minor.yy0.n = 0;} +#line 2863 "sql.c" break; case 56: /* ifnotexists ::= IF NOT EXISTS */ +#line 198 "sql.y" { yymsp[-2].minor.yy0.n = 1;} +#line 2868 "sql.c" break; case 58: /* cmd ::= CREATE DNODE ids */ +#line 203 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} +#line 2873 "sql.c" break; case 59: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ +#line 205 "sql.y" { setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy171);} +#line 2878 "sql.c" break; case 60: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ case 61: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==61); +#line 206 "sql.y" { setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy90, &yymsp[-2].minor.yy0);} +#line 2884 "sql.c" break; case 62: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ +#line 208 "sql.y" { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy183, &yymsp[0].minor.yy0, 1);} +#line 2889 "sql.c" break; case 63: /* cmd ::= CREATE 
AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ +#line 209 "sql.y" { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy183, &yymsp[0].minor.yy0, 2);} +#line 2894 "sql.c" break; case 64: /* cmd ::= CREATE USER ids PASS ids */ +#line 210 "sql.y" { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} +#line 2899 "sql.c" break; case 65: /* bufsize ::= */ case 67: /* pps ::= */ yytestcase(yyruleno==67); @@ -2440,7 +2907,9 @@ static void yy_reduce( case 79: /* users ::= */ yytestcase(yyruleno==79); case 81: /* conns ::= */ yytestcase(yyruleno==81); case 83: /* state ::= */ yytestcase(yyruleno==83); +#line 212 "sql.y" { yymsp[1].minor.yy0.n = 0; } +#line 2913 "sql.c" break; case 66: /* bufsize ::= BUFSIZE INTEGER */ case 68: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==68); @@ -2452,9 +2921,12 @@ static void yy_reduce( case 80: /* users ::= USERS INTEGER */ yytestcase(yyruleno==80); case 82: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==82); case 84: /* state ::= STATE ids */ yytestcase(yyruleno==84); +#line 213 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } +#line 2927 "sql.c" break; case 85: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ +#line 243 "sql.y" { yylhsminor.yy171.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; yylhsminor.yy171.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; @@ -2466,16 +2938,21 @@ static void yy_reduce( yylhsminor.yy171.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; yylhsminor.yy171.stat = yymsp[0].minor.yy0; } +#line 2942 "sql.c" yymsp[-8].minor.yy171 = yylhsminor.yy171; break; case 86: /* intitemlist ::= intitemlist COMMA intitem */ case 155: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==155); +#line 259 "sql.y" { yylhsminor.yy421 = tVariantListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy430, -1); } +#line 2949 "sql.c" yymsp[-2].minor.yy421 = yylhsminor.yy421; break; case 87: /* intitemlist ::= intitem */ case 156: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==156); +#line 260 "sql.y" { yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[0].minor.yy430, -1); } +#line 2956 "sql.c" yymsp[0].minor.yy421 = yylhsminor.yy421; break; case 88: /* intitem ::= INTEGER */ @@ -2483,11 +2960,15 @@ static void yy_reduce( case 158: /* tagitem ::= FLOAT */ yytestcase(yyruleno==158); case 159: /* tagitem ::= STRING */ yytestcase(yyruleno==159); case 160: /* tagitem ::= BOOL */ yytestcase(yyruleno==160); +#line 262 "sql.y" { toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0); } +#line 2966 "sql.c" yymsp[0].minor.yy430 = yylhsminor.yy430; break; case 89: /* keep ::= KEEP intitemlist */ +#line 266 "sql.y" { yymsp[-1].minor.yy421 = yymsp[0].minor.yy421; } +#line 2972 "sql.c" break; case 90: /* cache ::= CACHE INTEGER */ case 91: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==91); @@ -2504,99 +2985,142 @@ static void yy_reduce( case 102: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==102); case 103: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==103); case 104: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==104); +#line 268 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } +#line 2991 "sql.c" break; case 105: /* db_optr ::= */ +#line 285 "sql.y" {setDefaultCreateDbOption(&yymsp[1].minor.yy90); yymsp[1].minor.yy90.dbType = TSDB_DB_TYPE_DEFAULT;} +#line 2996 "sql.c" break; 
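The db_optr cases that follow all share one reduce-action shape: the partially built options struct is copied from the right-hand side of the rule, a single field is overwritten from the new option token, and the result is pushed back onto the value stack. A minimal sketch of that pattern, using simplified stand-in types rather than the real SStrToken/SCreateDbInfo layouts:

#include <stdlib.h>

/* Stand-ins for SStrToken and SCreateDbInfo (simplified; the real structs
** carry many more fields). A token is a (pointer, length) slice of the
** original SQL text, which is why strtol() can read from z directly: the
** digits are always followed by a non-digit delimiter in the source. */
typedef struct { char *z; unsigned int n; unsigned int type; } Token;
typedef struct { int cacheBlockSize; int replica; } DbOptr;

/* Mirrors "db_optr ::= db_optr cache": copy, overwrite one field, push. */
static DbOptr reduceDbOptrCache(DbOptr prev, Token cacheTok) {
  DbOptr next = prev;          /* yylhsminor.yy90 = yymsp[-1].minor.yy90 */
  next.cacheBlockSize = (int)strtol(cacheTok.z, NULL, 10);
  return next;                 /* written back to yymsp[-1].minor.yy90 */
}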
case 106: /* db_optr ::= db_optr cache */ +#line 287 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3001 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 107: /* db_optr ::= db_optr replica */ case 124: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==124); +#line 288 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3008 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 108: /* db_optr ::= db_optr quorum */ case 125: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==125); +#line 289 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3015 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 109: /* db_optr ::= db_optr days */ +#line 290 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3021 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 110: /* db_optr ::= db_optr minrows */ +#line 291 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } +#line 3027 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 111: /* db_optr ::= db_optr maxrows */ +#line 292 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } +#line 3033 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 112: /* db_optr ::= db_optr blocks */ case 127: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==127); +#line 293 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3040 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 113: /* db_optr ::= db_optr ctime */ +#line 294 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3046 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 114: /* db_optr ::= db_optr wal */ +#line 295 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3052 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 115: /* db_optr ::= db_optr fsync */ +#line 296 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3058 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 116: /* db_optr ::= db_optr comp */ case 128: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==128); +#line 297 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3065 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 117: /* db_optr ::= db_optr prec */ +#line 298 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.precision = yymsp[0].minor.yy0; } +#line 3071 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 118: /* db_optr ::= db_optr keep */ case 126: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==126); +#line 299 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.keep = yymsp[0].minor.yy421; } +#line 3078 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 119: /* db_optr ::= db_optr update */ case 
129: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==129); +#line 300 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3085 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 120: /* db_optr ::= db_optr cachelast */ case 130: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==130); +#line 301 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3092 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 121: /* topic_optr ::= db_optr */ case 131: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==131); +#line 305 "sql.y" { yylhsminor.yy90 = yymsp[0].minor.yy90; yylhsminor.yy90.dbType = TSDB_DB_TYPE_TOPIC; } +#line 3099 "sql.c" yymsp[0].minor.yy90 = yylhsminor.yy90; break; case 122: /* topic_optr ::= topic_optr partitions */ case 132: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==132); +#line 306 "sql.y" { yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3106 "sql.c" yymsp[-1].minor.yy90 = yylhsminor.yy90; break; case 123: /* alter_db_optr ::= */ +#line 309 "sql.y" { setDefaultCreateDbOption(&yymsp[1].minor.yy90); yymsp[1].minor.yy90.dbType = TSDB_DB_TYPE_DEFAULT;} +#line 3112 "sql.c" break; case 133: /* typename ::= ids */ +#line 329 "sql.y" { yymsp[0].minor.yy0.type = 0; tSetColumnType (&yylhsminor.yy183, &yymsp[0].minor.yy0); } +#line 3120 "sql.c" yymsp[0].minor.yy183 = yylhsminor.yy183; break; case 134: /* typename ::= ids LP signed RP */ +#line 335 "sql.y" { if (yymsp[-1].minor.yy325 <= 0) { yymsp[-3].minor.yy0.type = 0; @@ -2606,30 +3130,42 @@ static void yy_reduce( tSetColumnType(&yylhsminor.yy183, &yymsp[-3].minor.yy0); } } +#line 3134 "sql.c" yymsp[-3].minor.yy183 = yylhsminor.yy183; break; case 135: /* typename ::= ids UNSIGNED */ +#line 346 "sql.y" { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); tSetColumnType (&yylhsminor.yy183, &yymsp[-1].minor.yy0); } +#line 3144 "sql.c" yymsp[-1].minor.yy183 = yylhsminor.yy183; break; case 136: /* signed ::= INTEGER */ +#line 353 "sql.y" { yylhsminor.yy325 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3150 "sql.c" yymsp[0].minor.yy325 = yylhsminor.yy325; break; case 137: /* signed ::= PLUS INTEGER */ +#line 354 "sql.y" { yymsp[-1].minor.yy325 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3156 "sql.c" break; case 138: /* signed ::= MINUS INTEGER */ +#line 355 "sql.y" { yymsp[-1].minor.yy325 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +#line 3161 "sql.c" break; case 142: /* cmd ::= CREATE TABLE create_table_list */ +#line 361 "sql.y" { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy438;} +#line 3166 "sql.c" break; case 143: /* create_table_list ::= create_from_stable */ +#line 365 "sql.y" { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); @@ -2638,16 +3174,20 @@ static void yy_reduce( pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; yylhsminor.yy438 = pCreateTable; } +#line 3178 "sql.c" yymsp[0].minor.yy438 = yylhsminor.yy438; break; case 144: /* create_table_list ::= create_table_list create_from_stable */ +#line 374 "sql.y" { taosArrayPush(yymsp[-1].minor.yy438->childTableInfo, &yymsp[0].minor.yy152); yylhsminor.yy438 = 
yymsp[-1].minor.yy438; } +#line 3187 "sql.c" yymsp[-1].minor.yy438 = yylhsminor.yy438; break; case 145: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ +#line 380 "sql.y" { yylhsminor.yy438 = tSetCreateTableInfo(yymsp[-1].minor.yy421, NULL, NULL, TSQL_CREATE_TABLE); setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE); @@ -2655,9 +3195,11 @@ static void yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } +#line 3199 "sql.c" yymsp[-5].minor.yy438 = yylhsminor.yy438; break; case 146: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ +#line 390 "sql.y" { yylhsminor.yy438 = tSetCreateTableInfo(yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, NULL, TSQL_CREATE_STABLE); setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE); @@ -2665,33 +3207,43 @@ static void yy_reduce( yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } +#line 3211 "sql.c" yymsp[-9].minor.yy438 = yylhsminor.yy438; break; case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ +#line 401 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yylhsminor.yy152 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy421, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } +#line 3221 "sql.c" yymsp[-9].minor.yy152 = yylhsminor.yy152; break; case 148: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ +#line 407 "sql.y" { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; yylhsminor.yy152 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } +#line 3231 "sql.c" yymsp[-12].minor.yy152 = yylhsminor.yy152; break; case 149: /* tagNamelist ::= tagNamelist COMMA ids */ +#line 415 "sql.y" {taosArrayPush(yymsp[-2].minor.yy421, &yymsp[0].minor.yy0); yylhsminor.yy421 = yymsp[-2].minor.yy421; } +#line 3237 "sql.c" yymsp[-2].minor.yy421 = yylhsminor.yy421; break; case 150: /* tagNamelist ::= ids */ +#line 416 "sql.y" {yylhsminor.yy421 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy421, &yymsp[0].minor.yy0);} +#line 3243 "sql.c" yymsp[0].minor.yy421 = yylhsminor.yy421; break; case 151: /* create_table_args ::= ifnotexists ids cpxName AS select */ +#line 420 "sql.y" { yylhsminor.yy438 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy56, TSQL_CREATE_STREAM); setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE); @@ -2699,186 +3251,266 @@ static void yy_reduce( yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } +#line 3255 "sql.c" yymsp[-4].minor.yy438 = yylhsminor.yy438; break; case 152: /* columnlist ::= columnlist COMMA column */ +#line 431 "sql.y" {taosArrayPush(yymsp[-2].minor.yy421, &yymsp[0].minor.yy183); yylhsminor.yy421 = yymsp[-2].minor.yy421; } +#line 3261 "sql.c" yymsp[-2].minor.yy421 = yylhsminor.yy421; break; case 153: /* columnlist ::= column */ +#line 432 "sql.y" {yylhsminor.yy421 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy421, &yymsp[0].minor.yy183);} +#line 3267 "sql.c" yymsp[0].minor.yy421 = yylhsminor.yy421; break; case 154: /* column ::= ids typename */ +#line 436 "sql.y" { 
tSetColumnInfo(&yylhsminor.yy183, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy183); } +#line 3275 "sql.c" yymsp[-1].minor.yy183 = yylhsminor.yy183; break; case 161: /* tagitem ::= NULL */ +#line 451 "sql.y" { yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0); } +#line 3281 "sql.c" yymsp[0].minor.yy430 = yylhsminor.yy430; break; case 162: /* tagitem ::= NOW */ +#line 452 "sql.y" { yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0);} +#line 3287 "sql.c" yymsp[0].minor.yy430 = yylhsminor.yy430; break; case 163: /* tagitem ::= MINUS INTEGER */ case 164: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==164); case 165: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==165); case 166: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==166); +#line 454 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); tVariantCreate(&yylhsminor.yy430, &yymsp[-1].minor.yy0); } +#line 3301 "sql.c" yymsp[-1].minor.yy430 = yylhsminor.yy430; break; case 167: /* select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ +#line 485 "sql.y" { yylhsminor.yy56 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy421, yymsp[-11].minor.yy8, yymsp[-10].minor.yy439, yymsp[-4].minor.yy421, yymsp[-2].minor.yy421, &yymsp[-9].minor.yy400, &yymsp[-7].minor.yy147, &yymsp[-6].minor.yy40, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy421, &yymsp[0].minor.yy166, &yymsp[-1].minor.yy166, yymsp[-3].minor.yy439); } +#line 3309 "sql.c" yymsp[-13].minor.yy56 = yylhsminor.yy56; break; case 168: /* select ::= LP select RP */ +#line 489 "sql.y" {yymsp[-2].minor.yy56 = yymsp[-1].minor.yy56;} +#line 3315 "sql.c" break; case 169: /* union ::= select */ +#line 493 "sql.y" { yylhsminor.yy421 = setSubclause(NULL, yymsp[0].minor.yy56); } +#line 3320 "sql.c" yymsp[0].minor.yy421 = yylhsminor.yy421; break; case 170: /* union ::= union UNION ALL select */ +#line 494 "sql.y" { yylhsminor.yy421 = appendSelectClause(yymsp[-3].minor.yy421, yymsp[0].minor.yy56); } +#line 3326 "sql.c" yymsp[-3].minor.yy421 = yylhsminor.yy421; break; case 171: /* cmd ::= union */ +#line 496 "sql.y" { setSqlInfo(pInfo, yymsp[0].minor.yy421, NULL, TSDB_SQL_SELECT); } +#line 3332 "sql.c" break; case 172: /* select ::= SELECT selcollist */ +#line 503 "sql.y" { yylhsminor.yy56 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy421, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } +#line 3339 "sql.c" yymsp[-1].minor.yy56 = yylhsminor.yy56; break; case 173: /* sclp ::= selcollist COMMA */ +#line 515 "sql.y" {yylhsminor.yy421 = yymsp[-1].minor.yy421;} +#line 3345 "sql.c" yymsp[-1].minor.yy421 = yylhsminor.yy421; break; case 174: /* sclp ::= */ case 206: /* orderby_opt ::= */ yytestcase(yyruleno==206); +#line 516 "sql.y" {yymsp[1].minor.yy421 = 0;} +#line 3352 "sql.c" break; case 175: /* selcollist ::= sclp distinct expr as */ +#line 517 "sql.y" { yylhsminor.yy421 = tSqlExprListAppend(yymsp[-3].minor.yy421, yymsp[-1].minor.yy439, yymsp[-2].minor.yy0.n? 
&yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } +#line 3359 "sql.c" yymsp[-3].minor.yy421 = yylhsminor.yy421; break; case 176: /* selcollist ::= sclp STAR */ +#line 521 "sql.y" { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); yylhsminor.yy421 = tSqlExprListAppend(yymsp[-1].minor.yy421, pNode, 0, 0); } +#line 3368 "sql.c" yymsp[-1].minor.yy421 = yylhsminor.yy421; break; case 177: /* as ::= AS ids */ +#line 529 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } +#line 3374 "sql.c" break; case 178: /* as ::= ids */ +#line 530 "sql.y" { yylhsminor.yy0 = yymsp[0].minor.yy0; } +#line 3379 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 179: /* as ::= */ +#line 531 "sql.y" { yymsp[1].minor.yy0.n = 0; } +#line 3385 "sql.c" break; case 180: /* distinct ::= DISTINCT */ +#line 534 "sql.y" { yylhsminor.yy0 = yymsp[0].minor.yy0; } +#line 3390 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 182: /* from ::= FROM tablelist */ case 183: /* from ::= FROM sub */ yytestcase(yyruleno==183); +#line 540 "sql.y" {yymsp[-1].minor.yy8 = yymsp[0].minor.yy8;} +#line 3397 "sql.c" break; case 184: /* sub ::= LP union RP */ +#line 545 "sql.y" {yymsp[-2].minor.yy8 = addSubqueryElem(NULL, yymsp[-1].minor.yy421, NULL);} +#line 3402 "sql.c" break; case 185: /* sub ::= LP union RP ids */ +#line 546 "sql.y" {yymsp[-3].minor.yy8 = addSubqueryElem(NULL, yymsp[-2].minor.yy421, &yymsp[0].minor.yy0);} +#line 3407 "sql.c" break; case 186: /* sub ::= sub COMMA LP union RP ids */ +#line 547 "sql.y" {yylhsminor.yy8 = addSubqueryElem(yymsp[-5].minor.yy8, yymsp[-2].minor.yy421, &yymsp[0].minor.yy0);} +#line 3412 "sql.c" yymsp[-5].minor.yy8 = yylhsminor.yy8; break; case 187: /* tablelist ::= ids cpxName */ +#line 551 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy8 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } +#line 3421 "sql.c" yymsp[-1].minor.yy8 = yylhsminor.yy8; break; case 188: /* tablelist ::= ids cpxName ids */ +#line 556 "sql.y" { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy8 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } +#line 3430 "sql.c" yymsp[-2].minor.yy8 = yylhsminor.yy8; break; case 189: /* tablelist ::= tablelist COMMA ids cpxName */ +#line 561 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy8 = setTableNameList(yymsp[-3].minor.yy8, &yymsp[-1].minor.yy0, NULL); } +#line 3439 "sql.c" yymsp[-3].minor.yy8 = yylhsminor.yy8; break; case 190: /* tablelist ::= tablelist COMMA ids cpxName ids */ +#line 566 "sql.y" { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy8 = setTableNameList(yymsp[-4].minor.yy8, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } +#line 3448 "sql.c" yymsp[-4].minor.yy8 = yylhsminor.yy8; break; case 191: /* tmvar ::= VARIABLE */ +#line 573 "sql.y" {yylhsminor.yy0 = yymsp[0].minor.yy0;} +#line 3454 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 192: /* interval_option ::= intervalKey LP tmvar RP */ +#line 576 "sql.y" {yylhsminor.yy400.interval = yymsp[-1].minor.yy0; yylhsminor.yy400.offset.n = 0; yylhsminor.yy400.token = yymsp[-3].minor.yy104;} +#line 3460 "sql.c" yymsp[-3].minor.yy400 = yylhsminor.yy400; break; case 193: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ +#line 577 "sql.y" {yylhsminor.yy400.interval = yymsp[-3].minor.yy0; yylhsminor.yy400.offset = yymsp[-1].minor.yy0; yylhsminor.yy400.token = yymsp[-5].minor.yy104;} +#line 3466 "sql.c" yymsp[-5].minor.yy400 = yylhsminor.yy400; break; case 194: /* interval_option 
::= */ +#line 578 "sql.y" {memset(&yymsp[1].minor.yy400, 0, sizeof(yymsp[1].minor.yy400));} +#line 3472 "sql.c" break; case 195: /* intervalKey ::= INTERVAL */ +#line 581 "sql.y" {yymsp[0].minor.yy104 = TK_INTERVAL;} +#line 3477 "sql.c" break; case 196: /* intervalKey ::= EVERY */ +#line 582 "sql.y" {yymsp[0].minor.yy104 = TK_EVERY; } +#line 3482 "sql.c" break; case 197: /* session_option ::= */ +#line 585 "sql.y" {yymsp[1].minor.yy147.col.n = 0; yymsp[1].minor.yy147.gap.n = 0;} +#line 3487 "sql.c" break; case 198: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ +#line 586 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; yymsp[-6].minor.yy147.col = yymsp[-4].minor.yy0; yymsp[-6].minor.yy147.gap = yymsp[-1].minor.yy0; } +#line 3496 "sql.c" break; case 199: /* windowstate_option ::= */ +#line 593 "sql.y" { yymsp[1].minor.yy40.col.n = 0; yymsp[1].minor.yy40.col.z = NULL;} +#line 3501 "sql.c" break; case 200: /* windowstate_option ::= STATE_WINDOW LP ids RP */ +#line 594 "sql.y" { yymsp[-3].minor.yy40.col = yymsp[-1].minor.yy0; } +#line 3506 "sql.c" break; case 201: /* fill_opt ::= */ +#line 598 "sql.y" { yymsp[1].minor.yy421 = 0; } +#line 3511 "sql.c" break; case 202: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ +#line 599 "sql.y" { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); @@ -2887,264 +3519,402 @@ static void yy_reduce( tVariantListInsert(yymsp[-1].minor.yy421, &A, -1, 0); yymsp[-5].minor.yy421 = yymsp[-1].minor.yy421; } +#line 3523 "sql.c" break; case 203: /* fill_opt ::= FILL LP ID RP */ +#line 608 "sql.y" { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-3].minor.yy421 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } +#line 3531 "sql.c" break; case 204: /* sliding_opt ::= SLIDING LP tmvar RP */ +#line 614 "sql.y" {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } +#line 3536 "sql.c" break; case 205: /* sliding_opt ::= */ +#line 615 "sql.y" {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } +#line 3541 "sql.c" break; case 207: /* orderby_opt ::= ORDER BY sortlist */ +#line 627 "sql.y" {yymsp[-2].minor.yy421 = yymsp[0].minor.yy421;} +#line 3546 "sql.c" break; case 208: /* sortlist ::= sortlist COMMA item sortorder */ +#line 629 "sql.y" { yylhsminor.yy421 = tVariantListAppend(yymsp[-3].minor.yy421, &yymsp[-1].minor.yy430, yymsp[0].minor.yy96); } +#line 3553 "sql.c" yymsp[-3].minor.yy421 = yylhsminor.yy421; break; case 209: /* sortlist ::= item sortorder */ +#line 633 "sql.y" { yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[-1].minor.yy430, yymsp[0].minor.yy96); } +#line 3561 "sql.c" yymsp[-1].minor.yy421 = yylhsminor.yy421; break; case 210: /* item ::= ids cpxName */ +#line 638 "sql.y" { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; tVariantCreate(&yylhsminor.yy430, &yymsp[-1].minor.yy0); } +#line 3572 "sql.c" yymsp[-1].minor.yy430 = yylhsminor.yy430; break; case 211: /* sortorder ::= ASC */ +#line 646 "sql.y" { yymsp[0].minor.yy96 = TSDB_ORDER_ASC; } +#line 3578 "sql.c" break; case 212: /* sortorder ::= DESC */ +#line 647 "sql.y" { yymsp[0].minor.yy96 = TSDB_ORDER_DESC;} +#line 3583 "sql.c" break; case 213: /* sortorder ::= */ +#line 648 "sql.y" { yymsp[1].minor.yy96 = TSDB_ORDER_ASC; } +#line 3588 "sql.c" break; case 214: /* groupby_opt ::= */ +#line 656 "sql.y" { yymsp[1].minor.yy421 = 0;} +#line 3593 "sql.c" break; case 215: /* groupby_opt ::= GROUP BY grouplist */ +#line 657 "sql.y" { yymsp[-2].minor.yy421 = yymsp[0].minor.yy421;} +#line 3598 "sql.c" break; 
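The limit_opt and slimit_opt rules below (cases 220-227) are easy to misread because the two spellings of LIMIT bind their operands differently: LIMIT a OFFSET b keeps the order, while the comma form LIMIT a, b swaps it, matching the usual MySQL "offset, count" reading; the empty rule falls back to limit = -1 as the no-limit sentinel. A small illustration with a hypothetical SLimitVal-like struct:

typedef struct { long long limit; long long offset; } LimitVal; /* like SLimitVal */

/* LIMIT a OFFSET b (case 222): limit = a, offset = b */
static LimitVal limitOffsetForm(long long a, long long b) {
  return (LimitVal){ .limit = a, .offset = b };
}

/* LIMIT a, b (case 223): limit = b, offset = a, i.e. the operands swap */
static LimitVal limitCommaForm(long long a, long long b) {
  return (LimitVal){ .limit = b, .offset = a };
}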
case 216: /* grouplist ::= grouplist COMMA item */ +#line 659 "sql.y" { yylhsminor.yy421 = tVariantListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy430, -1); } +#line 3605 "sql.c" yymsp[-2].minor.yy421 = yylhsminor.yy421; break; case 217: /* grouplist ::= item */ +#line 663 "sql.y" { yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[0].minor.yy430, -1); } +#line 3613 "sql.c" yymsp[0].minor.yy421 = yylhsminor.yy421; break; case 218: /* having_opt ::= */ case 228: /* where_opt ::= */ yytestcase(yyruleno==228); - case 270: /* expritem ::= */ yytestcase(yyruleno==270); + case 272: /* expritem ::= */ yytestcase(yyruleno==272); +#line 670 "sql.y" {yymsp[1].minor.yy439 = 0;} +#line 3621 "sql.c" break; case 219: /* having_opt ::= HAVING expr */ case 229: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==229); +#line 671 "sql.y" {yymsp[-1].minor.yy439 = yymsp[0].minor.yy439;} +#line 3627 "sql.c" break; case 220: /* limit_opt ::= */ case 224: /* slimit_opt ::= */ yytestcase(yyruleno==224); +#line 675 "sql.y" {yymsp[1].minor.yy166.limit = -1; yymsp[1].minor.yy166.offset = 0;} +#line 3633 "sql.c" break; case 221: /* limit_opt ::= LIMIT signed */ case 225: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==225); +#line 676 "sql.y" {yymsp[-1].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-1].minor.yy166.offset = 0;} +#line 3639 "sql.c" break; case 222: /* limit_opt ::= LIMIT signed OFFSET signed */ +#line 678 "sql.y" { yymsp[-3].minor.yy166.limit = yymsp[-2].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[0].minor.yy325;} +#line 3644 "sql.c" break; case 223: /* limit_opt ::= LIMIT signed COMMA signed */ +#line 680 "sql.y" { yymsp[-3].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[-2].minor.yy325;} +#line 3649 "sql.c" break; case 226: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +#line 686 "sql.y" {yymsp[-3].minor.yy166.limit = yymsp[-2].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[0].minor.yy325;} +#line 3654 "sql.c" break; case 227: /* slimit_opt ::= SLIMIT signed COMMA signed */ +#line 688 "sql.y" {yymsp[-3].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[-2].minor.yy325;} +#line 3659 "sql.c" break; case 230: /* expr ::= LP expr RP */ +#line 701 "sql.y" {yylhsminor.yy439 = yymsp[-1].minor.yy439; yylhsminor.yy439->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy439->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} +#line 3664 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 231: /* expr ::= ID */ +#line 703 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} +#line 3670 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 232: /* expr ::= ID DOT ID */ +#line 704 "sql.y" { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} +#line 3676 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 233: /* expr ::= ID DOT STAR */ +#line 705 "sql.y" { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} +#line 3682 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 234: /* expr ::= INTEGER */ +#line 707 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} +#line 3688 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 235: /* expr ::= MINUS INTEGER */ case 236: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==236); +#line 708 "sql.y" { yymsp[-1].minor.yy0.n += 
yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} +#line 3695 "sql.c" yymsp[-1].minor.yy439 = yylhsminor.yy439; break; case 237: /* expr ::= FLOAT */ +#line 710 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} +#line 3701 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 238: /* expr ::= MINUS FLOAT */ case 239: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==239); +#line 711 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} +#line 3708 "sql.c" yymsp[-1].minor.yy439 = yylhsminor.yy439; break; case 240: /* expr ::= STRING */ +#line 713 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} +#line 3714 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 241: /* expr ::= NOW */ +#line 714 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } +#line 3720 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 242: /* expr ::= VARIABLE */ +#line 715 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} +#line 3726 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 243: /* expr ::= PLUS VARIABLE */ case 244: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==244); +#line 716 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} +#line 3733 "sql.c" yymsp[-1].minor.yy439 = yylhsminor.yy439; break; case 245: /* expr ::= BOOL */ +#line 718 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} +#line 3739 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 246: /* expr ::= NULL */ +#line 719 "sql.y" { yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} +#line 3745 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; case 247: /* expr ::= ID LP exprlist RP */ +#line 722 "sql.y" { tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy439 = tSqlExprCreateFunction(yymsp[-1].minor.yy421, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } +#line 3751 "sql.c" yymsp[-3].minor.yy439 = yylhsminor.yy439; break; case 248: /* expr ::= ID LP STAR RP */ +#line 725 "sql.y" { tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy439 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } +#line 3757 "sql.c" yymsp[-3].minor.yy439 = yylhsminor.yy439; break; case 249: /* expr ::= expr IS NULL */ +#line 728 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, NULL, TK_ISNULL);} +#line 3763 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 250: /* expr ::= expr IS NOT NULL */ +#line 729 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-3].minor.yy439, NULL, TK_NOTNULL);} +#line 3769 "sql.c" yymsp[-3].minor.yy439 = yylhsminor.yy439; break; case 251: /* expr ::= expr LT expr */ +#line 732 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LT);} +#line 3775 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 252: /* expr ::= expr GT expr */ +#line 733 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_GT);} +#line 3781 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 
253: /* expr ::= expr LE expr */ +#line 734 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LE);} +#line 3787 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 254: /* expr ::= expr GE expr */ +#line 735 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_GE);} +#line 3793 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 255: /* expr ::= expr NE expr */ +#line 736 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_NE);} +#line 3799 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 256: /* expr ::= expr EQ expr */ +#line 737 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_EQ);} +#line 3805 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 257: /* expr ::= expr BETWEEN expr AND expr */ +#line 739 "sql.y" { tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy439); yylhsminor.yy439 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy439, yymsp[-2].minor.yy439, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy439, TK_LE), TK_AND);} +#line 3811 "sql.c" yymsp[-4].minor.yy439 = yylhsminor.yy439; break; case 258: /* expr ::= expr AND expr */ +#line 741 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_AND);} +#line 3817 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 259: /* expr ::= expr OR expr */ +#line 742 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_OR); } +#line 3823 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 260: /* expr ::= expr PLUS expr */ +#line 745 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_PLUS); } +#line 3829 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 261: /* expr ::= expr MINUS expr */ +#line 746 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_MINUS); } +#line 3835 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 262: /* expr ::= expr STAR expr */ +#line 747 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_STAR); } +#line 3841 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 263: /* expr ::= expr SLASH expr */ +#line 748 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_DIVIDE);} +#line 3847 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 264: /* expr ::= expr REM expr */ +#line 749 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_REM); } +#line 3853 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; case 265: /* expr ::= expr LIKE expr */ +#line 752 "sql.y" {yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LIKE); } +#line 3859 "sql.c" yymsp[-2].minor.yy439 = yylhsminor.yy439; break; - case 266: /* expr ::= expr IN LP exprlist RP */ + case 266: /* expr ::= expr MATCH expr */ +#line 755 "sql.y" +{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_MATCH); } +#line 3865 "sql.c" + yymsp[-2].minor.yy439 = yylhsminor.yy439; + break; + case 267: /* expr ::= expr NMATCH expr */ +#line 756 "sql.y" +{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_NMATCH); } +#line 3871 "sql.c" + yymsp[-2].minor.yy439 = yylhsminor.yy439; + break; + case 268: /* expr ::= expr IN LP exprlist RP */ +#line 759 "sql.y" 
{yylhsminor.yy439 = tSqlExprCreate(yymsp[-4].minor.yy439, (tSqlExpr*)yymsp[-1].minor.yy421, TK_IN); } +#line 3877 "sql.c" yymsp[-4].minor.yy439 = yylhsminor.yy439; break; - case 267: /* exprlist ::= exprlist COMMA expritem */ + case 269: /* exprlist ::= exprlist COMMA expritem */ +#line 767 "sql.y" {yylhsminor.yy421 = tSqlExprListAppend(yymsp[-2].minor.yy421,yymsp[0].minor.yy439,0, 0);} +#line 3883 "sql.c" yymsp[-2].minor.yy421 = yylhsminor.yy421; break; - case 268: /* exprlist ::= expritem */ + case 270: /* exprlist ::= expritem */ +#line 768 "sql.y" {yylhsminor.yy421 = tSqlExprListAppend(0,yymsp[0].minor.yy439,0, 0);} +#line 3889 "sql.c" yymsp[0].minor.yy421 = yylhsminor.yy421; break; - case 269: /* expritem ::= expr */ + case 271: /* expritem ::= expr */ +#line 769 "sql.y" {yylhsminor.yy439 = yymsp[0].minor.yy439;} +#line 3895 "sql.c" yymsp[0].minor.yy439 = yylhsminor.yy439; break; - case 271: /* cmd ::= RESET QUERY CACHE */ + case 273: /* cmd ::= RESET QUERY CACHE */ +#line 773 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} +#line 3901 "sql.c" break; - case 272: /* cmd ::= SYNCDB ids REPLICA */ + case 274: /* cmd ::= SYNCDB ids REPLICA */ +#line 776 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} +#line 3906 "sql.c" break; - case 273: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 275: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ +#line 779 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3915 "sql.c" break; - case 274: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 276: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ +#line 785 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3154,22 +3924,28 @@ static void yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3928 "sql.c" break; - case 275: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + case 277: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ +#line 795 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3937 "sql.c" break; - case 276: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 278: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ +#line 802 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3946 "sql.c" break; - case 277: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 279: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ +#line 807 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3179,8 +3955,10 @@ static void yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3959 "sql.c" break; - case 278: /* cmd ::= ALTER TABLE ids 
cpxName CHANGE TAG ids ids */ + case 280: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ +#line 817 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3193,8 +3971,10 @@ static void yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3975 "sql.c" break; - case 279: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 281: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ +#line 830 "sql.y" { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3205,22 +3985,28 @@ static void yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3989 "sql.c" break; - case 280: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + case 282: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ +#line 841 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 3998 "sql.c" break; - case 281: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 283: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ +#line 848 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4007 "sql.c" break; - case 282: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 284: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ +#line 854 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3230,22 +4016,28 @@ static void yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4020 "sql.c" break; - case 283: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + case 285: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ +#line 864 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4029 "sql.c" break; - case 284: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 286: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ +#line 871 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4038 "sql.c" break; - case 285: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 287: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ +#line 876 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3255,8 +4047,10 @@ static void yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); 
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4051 "sql.c" break; - case 286: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 288: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ +#line 886 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3269,8 +4063,10 @@ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4067 "sql.c" break; - case 287: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + case 289: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ +#line 899 "sql.y" { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3281,30 +4077,39 @@ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4081 "sql.c" break; - case 288: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + case 290: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ +#line 910 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +#line 4090 "sql.c" break; - case 289: /* cmd ::= KILL CONNECTION INTEGER */ + case 291: /* cmd ::= KILL CONNECTION INTEGER */ +#line 917 "sql.y" {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} +#line 4095 "sql.c" break; - case 290: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 292: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ +#line 918 "sql.y" {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} +#line 4100 "sql.c" break; - case 291: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 293: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ +#line 919 "sql.y" {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} +#line 4105 "sql.c" break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyruleno<sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0]) ); - yygoto = yyRuleInfo[yyruleno].lhs; - yysize = yyRuleInfo[yyruleno].nrhs; + assert( yyruleno<sizeof(yyRuleInfoLhs)/sizeof(yyRuleInfoLhs[0]) ); + yygoto = yyRuleInfoLhs[yyruleno]; + yysize = yyRuleInfoNRhs[yyruleno]; yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto); yymsp += yysize+1; yypParser->yytos = yymsp; yymsp->stateno = (YYACTIONTYPE)yyact; yymsp->major = (YYCODETYPE)yygoto; yyTraceShift(yypParser, yyact, "...
then shift"); + return yyact; } /* @@ -3328,7 +4134,8 @@ static void yy_reduce( static void yy_parse_failed( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); @@ -3339,7 +4146,8 @@ static void yy_parse_failed( ** parser fails */ /************ Begin %parse_failure code ***************************************/ /************ End %parse_failure code *****************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } #endif /* YYNOERRORRECOVERY */ @@ -3351,9 +4159,11 @@ static void yy_syntax_error( int yymajor, /* The major type of the error token */ ParseTOKENTYPE yyminor /* The minor type of the error token */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ +#line 37 "sql.y" pInfo->valid = false; int32_t outputBufLen = tListLen(pInfo->msg); @@ -3376,8 +4186,10 @@ static void yy_syntax_error( } assert(len <= outputBufLen); +#line 4190 "sql.c" /************ End %syntax_error code ******************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } /* @@ -3386,7 +4198,8 @@ static void yy_syntax_error( static void yy_accept( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); @@ -3399,9 +4212,11 @@ static void yy_accept( /* Here code is inserted which will be executed whenever the ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ - +#line 61 "sql.y" +#line 4217 "sql.c" /*********** End %parse_accept code *******************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } /* The main parser program. @@ -3430,45 +4245,47 @@ void Parse( ParseARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; - unsigned int yyact; /* The parser action. */ + YYACTIONTYPE yyact; /* The parser action. 
*/ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif #ifdef YYERRORSYMBOL int yyerrorhit = 0; /* True if yymajor has invoked an error */ #endif - yyParser *yypParser; /* The parser */ + yyParser *yypParser = (yyParser*)yyp; /* The parser */ + ParseCTX_FETCH + ParseARG_STORE - yypParser = (yyParser*)yyp; assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif - ParseARG_STORE; + yyact = yypParser->yytos->stateno; #ifndef NDEBUG if( yyTraceFILE ){ - int stateno = yypParser->yytos->stateno; - if( stateno < YY_MIN_REDUCE ){ + if( yyact < YY_MIN_REDUCE ){ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", - yyTracePrompt,yyTokenName[yymajor],stateno); + yyTracePrompt,yyTokenName[yymajor],yyact); }else{ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", - yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE); + yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE); } } #endif do{ - yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); + assert( yyact==yypParser->yytos->stateno ); + yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); if( yyact >= YY_MIN_REDUCE ){ - yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor); + yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, + yyminor ParseCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ - yy_shift(yypParser,yyact,yymajor,yyminor); + yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; #endif - yymajor = YYNOCODE; + break; }else if( yyact==YY_ACCEPT_ACTION ){ yypParser->yytos--; yy_accept(yypParser); @@ -3519,10 +4336,9 @@ void Parse( yymajor = YYNOCODE; }else{ while( yypParser->yytos >= yypParser->yystack - && yymx != YYERRORSYMBOL && (yyact = yy_find_reduce_action( yypParser->yytos->stateno, - YYERRORSYMBOL)) >= YY_MIN_REDUCE + YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE ){ yy_pop_parser_stack(yypParser); } @@ -3539,6 +4355,8 @@ void Parse( } yypParser->yyerrcnt = 3; yyerrorhit = 1; + if( yymajor==YYNOCODE ) break; + yyact = yypParser->yytos->stateno; #elif defined(YYNOERRORRECOVERY) /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to ** do any kind of error recovery. Instead, simply invoke the syntax @@ -3549,8 +4367,7 @@ void Parse( */ yy_syntax_error(yypParser,yymajor, yyminor); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - yymajor = YYNOCODE; - + break; #else /* YYERRORSYMBOL is not defined */ /* This is what we do if the grammar does not define ERROR: ** @@ -3572,10 +4389,10 @@ void Parse( yypParser->yyerrcnt = -1; #endif } - yymajor = YYNOCODE; + break; #endif } - }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); + }while( yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ yyStackEntry *i; @@ -3590,3 +4407,17 @@ void Parse( #endif return; } + +/* +** Return the fallback token corresponding to canonical token iToken, or +** 0 if iToken has no fallback. 
+*/
+int ParseFallback(int iToken){
+#ifdef YYFALLBACK
+  assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) );
+  return yyFallback[iToken];
+#else
+  (void)iToken;
+  return 0;
+#endif
+}
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index e958a8e5ec5b6542d609028ee052d21a9a84d397..9ea5fd539244820f111a3fbb3c60aee088e727c5 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -407,7 +407,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64
   if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE
     || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP
     || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META
-    || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS)
+    || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_CM_ALTER_TABLE)
     pContext->connType = RPC_CONN_TCPC;
 
   pContext->rid = taosAddRef(tsRpcRefId, pContext);
diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h
index ec6b057aef142fb938993b3a27717c5e64937258..4b650d3993a54f6a98caf00a3605feb37e972ebd 100644
--- a/src/tsdb/inc/tsdbBuffer.h
+++ b/src/tsdb/inc/tsdbBuffer.h
@@ -29,6 +29,7 @@ typedef struct {
   int tBufBlocks;
   int nBufBlocks;
   int nRecycleBlocks;
+  int nElasticBlocks;
   int64_t index;
   SList* bufBlockList;
 } STsdbBufPool;
@@ -41,6 +42,10 @@ int tsdbOpenBufPool(STsdbRepo* pRepo);
 void tsdbCloseBufPool(STsdbRepo* pRepo);
 SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
 int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
-void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode);
+void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic);
+
+// exposed for tsdbHealth.c
+STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize);
+void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock);
 
 #endif /* _TD_TSDB_BUFFER_H_ */
diff --git a/src/tsdb/inc/tsdbHealth.h b/src/tsdb/inc/tsdbHealth.h
new file mode 100644
index 0000000000000000000000000000000000000000..324f4312e05fc0ca0200c319728bf692bf476bf6
--- /dev/null
+++ b/src/tsdb/inc/tsdbHealth.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef _TD_TSDB_HEALTH_H_
+#define _TD_TSDB_HEALTH_H_
+
+bool tsdbUrgeQueryFree(STsdbRepo* pRepo);
+int32_t tsdbInsertNewBlock(STsdbRepo* pRepo);
+
+bool tsdbIdleMemEnough();
+bool tsdbAllowNewBlock(STsdbRepo* pRepo);
+
+#endif /* _TD_TSDB_HEALTH_H_ */
diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h
index 51801c843c279f10e9e0895a0f2dee2839a3f6a2..8ce5e7ade80b2006ac8c39fec178994073c5a26d 100644
--- a/src/tsdb/inc/tsdbMeta.h
+++ b/src/tsdb/inc/tsdbMeta.h
@@ -100,7 +100,7 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k
 }
 
 static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) {
-  STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ?
pTable->pSuper : pTable; + STable* pDTable = (pTable->pSuper != NULL) ? pTable->pSuper : pTable; // for performance purpose STSchema* pSchema = NULL; STSchema* pTSchema = NULL; diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index 532907ae01be576e40feea2969761846f07170b3..80e92975799f47d68ff72ef80a52efb6fe901b5e 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -97,6 +97,7 @@ struct STsdbRepo { SMergeBuf mergeBuf; //used when update=2 int8_t compactState; // compact state: inCompact/noCompact/waitingCompact? + pthread_t* pthread; }; #define REPO_ID(r) (r)->config.tsdbId diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index e675bf6f9de04021112d43a1db70cf56cf430f08..70589031f6516a129a5a683b0e76edb23b814e15 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -14,12 +14,10 @@ */ #include "tsdbint.h" +#include "tsdbHealth.h" #define POOL_IS_EMPTY(b) (listNEles((b)->bufBlockList) == 0) -static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize); -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock); - // ---------------- INTERNAL FUNCTIONS ---------------- STsdbBufPool *tsdbNewBufPool() { STsdbBufPool *pBufPool = (STsdbBufPool *)calloc(1, sizeof(*pBufPool)); @@ -65,10 +63,10 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) { STsdbBufPool *pPool = pRepo->pPool; ASSERT(pPool != NULL); - pPool->bufBlockSize = pCfg->cacheBlockSize * 1024 * 1024; // MB pPool->tBufBlocks = pCfg->totalBlocks; pPool->nBufBlocks = 0; + pPool->nElasticBlocks = 0; pPool->index = 0; pPool->nRecycleBlocks = 0; @@ -120,6 +118,18 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { STsdbBufPool *pBufPool = pRepo->pPool; while (POOL_IS_EMPTY(pBufPool)) { + if(tsDeadLockKillQuery) { + // supply new Block + if(tsdbInsertNewBlock(pRepo) > 0) { + tsdbWarn("vgId:%d add new elastic block . 
elasticBlocks=%d cur free Blocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->bufBlockList->numOfEles); + break; + } else { + // no newBlock, kill query free + if(!tsdbUrgeQueryFree(pRepo)) + tsdbWarn("vgId:%d Urge query free thread start failed.", REPO_ID(pRepo)); + } + } + pRepo->repoLocked = false; pthread_cond_wait(&(pBufPool->poolNotEmpty), &(pRepo->mutex)); pRepo->repoLocked = true; @@ -139,11 +149,11 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { } // ---------------- LOCAL FUNCTIONS ---------------- -static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { +STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { STsdbBufBlock *pBufBlock = (STsdbBufBlock *)malloc(sizeof(*pBufBlock) + bufBlockSize); if (pBufBlock == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + return NULL; } pBufBlock->blockId = 0; @@ -151,13 +161,9 @@ static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { pBufBlock->remain = bufBlockSize; return pBufBlock; - -_err: - tsdbFreeBufBlock(pBufBlock); - return NULL; } -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } + void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { if (oldTotalBlocks == pRepo->config.totalBlocks) { @@ -193,10 +199,16 @@ err: return err; } -void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) { +void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic) { STsdbBufBlock *pBufBlock = NULL; tdListNodeGetData(pPool->bufBlockList, pNode, (void *)(&pBufBlock)); tsdbFreeBufBlock(pBufBlock); free(pNode); - pPool->nBufBlocks--; -} + if(bELastic) + { + pPool->nElasticBlocks--; + tsdbWarn("pPool=%p elastic block reduce one . nElasticBlocks=%d cur free Blocks=%d", pPool, pPool->nElasticBlocks, pPool->bufBlockList->numOfEles); + } + else + pPool->nBufBlocks--; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 15fc3cc47d663aa6f2fb8910ebbadc03861418c7..03110487807076bf8ac2ac7026ffdb828ea4c7c6 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -1418,13 +1418,11 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt while (true) { key1 = (*iter >= pDataCols->numOfRows) ? 
INT64_MAX : dataColsKeyAt(pDataCols, *iter); - bool isRowDel = false; SMemRow row = tsdbNextIterRow(pCommitIter->pIter); if (row == NULL || memRowKey(row) > maxKey) { key2 = INT64_MAX; } else { key2 = memRowKey(row); - isRowDel = memRowDeleted(row); } if (key1 == INT64_MAX && key2 == INT64_MAX) break; @@ -1439,36 +1437,33 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt pTarget->numOfRows++; (*iter)++; } else if (key1 > key2) { - if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); - ASSERT(pSchema != NULL); - } - - tdAppendMemRowToDataCol(row, pSchema, pTarget, true); + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); + ASSERT(pSchema != NULL); } + tdAppendMemRowToDataCol(row, pSchema, pTarget, true); + tSkipListIterNext(pCommitIter->pIter); } else { - if (update) { - if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); - ASSERT(pSchema != NULL); - } - - tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE); - } - } else { - ASSERT(!isRowDel); - + if (update != TD_ROW_OVERWRITE_UPDATE) { + //copy disk data for (int i = 0; i < pDataCols->numOfCols; i++) { //TODO: dataColAppendVal may fail dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } - pTarget->numOfRows++; + if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++; + } + if (update != TD_ROW_DISCARD_UPDATE) { + //copy mem data + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); + ASSERT(pSchema != NULL); + } + + tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE); } (*iter)++; tSkipListIterNext(pCommitIter->pIter); diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c new file mode 100644 index 0000000000000000000000000000000000000000..8198c480334912b1ce373ceca7b82409f5a644f2 --- /dev/null +++ b/src/tsdb/src/tsdbHealth.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#include "os.h"
+#include "taosmsg.h"
+#include "tarray.h"
+#include "query.h"
+#include "tglobal.h"
+#include "tlist.h"
+#include "tsdbint.h"
+#include "tsdbBuffer.h"
+#include "tsdbLog.h"
+#include "tsdbHealth.h"
+#include "ttimer.h"
+#include "tthread.h"
+
+
+// returns the number of newly allocated blocks
+int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) {
+  STsdbBufPool *pPool = pRepo->pPool;
+  int32_t cnt = 0;
+
+  if(tsdbAllowNewBlock(pRepo)) {
+    STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize);
+    if (pBufBlock) {
+      if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) {
+        // append failed, release the block
+        tsdbFreeBufBlock(pBufBlock);
+      } else {
+        pPool->nElasticBlocks++;
+        cnt++;
+      }
+    }
+  }
+  return cnt;
+}
+
+// runs in another thread
+void* cbKillQueryFree(void* param) {
+  STsdbRepo* pRepo = (STsdbRepo*)param;
+  // notify the vnode to kill queries and free blocks
+  if(pRepo->appH.notifyStatus) {
+    pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_NOBLOCK, TSDB_CODE_SUCCESS);
+  }
+
+  // release the thread handle
+  if(pRepo->pthread){
+    void* p = pRepo->pthread;
+    pRepo->pthread = NULL;
+    free(p);
+  }
+
+  return NULL;
+}
+
+// returns true if a free was triggered, false if nothing was done
+bool tsdbUrgeQueryFree(STsdbRepo * pRepo) {
+  // check whether a previous urge thread is still running
+  if(pRepo->pthread && taosThreadRunning(pRepo->pthread)) {
+    tsdbWarn("vgId:%d previous urge thread is still running. nBlocks=%d nElasticBlocks=%d", REPO_ID(pRepo), pRepo->pPool->nBufBlocks, pRepo->pPool->nElasticBlocks);
+    return false;
+  }
+  // create a new urge thread
+  pRepo->pthread = taosCreateThread(cbKillQueryFree, pRepo);
+  if(pRepo->pthread == NULL) {
+    tsdbError("vgId:%d failed to create urge thread.", REPO_ID(pRepo));
+    return false;
+  }
+  return true;
+}
+
+bool tsdbAllowNewBlock(STsdbRepo* pRepo) {
+  int32_t nMaxElastic = pRepo->config.totalBlocks/3;
+  STsdbBufPool* pPool = pRepo->pPool;
+  if(pPool->nElasticBlocks >= nMaxElastic) {
+    tsdbWarn("vgId:%d tsdbAllowNewBlock return false.
nElasticBlock(%d) >= MaxElasticBlocks(%d)", REPO_ID(pRepo), pPool->nElasticBlocks, nMaxElastic); + return false; + } + return true; +} + +bool tsdbNoProblem(STsdbRepo* pRepo) { + if(listNEles(pRepo->pPool->bufBlockList) == 0) + return false; + return true; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index b2e6fe89161d0e9bceaf74a46807f51ec402fb2a..c2021963e0d0c8be4ed42588549153dcd20be63c 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -16,6 +16,8 @@ // no test file errors here #include "taosdef.h" #include "tsdbint.h" +#include "ttimer.h" +#include "tthread.h" #define IS_VALID_PRECISION(precision) \ (((precision) >= TSDB_TIME_PRECISION_MILLI) && ((precision) <= TSDB_TIME_PRECISION_NANO)) @@ -126,6 +128,10 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { terrno = TSDB_CODE_SUCCESS; tsdbStopStream(pRepo); + if(pRepo->pthread){ + taosDestoryThread(pRepo->pthread); + pRepo->pthread = NULL; + } if (toCommit) { tsdbSyncCommit(repo); @@ -547,6 +553,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { pRepo->appH = *pAppH; } pRepo->repoLocked = false; + pRepo->pthread = NULL; int code = pthread_mutex_init(&(pRepo->mutex), NULL); if (code != 0) { diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index e766d97a97a5905db87691426d282a219eef9d68..3890dca5b96c26009dcf3ca72205ca4b1725aa29 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -99,17 +99,22 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { STsdbBufPool *pBufPool = pRepo->pPool; SListNode *pNode = NULL; - bool recycleBlocks = pBufPool->nRecycleBlocks > 0; + bool addNew = false; if (tsdbLockRepo(pRepo) < 0) return -1; while ((pNode = tdListPopHead(pMemTable->bufBlockList)) != NULL) { if (pBufPool->nRecycleBlocks > 0) { - tsdbRecycleBufferBlock(pBufPool, pNode); + tsdbRecycleBufferBlock(pBufPool, pNode, false); pBufPool->nRecycleBlocks -= 1; } else { - tdListAppendNode(pBufPool->bufBlockList, pNode); + if(pBufPool->nElasticBlocks > 0 && listNEles(pBufPool->bufBlockList) > 2) { + tsdbRecycleBufferBlock(pBufPool, pNode, true); + } else { + tdListAppendNode(pBufPool->bufBlockList, pNode); + addNew = true; + } } } - if (!recycleBlocks) { + if (addNew) { int code = pthread_cond_signal(&pBufPool->poolNotEmpty); if (code != 0) { if (tsdbUnlockRepo(pRepo) < 0) return -1; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e3ad254bbb1a4ce0a3c504e9a799a109b7e1159a..da5481dae184b1cc0aa280f9e3d4a952571abfe1 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -288,8 +288,6 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(group, j); STableCheckInfo info = { .lastKey = pKeyInfo->lastKey, .pTableObj = pKeyInfo->pTable }; - info.tableId = ((STable*)(pKeyInfo->pTable))->tableId; - assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); @@ -2218,7 +2216,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO SBlock* pBlock = pTableCheck->pCompInfo->blocks; sup.numOfBlocksPerTable[numOfQualTables] = pTableCheck->numOfBlocks; - char* buf = calloc(1, sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); + char* buf = malloc(sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); if (buf == NULL) { cleanBlockOrderSupporter(&sup, numOfQualTables); return 
TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -3618,8 +3616,6 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC for(int32_t i = 0; i < size; ++i) { STableKeyInfo *pKeyInfo = taosArrayGet(pTableList, i); - assert(((STable*)pKeyInfo->pTable)->type == TSDB_CHILD_TABLE); - tsdbRefTable(pKeyInfo->pTable); STableKeyInfo info = {.pTable = pKeyInfo->pTable, .lastKey = skey}; @@ -3709,6 +3705,12 @@ static bool tableFilterFp(const void* pNode, void* param) { case TSDB_RELATION_LIKE: { return ret == 0; } + case TSDB_RELATION_MATCH: { + return ret == 0; + } + case TSDB_RELATION_NMATCH: { + return ret == 0; + } case TSDB_RELATION_IN: { return ret == 1; } @@ -4042,6 +4044,10 @@ static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { pCond->start->v = queryColInfo->q; } else if (optr == TSDB_RELATION_LIKE) { assert(0); + } else if (optr == TSDB_RELATION_MATCH) { + assert(0); + } else if (optr == TSDB_RELATION_NMATCH) { + assert(0); } return TSDB_CODE_SUCCESS; @@ -4199,7 +4205,9 @@ static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, S if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { if (pQueryInfo->optr == TSDB_RELATION_IN) { addToResult = pQueryInfo->compare(name, pQueryInfo->q); - } else if (pQueryInfo->optr == TSDB_RELATION_LIKE) { + } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || + pQueryInfo->optr == TSDB_RELATION_MATCH || + pQueryInfo->optr == TSDB_RELATION_NMATCH) { addToResult = !pQueryInfo->compare(name, pQueryInfo->q); } } else { @@ -4231,7 +4239,9 @@ void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *re param->setupInfoFn(pExpr, param->pExtInfo); tQueryInfo *pQueryInfo = pExpr->_node.info; - if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr != TSDB_RELATION_IN)) { + if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE + && pQueryInfo->optr != TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_NMATCH + && pQueryInfo->optr != TSDB_RELATION_IN)) { queryIndexedColumn(pSkipList, pQueryInfo, result); } else { queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index e41b544d00e55f7eece904c5957ef9c06063e6c3..40069d7d273caa14ce3b80467b25d68ea476fb75 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -33,6 +33,7 @@ extern "C" { #endif typedef void (*__cache_free_fn_t)(void*); +typedef void (*__cache_trav_fn_t)(void*, void*); typedef struct SCacheStatis { int64_t missCount; @@ -176,7 +177,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj); * @param fp * @return */ -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp); +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1); /** * stop background refresh worker thread diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index e29015c7cbbe49d7d2251a0c3fc2e97a00a4d5c4..1125516d34c65da1b5d0c47dadd126aa0b1959fa 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -22,10 +22,11 @@ extern "C" { #include "os.h" -#define TSDB_PATTERN_MATCH 0 -#define TSDB_PATTERN_NOMATCH 1 -#define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_DEFAULT_LEN 100 +#define TSDB_PATTERN_MATCH 0 +#define TSDB_PATTERN_NOMATCH 1 +#define TSDB_PATTERN_NOWILDCARDMATCH 2 +#define TSDB_PATTERN_STRING_DEFAULT_LEN 100 +#define TSDB_REGEX_STRING_DEFAULT_LEN 128 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * 
FLT_EPSILON)) @@ -82,6 +83,9 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight); int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight); int32_t compareStrPatternComp(const void* pLeft, const void* pRight); +int32_t compareStrRegexComp(const void* pLeft, const void* pRight); +int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight); +int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight); int32_t compareFindItemInSet(const void *pLeft, const void* pRight); int32_t compareWStrPatternComp(const void* pLeft, const void* pRight); diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index d03ce6e0f1f34478951a84b2ab18020f5cbec92b..cf8977ce06c898fcdee8d21eedf5ed8d0f47f263 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 116 // 110 + 6 with lossy option +#define TSDB_CFG_MAX_NUM 123 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 diff --git a/src/util/inc/tlosertree.h b/src/util/inc/tlosertree.h index 4c731625dd5c7950c321b2180ca913e49362059b..58f2ca8c5c81408b35c2c9435357deeb2b0f13a4 100644 --- a/src/util/inc/tlosertree.h +++ b/src/util/inc/tlosertree.h @@ -26,7 +26,7 @@ typedef int (*__merge_compare_fn_t)(const void *, const void *, void *param); typedef struct SLoserTreeNode { int32_t index; - void * pData; + void *pData; } SLoserTreeNode; typedef struct SLoserTreeInfo { @@ -34,8 +34,7 @@ typedef struct SLoserTreeInfo { int32_t totalEntries; __merge_compare_fn_t comparFn; void * param; - - SLoserTreeNode *pNode; + SLoserTreeNode *pNode; } SLoserTreeInfo; uint32_t tLoserTreeCreate(SLoserTreeInfo **pTree, int32_t numOfEntries, void *param, __merge_compare_fn_t compareFn); diff --git a/src/util/inc/tthread.h b/src/util/inc/tthread.h new file mode 100644 index 0000000000000000000000000000000000000000..7443ad706dcbef529d857fe823cddd0cc1efbdd3 --- /dev/null +++ b/src/util/inc/tthread.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#ifndef TDENGINE_TTHREAD_H
+#define TDENGINE_TTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "os.h"
+#include "taosdef.h"
+
+// create a new thread
+pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param);
+// destroy a thread
+bool taosDestoryThread(pthread_t* pthread);
+// returns true if the thread is running
+bool taosThreadRunning(pthread_t* pthread);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_TTHREAD_H
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index a22ce34a0e3030f409948cfcf3e739335d6417cb..6577a0a0f4710951cf240f792d68f1afb2d37569 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -741,17 +741,19 @@ void taosHashTableResize(SHashObj *pHashObj) {
 }
 
 SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
-  SHashNode *pNewNode = calloc(1, sizeof(SHashNode) + keyLen + dsize);
+  SHashNode *pNewNode = malloc(sizeof(SHashNode) + keyLen + dsize);
 
   if (pNewNode == NULL) {
     uError("failed to allocate memory, reason:%s", strerror(errno));
     return NULL;
   }
 
-  pNewNode->keyLen = (uint32_t)keyLen;
+  pNewNode->keyLen  = (uint32_t)keyLen;
   pNewNode->hashVal = hashVal;
   pNewNode->dataLen = (uint32_t) dsize;
-  pNewNode->count = 1;
+  pNewNode->count   = 1;
+  pNewNode->removed = 0;
+  pNewNode->next    = NULL;
 
   memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize);
   memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen);
diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c
index d0d126c1e4d7f2e7c0913585df6031b556291fc3..007ce0682974d06bf506a82d8bbbc809092eb9e4 100644
--- a/src/util/src/tarray.c
+++ b/src/util/src/tarray.c
@@ -24,11 +24,12 @@ void* taosArrayInit(size_t size, size_t elemSize) {
     size = TARRAY_MIN_SIZE;
   }
 
-  SArray* pArray = calloc(1, sizeof(SArray));
+  SArray* pArray = malloc(sizeof(SArray));
   if (pArray == NULL) {
     return NULL;
   }
 
+  pArray->size = 0;
   pArray->pData = calloc(size, elemSize);
   if (pArray->pData == NULL) {
     free(pArray);
@@ -112,14 +113,15 @@ void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfEle
     i += 1;
   }
 
-  assert(i == pData[numOfElems - 1] + 1);
+  assert(i == pData[numOfElems - 1] + 1 && i <= size);
 
-  int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1;
   int32_t srcIndex = pData[numOfElems - 1] + 1;
-
-  char* dst = TARRAY_GET_ELEM(pArray, dstIndex);
-  char* src = TARRAY_GET_ELEM(pArray, srcIndex);
-  memmove(dst, src, pArray->elemSize * (pArray->size - numOfElems));
+  int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1;
+  if (pArray->size - srcIndex > 0) {
+    char* dst = TARRAY_GET_ELEM(pArray, dstIndex);
+    char* src = TARRAY_GET_ELEM(pArray, srcIndex);
+    memmove(dst, src, pArray->elemSize * (pArray->size - srcIndex));
+  }
 
   pArray->size -= numOfElems;
 }
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index b4cf2b6658c6247b110da112838ef9d732d08169..589d3d4fa57c42b472319673a72d2e7ab599689f 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -71,6 +71,8 @@ static pthread_once_t cacheThreadInit = PTHREAD_ONCE_INIT;
 static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
 static SArray* pCacheArrayList = NULL;
 static bool stopRefreshWorker = false;
+static bool refreshWorkerNormalStopped = false;
+static bool refreshWorkerUnexpectedStopped = false;
 
 static void doInitRefreshThread(void) {
   pCacheArrayList = taosArrayInit(4, POINTER_BYTES);
@@ -503,7 +505,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
 
 typedef struct SHashTravSupp {
   SCacheObj* pCacheObj;
  int64_t    time;
-  __cache_free_fn_t fp;
+
__cache_trav_fn_t fp; + void* param1; } SHashTravSupp; static bool travHashTableEmptyFn(void* param, void* data) { @@ -537,8 +540,10 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 2 seconds. - for (int i = 0; i < 40&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { + // But in the dll, the child thread will be killed before atexit takes effect. + while(atomic_load_8(&pCacheObj->deleting) != 0) { + if (refreshWorkerNormalStopped) break; + if (refreshWorkerUnexpectedStopped) return; taosMsleep(50); } @@ -641,7 +646,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) { // todo memory leak if there are object with refcount greater than 0 in hash table? taosHashCleanup(pCacheObj->pHashTable); - taosTrashcanEmpty(pCacheObj, false); + taosTrashcanEmpty(pCacheObj, true); __cache_lock_destroy(pCacheObj); @@ -663,20 +668,26 @@ bool travHashTableFn(void* param, void* data) { } if (ps->fp) { - (ps->fp)(pNode->data); + (ps->fp)(pNode->data, ps->param1); } // do not remove element in hash table return true; } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_trav_fn_t fp, void* param1) { assert(pCacheObj != NULL); - SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time}; + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1}; taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } +void taosCacheRefreshWorkerUnexpectedStopped(void) { + if(!refreshWorkerNormalStopped) { + refreshWorkerUnexpectedStopped=true; + } +} + void* taosCacheTimedRefresh(void *handle) { assert(pCacheArrayList != NULL); uDebug("cache refresh thread starts"); @@ -685,6 +696,7 @@ void* taosCacheTimedRefresh(void *handle) { const int32_t SLEEP_DURATION = 500; //500 ms int64_t count = 0; + atexit(taosCacheRefreshWorkerUnexpectedStopped); while(1) { taosMsleep(SLEEP_DURATION); @@ -737,7 +749,7 @@ void* taosCacheTimedRefresh(void *handle) { // refresh data in hash table if (elemInHash > 0) { int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, NULL); + doCacheRefresh(pCacheObj, now, NULL, NULL); } taosTrashcanEmpty(pCacheObj, false); @@ -749,20 +761,21 @@ void* taosCacheTimedRefresh(void *handle) { pCacheArrayList = NULL; pthread_mutex_destroy(&guard); + refreshWorkerNormalStopped=true; uDebug("cache refresh thread quits"); return NULL; } -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1) { if (pCacheObj == NULL) { return; } int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, fp); + doCacheRefresh(pCacheObj, now, fp, param1); } -void taosStopCacheRefreshWorker() { - stopRefreshWorker = false; +void taosStopCacheRefreshWorker(void) { + stopRefreshWorker = true; } \ No newline at end of file diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 47cc75131802fce5c72e7fdd3ae6675d34917a8c..482dd8a6a15d1c7bf6aca76159b95b71ef244dd2 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -12,11 +12,17 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ +#define _BSD_SOURCE +#define _GNU_SOURCE +#define _XOPEN_SOURCE +#define _DEFAULT_SOURCE -#include "os.h" -#include "ttype.h" #include "tcompare.h" +#include "tulog.h" #include "hash.h" +#include "regex.h" +#include "os.h" +#include "ttype.h" int32_t setCompareBytes1(const void *pLeft, const void *pRight) { return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; @@ -344,6 +350,51 @@ int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } +int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight) { + return compareStrRegexComp(pLeft, pRight); +} + +int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight) { + return compareStrRegexComp(pLeft, pRight) ? 0 : 1; +} + +int32_t compareStrRegexComp(const void* pLeft, const void* pRight) { + size_t sz = varDataLen(pRight); + char *pattern = malloc(sz + 1); + memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); + pattern[sz] = 0; + + sz = varDataLen(pLeft); + char *str = malloc(sz + 1); + memcpy(str, varDataVal(pLeft), sz); + str[sz] = 0; + + int errCode = 0; + regex_t regex; + char msgbuf[256] = {0}; + + int cflags = REG_EXTENDED; + if ((errCode = regcomp(®ex, pattern, cflags)) != 0) { + regerror(errCode, ®ex, msgbuf, sizeof(msgbuf)); + uError("Failed to compile regex pattern %s. reason %s", pattern, msgbuf); + regfree(®ex); + free(str); + free(pattern); + return 1; + } + + errCode = regexec(®ex, str, 0, NULL, 0); + if (errCode != 0 && errCode != REG_NOMATCH) { + regerror(errCode, ®ex, msgbuf, sizeof(msgbuf)); + uDebug("Failed to match %s with pattern %s, reason %s", str, pattern, msgbuf) + } + int32_t result = (errCode == 0) ? 0 : 1; + regfree(®ex); + free(str); + free(pattern); + return result; +} + int32_t taosArrayCompareString(const void* a, const void* b) { const char* x = *(const char**)a; const char* y = *(const char**)b; @@ -405,7 +456,11 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_FLOAT: comparFn = compareFloatVal; break; case TSDB_DATA_TYPE_DOUBLE: comparFn = compareDoubleVal; break; case TSDB_DATA_TYPE_BINARY: { - if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ + if (optr == TSDB_RELATION_MATCH) { + comparFn = compareStrRegexCompMatch; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = compareStrRegexCompNMatch; + } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ comparFn = compareStrPatternComp; } else if (optr == TSDB_RELATION_IN) { comparFn = compareFindItemInSet; @@ -417,7 +472,11 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } case TSDB_DATA_TYPE_NCHAR: { - if (optr == TSDB_RELATION_LIKE) { + if (optr == TSDB_RELATION_MATCH) { + comparFn = compareStrRegexCompMatch; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = compareStrRegexCompNMatch; + } else if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; } else if (optr == TSDB_RELATION_IN) { comparFn = compareFindItemInSet; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 311abe70df65f6090aa06b4857adb434f4d2f9a0..e3d022a6b0a4a929b6c06b2c305fb71b6980a865 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -112,9 +112,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too lon TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached") 
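
The MATCH and NMATCH comparators added in tcompare.c above reduce to POSIX extended regular expressions compiled with REG_EXTENDED. A minimal standalone sketch of the same regcomp/regexec pattern (the pattern and input below are illustrative, not taken from the patch):

    #include <regex.h>
    #include <stdio.h>

    int main(void) {
      regex_t re;
      // REG_EXTENDED selects POSIX ERE, the same cflags compareStrRegexComp uses
      if (regcomp(&re, "^tb[0-9]+$", REG_EXTENDED) != 0) return 1;
      int rc = regexec(&re, "tb42", 0, NULL, 0);   // 0 on match, REG_NOMATCH otherwise
      printf("%s\n", rc == 0 ? "match" : "no match");
      regfree(&re);                                // release the compiled pattern
      return 0;
    }
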
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES, "duplicated column names") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH, "Invalid tag length") -TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, "Invalid column length") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES, "duplicated column names") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH, "Invalid tag length") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, "Invalid column length") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_TAG_NAMES, "duplicated tag names") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON, "Invalid JSON format") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON_TYPE, "Invalid JSON data type") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") diff --git a/src/util/src/tlosertree.c b/src/util/src/tlosertree.c index e793548407ad37e2021fdba7db106db3a48fcaf0..0f104c4b63a36880a79ad564a0f837f9b09e7819 100644 --- a/src/util/src/tlosertree.c +++ b/src/util/src/tlosertree.c @@ -90,12 +90,13 @@ void tLoserTreeAdjust(SLoserTreeInfo* pTree, int32_t idx) { SLoserTreeNode kLeaf = pTree->pNode[idx]; while (parentId > 0) { - if (pTree->pNode[parentId].index == -1) { + SLoserTreeNode* pCur = &pTree->pNode[parentId]; + if (pCur->index == -1) { pTree->pNode[parentId] = kLeaf; return; } - int32_t ret = pTree->comparFn(&pTree->pNode[parentId], &kLeaf, pTree->param); + int32_t ret = pTree->comparFn(pCur, &kLeaf, pTree->param); if (ret < 0) { SLoserTreeNode t = pTree->pNode[parentId]; pTree->pNode[parentId] = kLeaf; diff --git a/src/util/src/tthread.c b/src/util/src/tthread.c new file mode 100644 index 0000000000000000000000000000000000000000..043b2de2f241297d209041294428dde2c55e974e --- /dev/null +++ b/src/util/src/tthread.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+#include "os.h"
+#include "tthread.h"
+#include "tglobal.h"
+#include "taosdef.h"
+#include "tutil.h"
+#include "tulog.h"
+#include "taoserror.h"
+
+// create a new thread
+pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param) {
+  pthread_t* pthread = (pthread_t*)malloc(sizeof(pthread_t));
+  pthread_attr_t thattr;
+  pthread_attr_init(&thattr);
+  pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+  int32_t ret = pthread_create(pthread, &thattr, __start_routine, param);
+  pthread_attr_destroy(&thattr);
+
+  if (ret != 0) {
+    free(pthread);
+    return NULL;
+  }
+  return pthread;
+}
+
+// destroy a thread, cancelling it first if it is still running
+bool taosDestoryThread(pthread_t* pthread) {
+  if(pthread == NULL) return false;
+  if(taosThreadRunning(pthread)) {
+    pthread_cancel(*pthread);
+    pthread_join(*pthread, NULL);
+  }
+
+  free(pthread);
+  return true;
+}
+
+// returns true if the thread is running
+bool taosThreadRunning(pthread_t* pthread) {
+  if(pthread == NULL) return false;
+  int ret = pthread_kill(*pthread, 0);
+  if(ret == ESRCH)
+    return false;
+  if(ret == EINVAL)
+    return false;
+  // alive
+  return true;
+}
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c
index 2d4e3aa743a250b618d9c9e80ffa312af1feba12..289c4a6ef5d5db1a04fdb33985ed4de959375f8d 100644
--- a/src/util/src/ttokenizer.c
+++ b/src/util/src/ttokenizer.c
@@ -53,6 +53,7 @@ static SKeyword keywordTable[] = {
     {"NOTNULL", TK_NOTNULL},
     {"IS", TK_IS},
     {"LIKE", TK_LIKE},
+    {"MATCH", TK_MATCH},
     {"GLOB", TK_GLOB},
     {"BETWEEN", TK_BETWEEN},
     {"IN", TK_IN},
@@ -194,6 +195,7 @@ static SKeyword keywordTable[] = {
     {"INITIALLY", TK_INITIALLY},
     {"INSTEAD", TK_INSTEAD},
     {"MATCH", TK_MATCH},
+    {"NMATCH", TK_NMATCH},
     {"KEY", TK_KEY},
     {"OF", TK_OF},
     {"RAISE", TK_RAISE},
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index f826c1aecd336a0eedeb3f02df0a7acc61895bb2..c823880ae2028c4bcfe26dbfc5cd60af62443722 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -560,5 +560,10 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) {
     return vnodeSaveVersion(pVnode);
   }
 
+  // timer thread callback
+  if(status == TSDB_STATUS_COMMIT_NOBLOCK) {
+    qSolveCommitNoBlock(pVnode->tsdb, pVnode->qMgmt);
+  }
+
   return 0;
 }
diff --git a/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs
new file mode 100644
index 0000000000000000000000000000000000000000..e6c3a598adc0bc4bcf5ea84953f649b418199555
--- /dev/null
+++ b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+
+namespace TDengineDriver
+{
+    enum TDengineDataType
+    {
+        TSDB_DATA_TYPE_NULL = 0,       // 1 byte
+        TSDB_DATA_TYPE_BOOL = 1,       // 1 byte
+        TSDB_DATA_TYPE_TINYINT = 2,    // 1 byte
+        TSDB_DATA_TYPE_SMALLINT = 3,   // 2 bytes
+        TSDB_DATA_TYPE_INT = 4,        // 4 bytes
+        TSDB_DATA_TYPE_BIGINT = 5,     // 8 bytes
+        TSDB_DATA_TYPE_FLOAT = 6,      // 4 bytes
+        TSDB_DATA_TYPE_DOUBLE = 7,     // 8 bytes
+        TSDB_DATA_TYPE_BINARY = 8,     // string
+        TSDB_DATA_TYPE_TIMESTAMP = 9,  // 8 bytes
+        TSDB_DATA_TYPE_NCHAR = 10,     // unicode string
+        TSDB_DATA_TYPE_UTINYINT = 11,  // 1 byte
+        TSDB_DATA_TYPE_USMALLINT = 12, // 2 bytes
+        TSDB_DATA_TYPE_UINT = 13,      // 4 bytes
+        TSDB_DATA_TYPE_UBIGINT = 14    // 8 bytes
+    }
+
+    enum TDengineInitOption
+    {
+        TSDB_OPTION_LOCALE = 0,
+        TSDB_OPTION_CHARSET = 1,
+        TSDB_OPTION_TIMEZONE = 2,
+        TDDB_OPTION_CONFIGDIR = 3,
+        TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
+    }
+
+    class TDengineMeta
+    {
+        public string name;
+        public short size;
+        public byte type;
+        public string TypeName()
+        {
+            switch ((TDengineDataType)type)
+            {
+                case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+                    return "BOOL";
+                case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+                    return "TINYINT";
+                case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+                    return "SMALLINT";
+                case TDengineDataType.TSDB_DATA_TYPE_INT:
+                    return "INT";
+                case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+                    return "BIGINT";
+                case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
+                    return "TINYINT UNSIGNED";
+                case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
+                    return "SMALLINT UNSIGNED";
+                case TDengineDataType.TSDB_DATA_TYPE_UINT:
+                    return "INT UNSIGNED";
+                case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
+                    return "BIGINT UNSIGNED";
+                case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+                    return "FLOAT";
+                case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+                    return "DOUBLE";
+                case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+                    return "STRING";
+                case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+                    return "TIMESTAMP";
+                case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+                    return "NCHAR";
+                default:
+                    return "undefined";
+            }
+        }
+    }
+
+    class TDengine
+    {
+        public const int TSDB_CODE_SUCCESS = 0;
+
+        [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
+        static extern public void Init();
+
+        [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
+        static extern public void Cleanup();
+
+        [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
+        static extern public void Options(int option, string value);
+
+        [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
+        static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
+
+        [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
+        static extern private IntPtr taos_errstr(IntPtr res);
+        static public string Error(IntPtr res)
+        {
+            IntPtr errPtr = taos_errstr(res);
+            return Marshal.PtrToStringAnsi(errPtr);
+        }
+
+        [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
+        static extern public int ErrorNo(IntPtr res);
+
+        [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
+        static extern public IntPtr Query(IntPtr conn, string sqlstr);
+
+        [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
+        static extern public int AffectRows(IntPtr res);
+
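
The P/Invoke declarations in this class are thin wrappers over the libtaos C client, so the same sequence can be written natively. A minimal C sketch of connect, query, error check, and cleanup (host and credentials are placeholders):

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      taos_init();
      TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
      if (conn == NULL) return 1;
      TAOS_RES *res = taos_query(conn, "show databases;");
      if (taos_errno(res) != 0) printf("query failed: %s\n", taos_errstr(res));
      taos_free_result(res);
      taos_close(conn);
      taos_cleanup();
      return 0;
    }
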
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); + } +} diff --git a/tests/connectorTest/C#Test/nanosupport/nanotest.cs b/tests/connectorTest/C#Test/nanosupport/nanotest.cs new file mode 100644 index 0000000000000000000000000000000000000000..b9eaefef8c740f8196a715282c8c28ffd79bbdac --- /dev/null +++ b/tests/connectorTest/C#Test/nanosupport/nanotest.cs @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; +namespace TDengineDriver +{ + class TDengineNanoTest + { + //connect parameters + private string host="localhost"; + private string configDir="/etc/taos"; + private string user="root"; + private string password="taosdata"; + private short port = 0; + + //sql parameters + private string dbName; + private string tbName; + private string precision; + + private bool isInsertData; + private bool isQueryData; + + private long tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineNanoTest tester = new TDengineNanoTest(); + //tester.ReadArgument(args); + + tester.InitTDengine(); + tester.ConnectTDengine(); + tester.execute("reset query cache"); + tester.execute("drop database if exists db"); + tester.execute("create database db precision 'ns'"); + tester.executeQuery("show databases;"); + //tester.checkData(0,16,"ns"); + tester.execute("use db"); + + Console.WriteLine("testing nanosecond support in 1st timestamp"); + tester.execute("create table tb (ts timestamp, speed int)"); + tester.execute("insert into tb values('2021-06-10 0:00:00.100000001', 1);"); + tester.execute("insert into tb values(1623254400150000000, 2);"); + tester.execute("import into tb values(1623254400300000000, 3);"); + tester.execute("import into tb values(1623254400299999999, 4);"); + tester.execute("insert into tb values(1623254400300000001, 5);"); + tester.execute("insert into tb values(1623254400999999999, 7);"); + tester.executeQuery("select * from tb;"); + + Console.WriteLine("expect data is "); + + tester.executeQuery("select * from tb;"); + + // Console.WriteLine("expected is : {0}", width); + // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001"); + // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000"); + // tdSql.checkData(2,0,"2021-06-10 0:00:00.299999999"); + // tdSql.checkData(3,1,3); + // tdSql.checkData(4,1,5); + // tdSql.checkData(5,1,7); + // tdSql.checkRows(6); + + tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000001' and ts < '2021-06-10 0:00:00.160000000';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000000' and ts < '2021-06-10 0:00:00.150000000';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts > 1623254400400000000;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts < '2021-06-10 00:00:00.400000000';"); + Console.WriteLine("expected is : 5 " ); + + tester.executeQuery("select count(*) from tb where ts > now + 400000000b;"); + Console.WriteLine("expected is : 0 " ); + + tester.executeQuery("select count(*) from tb where ts >= '2021-06-10 0:00:00.100000001';"); + Console.WriteLine("expected is : 6 " ); + + tester.executeQuery("select count(*) from tb where ts <= 1623254400300000000;"); + Console.WriteLine("expected is : 4 " ); + + tester.executeQuery("select count(*) from 
tb where ts = '2021-06-10 0:00:00.000000000';"); + Console.WriteLine("expected is : 0 " ); + + tester.executeQuery("select count(*) from tb where ts = 1623254400150000000;"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts = '2021-06-10 0:00:00.100000001';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;"); + Console.WriteLine("expected is : 5 " ); + + tester.executeQuery("select count(*) from tb where ts between '2021-06-10 0:00:00.299999999' and '2021-06-10 0:00:00.300000001';"); + Console.WriteLine("expected is : 3 " ); + + tester.executeQuery("select avg(speed) from tb interval(5000000000b);"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select avg(speed) from tb interval(100000000b)"); + Console.WriteLine("expected is : 4 " ); + + // tdSql.error("select avg(speed) from tb interval(1b);") + // tdSql.error("select avg(speed) from tb interval(999b);") + + tester.executeQuery("select avg(speed) from tb interval(1000b);"); + Console.WriteLine("expected is : 5 rows " ); + + tester.executeQuery("select avg(speed) from tb interval(1u);"); + Console.WriteLine("expected is : 5 rows " ); + + tester.executeQuery("select avg(speed) from tb interval(100000000b) sliding (100000000b);"); + Console.WriteLine("expected is : 4 rows " ); + + tester.executeQuery("select last(*) from tb"); + Console.WriteLine("expected is :1623254400999999999 " ); + + // tdSql.checkData(0,0, "2021-06-10 0:00:00.999999999") + // tdSql.checkData(0,0, 1623254400999999999) + + tester.executeQuery("select first(*) from tb"); + Console.WriteLine("expected is : 1623254400100000001" ); + // tdSql.checkData(0,0, 1623254400100000001); + // tdSql.checkData(0,0, "2021-06-10 0:00:00.100000001"); + + tester.execute("insert into tb values(now + 500000000b, 6);"); + tester.executeQuery("select * from tb;"); + // tdSql.checkRows(7); + + tester.execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);"); + tester.execute("insert into tb2 values('2021-06-10 0:00:00.100000001', 1, '2021-06-11 0:00:00.100000001');"); + tester.execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);"); + tester.execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);"); + tester.execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);"); + tester.execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);"); + tester.execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);"); + + tester.executeQuery("select * from tb2;"); + // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001"); + // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000"); + // tdSql.checkData(2,1,4); + // tdSql.checkData(3,1,3); + // tdSql.checkData(4,2,"2021-06-11 00:00:00.300000001"); + // tdSql.checkData(5,2,"2021-06-13 00:00:00.999999999"); + // tdSql.checkRows(6); + tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 > '2021-06-11 0:00:00.100000000' and ts2 < '2021-06-11 0:00:00.100000002';"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800500000000;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); 
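
The bare integer timestamps in these predicates are nanoseconds since the Unix epoch, because the database was created with precision 'ns'. A small C sketch of the conversion behind the expected values, assuming the test machine runs in UTC+8:

    #include <stdio.h>
    #include <time.h>

    int main(void) {
      long long ns = 1623340800150000000LL;       // ts2 of one tb2 row
      time_t sec = (time_t)(ns / 1000000000LL);   // whole seconds: 1623340800
      long frac = (long)(ns % 1000000000LL);      // fraction: 150000000 -> .150000000
      struct tm tmv;
      char buf[32];
      gmtime_r(&sec, &tmv);                       // printed in UTC for portability
      strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tmv);
      // prints 2021-06-10 16:00:00.150000000 UTC, i.e. 2021-06-11 0:00:00.150000000 +08:00
      printf("%s.%09ld UTC\n", buf, frac);
      return 0;
    }
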
+ tester.executeQuery("select count(*) from tb2 where ts2 < '2021-06-11 0:00:00.400000000';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 > now + 400000000b;"); + Console.WriteLine("expected is : 0 " ); + // tdSql.checkRows(0); + + tester.executeQuery("select count(*) from tb2 where ts2 >= '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.executeQuery("select count(*) from tb2 where ts2 <= 1623340800400000000;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.000000000';"); + Console.WriteLine("expected is : 0 " ); + // tdSql.checkRows(0); + + tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.300000001';"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 = 1623340800300000001;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 between '2021-06-11 0:00:00.299999999' and '2021-06-11 0:00:00.300000001';"); + Console.WriteLine("expected is : 3 " ); + // tdSql.checkData(0,0,3); + + tester.executeQuery("select count(*) from tb2 where ts2 <> 1623513600999999999;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000000';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.executeQuery("select count(*) from tb2 where ts2 != 1623513600999999999;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000000';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.execute("insert into tb2 values(now + 500000000b, 6, now +2d);"); + tester.executeQuery("select * from tb2;"); + Console.WriteLine("expected is : 7 rows" ); + // tdSql.checkRows(7); + + // tdLog.debug("testing ill nanosecond format handling"); + tester.execute("create table tb3 (ts timestamp, speed int);"); + // tdSql.error("insert into tb3 values(16232544001500000, 2);"); + tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456', 2);"); + tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456000';"); + // tdSql.checkRows(1); + Console.WriteLine("expected is : 1 rows " ); + + tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456789000', 2);"); + tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456789';"); + // tdSql.checkRows(1); + Console.WriteLine("expected is : 1 rows " ); + + // check timezone support + Console.WriteLine("nsdb" ); + tester.execute("drop database if exists nsdb;"); + tester.execute("create database nsdb precision 
'ns';"); + tester.execute("use nsdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + Console.WriteLine("expected is : 1623258000123456789 " ); + // tdSql.checkData(0,0,1623258000123456789); + + + + Console.WriteLine("usdb" ); + tester.execute("create database usdb precision 'us';"); + tester.execute("use usdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + + Console.WriteLine("expected is : 1623258000123456 " ); + + Console.WriteLine("msdb" ); + tester.execute("drop database if exists msdb;"); + tester.execute("create database msdb precision 'ms';"); + tester.execute("use msdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + Console.WriteLine("expected is : 1623258000123 " ); + + + + tester.CloseConnection(); + tester.cleanup(); + + + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + Console.WriteLine("init..."); + TDengine.Init(); + Console.WriteLine("get connection starting..."); + } + + public void ConnectTDengine() + { + string db = ""; + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("connection failed: " + this.host); + ExitProgram(); + } + else + { + Console.WriteLine("[ OK ] Connection established."); + } + } + //EXECUTE SQL + public void execute(string sql) + { + DateTime dt1 = DateTime.Now; + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + //EXECUTE QUERY + public void executeQuery(string sql) + { + + DateTime dt1 = DateTime.Now; + long queryRows = 0; + IntPtr res = TDengine.Query(conn, sql); + getPrecision(res); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); + int fieldCount = TDengine.FieldCount(res); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + 
queryRows++;
+ for (int fields = 0; fields < fieldCount; ++fields)
+ {
+ TDengineMeta meta = metas[fields];
+ int offset = IntPtr.Size * fields;
+ IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
+
+ builder.Append("---");
+
+ if (data == IntPtr.Zero)
+ {
+ builder.Append("NULL");
+ continue;
+ }
+
+ switch ((TDengineDataType)meta.type)
+ {
+ case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+ bool v1 = Marshal.ReadByte(data) == 0 ? false : true;
+ builder.Append(v1);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+ byte v2 = Marshal.ReadByte(data);
+ builder.Append(v2);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+ short v3 = Marshal.ReadInt16(data);
+ builder.Append(v3);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_INT:
+ int v4 = Marshal.ReadInt32(data);
+ builder.Append(v4);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+ long v5 = Marshal.ReadInt64(data);
+ builder.Append(v5);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+ float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
+ builder.Append(v6);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+ double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
+ builder.Append(v7);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+ string v8 = Marshal.PtrToStringAnsi(data);
+ builder.Append(v8);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+ long v9 = Marshal.ReadInt64(data);
+ builder.Append(v9);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+ string v10 = Marshal.PtrToStringAnsi(data);
+ builder.Append(v10);
+ break;
+ }
+ }
+ builder.Append("---");
+
+ if (queryRows <= 10)
+ {
+ Console.WriteLine(builder.ToString());
+ }
+ builder.Clear();
+ }
+
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ Console.Write("Query is not complete, Error {0:G}, reason: {1}", TDengine.ErrorNo(res), TDengine.Error(res));
+ }
+ Console.WriteLine("");
+
+ TDengine.FreeResult(res);
+
+ }
+
+ public void CloseConnection()
+ {
+ if (this.conn != IntPtr.Zero)
+ {
+ TDengine.Close(this.conn);
+ Console.WriteLine("connection closed.");
+ }
+ }
+
+ static void ExitProgram()
+ {
+ // exit non-zero so CI treats connector or query failures as test failures
+ System.Environment.Exit(1);
+ }
+
+ public void cleanup()
+ {
+ Console.WriteLine("clean up...");
+ System.Environment.Exit(0);
+ }
+
+ // method to get db precision
+ public void getPrecision(IntPtr res)
+ {
+ int psc=TDengine.ResultPrecision(res);
+ switch(psc)
+ {
+ case 0:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond");
+ break;
+ case 1:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond");
+ break;
+ case 2:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond");
+ break;
+ }
+
+ }
+
+ // public void checkData(int x ,int y , long ts ){
+
+ // }
+
+ }
+}
+
diff --git a/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js
new file mode 100644
index 0000000000000000000000000000000000000000..11812ac84b91d5c639a3b3bd73c8b81838c5cc23
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js
@@ -0,0 +1,290 @@
+const taos = require('td2.0-connector');
+var conn = taos.connect({host:"localhost", user:"root", password:"taosdata", config:"/etc/taos",port:6030})
+var c1 = conn.cursor();
+
+
+function checkData(sql,row,col,data){
+
+
+ console.log(sql)
+ c1.execute(sql)
+ var d = c1.fetchall();
+ let checkdata = d[row][col];
+ if (checkdata == data) {
+
+ console.log('check pass')
+ }
+ else{
+ console.log('check failed')
+ console.log('checked is 
:',checkdata) + console.log("expected is :",data) + + + } +} + + +// nano basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists db') +c1.execute('create database db precision "ns";') +c1.execute('use db'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100000001\', 1);') +c1.execute('insert into tb values(1623254400150000000, 2);') +c1.execute('import into tb values(1623254400300000000, 3);') +c1.execute('import into tb values(1623254400299999999, 4);') +c1.execute('insert into tb values(1623254400300000001, 5);') +c1.execute('insert into tb values(1623254400999999999, 7);') +c1.execute('insert into tb values(1623254400123456789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.100000001') +checkData(sql,1,0,'2021-06-10 00:00:00.123456789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999999') +checkData(sql,4,0,'2021-06-10 00:00:00.300000000') +checkData(sql,5,0,'2021-06-10 00:00:00.300000001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999999') +checkData(sql,0,1,1) +checkData(sql,1,1,8) +checkData(sql,2,1,2) +checkData(sql,5,1,5) + + + +// us basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists usdb') +c1.execute('create database usdb precision "us";') +c1.execute('use usdb'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100001\', 1);') +c1.execute('insert into tb values(1623254400150000, 2);') +c1.execute('import into tb values(1623254400300000, 3);') +c1.execute('import into tb values(1623254400299999, 4);') +c1.execute('insert into tb values(1623254400300001, 5);') +c1.execute('insert into tb values(1623254400999999, 7);') +c1.execute('insert into tb values(1623254400123789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') + +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.100001') +checkData(sql,1,0,'2021-06-10 00:00:00.123789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999') +checkData(sql,4,0,'2021-06-10 00:00:00.300000') +checkData(sql,5,0,'2021-06-10 00:00:00.300001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999') +checkData(sql,0,1,1) +checkData(sql,1,1,8) +checkData(sql,2,1,2) +checkData(sql,5,1,5) + +console.log('*******************************************') + +// ms basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists msdb') +c1.execute('create database msdb precision "ms";') +c1.execute('use msdb'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.101\', 1);') +c1.execute('insert into tb values(1623254400150, 2);') +c1.execute('import into tb values(1623254400300, 3);') +c1.execute('import into tb values(1623254400299, 4);') +c1.execute('insert into tb values(1623254400301, 5);') +c1.execute('insert into tb values(1623254400789, 7);') +c1.execute('insert into tb values(1623254400999, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.101') +checkData(sql,1,0,'2021-06-10 00:00:00.150') 
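+// (note) rows come back ordered by timestamp, so the checks above and below
+// assume the out-of-order inserts/imports have been sorted ascending by the server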
+checkData(sql,2,0,'2021-06-10 00:00:00.299')
+checkData(sql,3,0,'2021-06-10 00:00:00.300')
+checkData(sql,4,0,'2021-06-10 00:00:00.301')
+checkData(sql,5,0,'2021-06-10 00:00:00.789')
+checkData(sql,6,0,'2021-06-10 00:00:00.999')
+checkData(sql,0,1,1)
+checkData(sql,1,1,2)
+checkData(sql,2,1,4)
+checkData(sql,5,1,7)
+
+console.log('*******************************************')
+
+// official query result to show
+// console.log('this is area about fetch all data')
+// var query = c1.query(sql)
+// var promise = query.execute();
+// promise.then(function(result) {
+// result.pretty();
+// });
+
+console.log('*******************************************')
+c1.execute('use db')
+
+sql2 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;'
+checkData(sql2,0,0,1)
+
+sql3 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';'
+checkData(sql3,0,0,2)
+
+sql4 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;'
+checkData(sql4,0,0,2)
+
+sql5 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';'
+checkData(sql5,0,0,2)
+
+sql6 = 'select count(*) from tb where ts > 1623254400400000000;'
+checkData(sql6,0,0,1)
+
+sql7 = 'select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';'
+checkData(sql7,0,0,6)
+
+sql8 = 'select count(*) from tb where ts > now + 400000000b;'
+c1.execute(sql8)
+
+sql9 = 'select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';'
+checkData(sql9,0,0,7)
+
+sql10 = 'select count(*) from tb where ts <= 1623254400300000000;'
+checkData(sql10,0,0,5)
+
+sql11 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';'
+c1.execute(sql11)
+
+sql12 = 'select count(*) from tb where ts = 1623254400150000000;'
+checkData(sql12,0,0,1)
+
+sql13 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';'
+checkData(sql13,0,0,1)
+
+sql14 = 'select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;'
+checkData(sql14,0,0,6)
+
+sql15 = 'select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';'
+checkData(sql15,0,0,3)
+
+sql16 = 'select avg(speed) from tb interval(5000000000b);'
+checkData(sql16,0,0,'2021-06-10 00:00:00.000000000')
+
+sql17 = 'select avg(speed) from tb interval(100000000b)'
+checkData(sql17,0,1,3.6666666666666665)
+checkData(sql17,1,1,4.000000000)
+
+checkData(sql17,2,0,'2021-06-10 00:00:00.300000000')
+checkData(sql17,3,0,'2021-06-10 00:00:00.900000000')
+
+console.log("print break ")
+
+// sql18 = 'select avg(speed) from tb interval(999b)'
+// c1.execute(sql18)
+
+console.log("print break2 ")
+sql19 = 'select avg(speed) from tb interval(1u);'
+checkData(sql19,2,1,2.000000000)
+checkData(sql19,3,0,'2021-06-10 00:00:00.299999000')
+
+sql20 = 'select avg(speed) from tb interval(100000000b) sliding (100000000b);'
+checkData(sql20,2,1,4.000000000)
+checkData(sql20,3,0,'2021-06-10 00:00:00.900000000')
+
+sql21 = 'select last(*) from tb;'
+checkData(sql21,0,0,'2021-06-10 00:00:00.999999999')
+
+sql22 = 'select first(*) from tb;'
+checkData(sql22,0,0,'2021-06-10 00:00:00.100000001')
+
+// timezone support
+
+console.log('testing nanosecond support in other timestamps')
+
+c1.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);')
+c1.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');')
+c1.execute('insert 
into tb2 values(1623254400150000000, 2, 1623340800150000000);')
+c1.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);')
+c1.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);')
+c1.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);')
+c1.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);')
+
+sql23 = 'select * from tb2;'
+checkData(sql23,0,0,'2021-06-10 00:00:00.100000001')
+checkData(sql23,1,0,'2021-06-10 00:00:00.150000000')
+checkData(sql23,2,1,4)
+checkData(sql23,3,1,3)
+checkData(sql23,4,2,'2021-06-11 00:00:00.300000001')
+checkData(sql23,5,2,'2021-06-13 00:00:00.999999999')
+
+sql24 = 'select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';'
+checkData(sql24,0,0,6)
+
+sql25 = 'select count(*) from tb2 where ts2 <= 1623340800400000000;'
+checkData(sql25,0,0,5)
+
+sql26 = 'select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';'
+checkData(sql26,0,0,1)
+
+sql27 = 'select count(*) from tb2 where ts2 = 1623340800300000001;'
+checkData(sql27,0,0,1)
+
+sql28 = 'select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;'
+checkData(sql28,0,0,5)
+
+sql29 = 'select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';'
+checkData(sql29,0,0,3)
+
+sql30 = 'select count(*) from tb2 where ts2 <> 1623513600999999999;'
+checkData(sql30,0,0,5)
+
+sql31 = 'select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';'
+checkData(sql31,0,0,5)
+
+sql32 = 'select count(*) from tb2 where ts2 != 1623513600999999999;'
+checkData(sql32,0,0,5)
+
+sql33 = 'select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';'
+checkData(sql33,0,0,5)
+
+c1.execute('insert into tb2 values(now + 500000000b, 6, now +2d);')
+
+sql34 = 'select count(*) from tb2;'
+checkData(sql34,0,0,7)
+
+
+// check timezone support
+
+c1.execute('use db;')
+c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);')
+c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10T0:00:00.123456789+07:00" , 1.0);' )
+sql35 = 'select first(*) from stb1;'
+checkData(sql35,0,0,'2021-06-10 01:00:00.123456789')
+
+c1.execute('use usdb;')
+c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);')
+c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' )
+sql36 = 'select first(*) from stb1;'
+checkData(sql36,0,0,'2021-06-10 01:00:00.123456')
+
+c1.execute('use msdb;')
+c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);')
+c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' )
+sql37 = 'select first(*) from stb1;'
+checkData(sql37,0,0,'2021-06-10 01:00:00.123')
+
+
+
+
+
+
+
diff --git a/tests/connectorTest/nodejsTest/nodetaos/cinterface.js b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js
new file mode 100644
index 0000000000000000000000000000000000000000..03d27e5593ccb15d8ff47cd3c3dedba765d14fc1
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js
@@ -0,0 +1,587 @@
+/**
+ * C Interface with TDengine Module
+ * @module CTaosInterface
+ */
+
+const ref = require('ref-napi');
+const os = require('os');
+const ffi = require('ffi-napi');
+const ArrayType = require('ref-array-napi');
+const Struct = 
require('ref-struct-napi'); +const FieldTypes = require('./constants'); +const errors = require('./error'); +const TaosObjects = require('./taosobjects'); +const { NULL_POINTER } = require('ref-napi'); + +module.exports = CTaosInterface; + +function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let time = data.readInt64LE(currOffset); + currOffset += nbytes; + res.push(new TaosObjects.TaosTimestamp(time, precision)); + } + return res; +} +function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = new Array(data.length); + for (let i = 0; i < data.length; i++) { + if (data[i] == 0) { + res[i] = false; + } + else if (data[i] == 1) { + res[i] = true; + } + else if (data[i] == FieldTypes.C_BOOL_NULL) { + res[i] = null; + } + } + return res; +} +function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readIntLE(currOffset, 1); + res.push(d == FieldTypes.C_TINYINT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readIntLE(currOffset, 2); + res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readInt32LE(currOffset); + res.push(d == FieldTypes.C_INT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readInt64LE(currOffset); + res.push(d == FieldTypes.C_BIGINT_NULL ? null : BigInt(d)); + currOffset += nbytes; + } + return res; +} +function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = parseFloat(data.readFloatLE(currOffset).toFixed(5)); + res.push(isNaN(d) ? null : d); + currOffset += nbytes; + } + return res; +} +function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = parseFloat(data.readDoubleLE(currOffset).toFixed(16)); + res.push(isNaN(d) ? 
null : d); + currOffset += nbytes; + } + return res; +} + +function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + res.push(dataEntry.toString("utf-8")); + currOffset += nbytes; + } + return res; +} + +// Object with all the relevant converters from pblock data to javascript readable data +let convertFunctions = { + [FieldTypes.C_BOOL]: convertBool, + [FieldTypes.C_TINYINT]: convertTinyint, + [FieldTypes.C_SMALLINT]: convertSmallint, + [FieldTypes.C_INT]: convertInt, + [FieldTypes.C_BIGINT]: convertBigint, + [FieldTypes.C_FLOAT]: convertFloat, + [FieldTypes.C_DOUBLE]: convertDouble, + [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_TIMESTAMP]: convertTimestamp, + [FieldTypes.C_NCHAR]: convertNchar +} + +// Define TaosField structure +var char_arr = ArrayType(ref.types.char); +var TaosField = Struct({ + 'name': char_arr, +}); +TaosField.fields.name.type.size = 65; +TaosField.defineProperty('type', ref.types.char); +TaosField.defineProperty('bytes', ref.types.short); + + +/** + * + * @param {Object} config - Configuration options for the interface + * @return {CTaosInterface} + * @class CTaosInterface + * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to + * access this class directly and use it unless you understand what these functions do. + */ +function CTaosInterface(config = null, pass = false) { + ref.types.char_ptr = ref.refType(ref.types.char); + ref.types.void_ptr = ref.refType(ref.types.void); + ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); + /*Declare a bunch of functions first*/ + /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. 
The connection._conn buffer is supplied for pointers to TAOS * */ + + if ('win32' == os.platform()) { + taoslibname = 'taos'; + } else { + taoslibname = 'libtaos'; + } + this.libtaos = ffi.Library(taoslibname, { + 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]], + 'taos_init': [ref.types.void, []], + //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) + 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]], + //void taos_close(TAOS *taos) + 'taos_close': [ref.types.void, [ref.types.void_ptr]], + //int *taos_fetch_lengths(TAOS_RES *res); + 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]], + //int taos_query(TAOS *taos, char *sqlstr) + 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]], + //int taos_affected_rows(TAOS_RES *res) + 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]], + //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) + 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]], + //int taos_num_fields(TAOS_RES *res); + 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]], + //TAOS_ROW taos_fetch_row(TAOS_RES *res) + //TAOS_ROW is void **, but we set the return type as a reference instead to get the row + 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]], + 'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], + //int taos_result_precision(TAOS_RES *res) + 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]], + //void taos_free_result(TAOS_RES *res) + 'taos_free_result': [ref.types.void, [ref.types.void_ptr]], + //int taos_field_count(TAOS *taos) + 'taos_field_count': [ref.types.int, [ref.types.void_ptr]], + //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) + 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]], + //int taos_errno(TAOS *taos) + 'taos_errno': [ref.types.int, [ref.types.void_ptr]], + //char *taos_errstr(TAOS *taos) + 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]], + //void taos_stop_query(TAOS_RES *res); + 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]], + //char *taos_get_server_info(TAOS *taos); + 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]], + //char *taos_get_client_info(); + 'taos_get_client_info': [ref.types.char_ptr, []], + + // ASYNC + // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) + 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]], + // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); + 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]], + + // Subscription + //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) + 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], + // TAOS_RES *taos_consume(TAOS_SUB *tsub) + 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]], + //void taos_unsubscribe(TAOS_SUB *tsub); + 'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]], + + // Continuous Query + //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, 
TAOS_ROW row), + // int64_t stime, void *param, void (*callback)(void *)); + 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]], + //void taos_close_stream(TAOS_STREAM *tstr); + 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]] + + }); + if (pass == false) { + if (config == null) { + this._config = ref.alloc(ref.types.char_ptr, ref.NULL); + } + else { + try { + this._config = ref.allocCString(config); + } + catch (err) { + throw "Attribute Error: config is expected as a str"; + } + } + if (config != null) { + this.libtaos.taos_options(3, this._config); + } + this.libtaos.taos_init(); + } + return this; +} +CTaosInterface.prototype.config = function config() { + return this._config; +} +CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) { + let _host, _user, _password, _db, _port; + try { + _host = host != null ? ref.allocCString(host) : ref.NULL; + } + catch (err) { + throw "Attribute Error: host is expected as a str"; + } + try { + _user = ref.allocCString(user) + } + catch (err) { + throw "Attribute Error: user is expected as a str"; + } + try { + _password = ref.allocCString(password); + } + catch (err) { + throw "Attribute Error: password is expected as a str"; + } + try { + _db = db != null ? ref.allocCString(db) : ref.NULL; + } + catch (err) { + throw "Attribute Error: db is expected as a str"; + } + try { + _port = ref.alloc(ref.types.int, port); + } + catch (err) { + throw TypeError("port is expected as an int") + } + let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port); + if (ref.isNull(connection)) { + throw new errors.TDError('Failed to connect to TDengine'); + } + else { + console.log('Successfully connected to TDengine'); + } + return connection; +} +CTaosInterface.prototype.close = function close(connection) { + this.libtaos.taos_close(connection); + console.log("Connection is closed"); +} +CTaosInterface.prototype.query = function query(connection, sql) { + return this.libtaos.taos_query(connection, ref.allocCString(sql)); +} +CTaosInterface.prototype.affectedRows = function affectedRows(result) { + return this.libtaos.taos_affected_rows(result); +} +CTaosInterface.prototype.useResult = function useResult(result) { + + let fields = []; + let pfields = this.fetchFields(result); + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 65, i)), + type: pfields[i + 65], + bytes: pfields[i + 66] + }) + } + } + return fields; +} +CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { + let pblock = ref.NULL_POINTER; + let num_of_rows = this.libtaos.taos_fetch_block(result, pblock); + if (ref.isNull(pblock.deref()) == true) { + return { block: null, num_of_rows: 0 }; + } + + var fieldL = this.libtaos.taos_fetch_lengths(result); + let precision = this.libtaos.taos_result_precision(result); + + var fieldlens = []; + + if (ref.isNull(fieldL) == false) { + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 4, i * 4); + let len = plen.readInt32LE(0); + fieldlens.push(len); + } + } + + let blocks = new Array(fields.length); + blocks.fill(null); + num_of_rows = Math.abs(num_of_rows); + let offset = 0; + let ptr 
= pblock.deref();
+
+ for (let i = 0; i < fields.length; i++) {
+ let pdata = ref.reinterpret(ptr, 8, i * 8);
+ if (ref.isNull(pdata.readPointer())) {
+ blocks[i] = new Array();
+ } else {
+ pdata = ref.ref(pdata.readPointer());
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision);
+ }
+ }
+ return { blocks: blocks, num_of_rows }
+}
+CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) {
+ let row = this.libtaos.taos_fetch_row(result);
+ return row;
+}
+CTaosInterface.prototype.freeResult = function freeResult(result) {
+ this.libtaos.taos_free_result(result);
+ result = null;
+}
+/** Number of fields returned in this result handle, must use with async */
+CTaosInterface.prototype.numFields = function numFields(result) {
+ return this.libtaos.taos_num_fields(result);
+}
+// Fetch fields count by connection, the latest query
+CTaosInterface.prototype.fieldsCount = function fieldsCount(result) {
+ return this.libtaos.taos_field_count(result);
+}
+CTaosInterface.prototype.fetchFields = function fetchFields(result) {
+ return this.libtaos.taos_fetch_fields(result);
+}
+CTaosInterface.prototype.errno = function errno(result) {
+ return this.libtaos.taos_errno(result);
+}
+CTaosInterface.prototype.errStr = function errStr(result) {
+ return ref.readCString(this.libtaos.taos_errstr(result));
+}
+// Async
+CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) {
+ // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
+ callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback);
+ this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
+ return param;
+}
+/** Asynchronously fetches the next block of rows. Wraps callback and transfers a 4th argument to the cursor, the row data as blocks in javascript form
+ * Note: This isn't a recursive function; in order to fetch all data either use the TDengine cursor object, TaosQuery object, or implement a recursive
+ * function yourself using the libtaos.taos_fetch_rows_a function
+ */
+CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, param = ref.ref(ref.NULL)) {
+ // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
+ var cti = this;
+ // wrap callback with a function so interface can access the numOfRows value, needed in order to properly process the binary data
+ let asyncCallbackWrapper = function (param2, result2, numOfRows2) {
+ // Data preparation to pass to cursor. Could be bottleneck in query execution callback times. 
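+ // (note) each callback invocation converts a single fetched row: taos_fetch_row
+ // advances the cursor, and each column pointer below is run through the matching
+ // converter from ./constants with a row count of 1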
+ let row = cti.libtaos.taos_fetch_row(result2);
+ let fields = cti.fetchFields_a(result2);
+
+ let precision = cti.libtaos.taos_result_precision(result2);
+ let blocks = new Array(fields.length);
+ blocks.fill(null);
+ numOfRows2 = Math.abs(numOfRows2);
+ let offset = 0;
+ var fieldL = cti.libtaos.taos_fetch_lengths(result2);
+ var fieldlens = [];
+ if (ref.isNull(fieldL) == false) {
+
+ for (let i = 0; i < fields.length; i++) {
+ let plen = ref.reinterpret(fieldL, 8, i * 8);
+ let len = ref.get(plen, 0, ref.types.int32);
+ fieldlens.push(len);
+ }
+ }
+ if (numOfRows2 > 0) {
+ for (let i = 0; i < fields.length; i++) {
+ let prow = ref.reinterpret(row, 8, i * 8);
+ if (ref.isNull(prow.readPointer())) {
+ blocks[i] = new Array();
+ } else {
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ blocks[i] = convertFunctions[fields[i]['type']](ref.ref(prow.readPointer()), 1, fieldlens[i], offset, precision);
+ //offset += fields[i]['bytes'] * numOfRows2;
+ }
+ }
+ }
+ callback(param2, result2, numOfRows2, blocks);
+ }
+ asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper);
+ this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param);
+ return param;
+}
+// Fetch field meta data by result handle
+CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) {
+ let pfields = this.fetchFields(result);
+ let pfieldscount = this.numFields(result);
+ let fields = [];
+ if (ref.isNull(pfields) == false) {
+ pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0);
+ for (let i = 0; i < pfields.length; i += 68) {
+ //0 - 64 = name //65 = type, 66 - 67 = bytes
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 65, i)),
+ type: pfields[i + 65],
+ bytes: pfields[i + 66]
+ })
+ }
+ }
+ return fields;
+}
+// Stop a query by result handle
+CTaosInterface.prototype.stopQuery = function stopQuery(result) {
+ if (result != null) {
+ this.libtaos.taos_stop_query(result);
+ }
+ else {
+ throw new errors.ProgrammingError("No result handle passed to stop query");
+ }
+}
+CTaosInterface.prototype.getServerInfo = function getServerInfo(connection) {
+ return ref.readCString(this.libtaos.taos_get_server_info(connection));
+}
+CTaosInterface.prototype.getClientInfo = function getClientInfo() {
+ return ref.readCString(this.libtaos.taos_get_client_info());
+}
+
+// Subscription
+CTaosInterface.prototype.subscribe = function subscribe(connection, restart, topic, sql, interval) {
+ let topicOrig = topic;
+ let sqlOrig = sql;
+ try {
+ sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ }
+ catch (err) {
+ throw "Attribute Error: sql is expected as a str";
+ }
+ try {
+ topic = topic != null ? 
ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ }
+ catch (err) {
+ throw TypeError("topic is expected as a str");
+ }
+
+ restart = ref.alloc(ref.types.int, restart);
+
+ let subscription = this.libtaos.taos_subscribe(connection, restart, topic, sql, null, null, interval);
+ if (ref.isNull(subscription)) {
+ throw new errors.TDError('Failed to subscribe to TDengine | Topic: ' + topicOrig + ', SQL: ' + sqlOrig);
+ }
+ else {
+ console.log('Successfully subscribed to TDengine - Topic: ' + topicOrig);
+ }
+ return subscription;
+}
+
+CTaosInterface.prototype.consume = function consume(subscription) {
+ let result = this.libtaos.taos_consume(subscription);
+ let fields = [];
+ let pfields = this.fetchFields(result);
+ if (ref.isNull(pfields) == false) {
+ pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0);
+ for (let i = 0; i < pfields.length; i += 68) {
+ //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 64, i)),
+ bytes: pfields[i + 64],
+ type: pfields[i + 66]
+ })
+ }
+ }
+
+ let data = [];
+ while (true) {
+ let { blocks, num_of_rows } = this.fetchBlock(result, fields);
+ if (num_of_rows == 0) {
+ break;
+ }
+ for (let i = 0; i < num_of_rows; i++) {
+ data.push([]);
+ let rowBlock = new Array(fields.length);
+ for (let j = 0; j < fields.length; j++) {
+ rowBlock[j] = blocks[j][i];
+ }
+ data[data.length - 1] = (rowBlock);
+ }
+ }
+ return { data: data, fields: fields, result: result };
+}
+CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
+ //void taos_unsubscribe(TAOS_SUB *tsub);
+ this.libtaos.taos_unsubscribe(subscription);
+}
+
+// Continuous Query
+CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
+ try {
+ sql = ref.allocCString(sql);
+ }
+ catch (err) {
+ throw "Attribute Error: sql string is expected as a str";
+ }
+ var cti = this;
+ let asyncCallbackWrapper = function (param2, result2, row) {
+ let fields = cti.fetchFields_a(result2);
+ let precision = cti.libtaos.taos_result_precision(result2);
+ let blocks = new Array(fields.length);
+ blocks.fill(null);
+ let numOfRows2 = 1;
+ let offset = 0;
+ if (numOfRows2 > 0) {
+ for (let i = 0; i < fields.length; i++) {
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision);
+ offset += fields[i]['bytes'] * numOfRows2;
+ }
+ }
+ callback(param2, result2, blocks, fields);
+ }
+ asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
+ var asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
+ let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
+ if (ref.isNull(streamHandle)) {
+ throw new errors.TDError('Failed to open a stream with TDengine');
+ }
+ else {
+ console.log("Successfully opened stream");
+ return streamHandle;
+ }
+}
+CTaosInterface.prototype.closeStream = function closeStream(stream) {
+ this.libtaos.taos_close_stream(stream);
+ console.log("Closed stream");
+}
diff --git a/tests/connectorTest/nodejsTest/nodetaos/connection.js b/tests/connectorTest/nodejsTest/nodetaos/connection.js
new file mode 
100644 index 0000000000000000000000000000000000000000..08186f87053ad0ed0982ec8941f0cf38c4ad0467 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/connection.js @@ -0,0 +1,84 @@ +const TDengineCursor = require('./cursor') +const CTaosInterface = require('./cinterface') +module.exports = TDengineConnection; + +/** + * TDengine Connection Class + * @param {object} options - Options for configuring the connection with TDengine + * @return {TDengineConnection} + * @class TDengineConnection + * @constructor + * @example + * //Initialize a new connection + * var conn = new TDengineConnection({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) + * + */ +function TDengineConnection(options) { + this._conn = null; + this._host = null; + this._user = "root"; //The default user + this._password = "taosdata"; //The default password + this._database = null; + this._port = 0; + this._config = null; + this._chandle = null; + this._configConn(options) + return this; +} +/** + * Configure the connection to TDengine + * @private + * @memberof TDengineConnection + */ +TDengineConnection.prototype._configConn = function _configConn(options) { + if (options['host']) { + this._host = options['host']; + } + if (options['user']) { + this._user = options['user']; + } + if (options['password']) { + this._password = options['password']; + } + if (options['database']) { + this._database = options['database']; + } + if (options['port']) { + this._port = options['port']; + } + if (options['config']) { + this._config = options['config']; + } + this._chandle = new CTaosInterface(this._config); + this._conn = this._chandle.connect(this._host, this._user, this._password, this._database, this._port); +} +/** Close the connection to TDengine */ +TDengineConnection.prototype.close = function close() { + this._chandle.close(this._conn); +} +/** + * Initialize a new cursor to interact with TDengine with + * @return {TDengineCursor} + */ +TDengineConnection.prototype.cursor = function cursor() { + //Pass the connection object to the cursor + return new TDengineCursor(this); +} +TDengineConnection.prototype.commit = function commit() { + return this; +} +TDengineConnection.prototype.rollback = function rollback() { + return this; +} +/** + * Clear the results from connector + * @private + */ +/* + TDengineConnection.prototype._clearResultSet = function _clearResultSet() { + var result = this._chandle.useResult(this._conn).result; + if (result) { + this._chandle.freeResult(result) + } +} +*/ diff --git a/tests/connectorTest/nodejsTest/nodetaos/constants.js b/tests/connectorTest/nodejsTest/nodetaos/constants.js new file mode 100644 index 0000000000000000000000000000000000000000..cd6a0c9fbaff51e7f0ecd3ab06907b7b1fb7dcb1 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/constants.js @@ -0,0 +1,76 @@ +/** + * Contains the the definitions/values assigned to various field types + * @module FieldTypes + */ +/** + * TDengine Field Types and their type codes + * @typedef {Object} FieldTypes + * @global + * @property {number} C_NULL - Null + * @property {number} C_BOOL - Boolean. Note, 0x02 is the C_BOOL_NULL value. + * @property {number} C_TINYINT - Tiny Int, values in the range [-2^7+1, 2^7-1]. Note, -2^7 has been used as the C_TINYINT_NULL value + * @property {number} C_SMALLINT - Small Int, values in the range [-2^15+1, 2^15-1]. Note, -2^15 has been used as the C_SMALLINT_NULL value + * @property {number} C_INT - Int, values in the range [-2^31+1, 2^31-1]. 
Note, -2^31 has been used as the C_INT_NULL value + * @property {number} C_BIGINT - Big Int, values in the range [-2^59, 2^59]. + * @property {number} C_FLOAT - Float, values in the range [-3.4E38, 3.4E38], accurate up to 6-7 decimal places. + * @property {number} C_DOUBLE - Double, values in the range [-1.7E308, 1.7E308], accurate up to 15-16 decimal places. + * @property {number} C_BINARY - Binary, encoded in utf-8. + * @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured in number of milliseconds passed after + 1970-01-01 08:00:00.000 GMT. + * @property {number} C_NCHAR - NChar field type encoded in ASCII, a wide string. + * + * + * + * @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result). + * @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result). + */ +module.exports = { + C_NULL : 0, + C_BOOL : 1, + C_TINYINT : 2, + C_SMALLINT : 3, + C_INT : 4, + C_BIGINT : 5, + C_FLOAT : 6, + C_DOUBLE : 7, + C_BINARY : 8, + C_TIMESTAMP : 9, + C_NCHAR : 10, + // NULL value definition + // NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL : 2, + C_TINYINT_NULL : -128, + C_SMALLINT_NULL : -32768, + C_INT_NULL : -2147483648, + C_BIGINT_NULL : -9223372036854775808, + C_FLOAT_NULL : 2146435072, + C_DOUBLE_NULL : -9223370937343148032, + C_NCHAR_NULL : 4294967295, + C_BINARY_NULL : 255, + C_TIMESTAMP_MILLI : 0, + C_TIMESTAMP_MICRO : 1, + getType, +} + +const typeCodesToName = { + 0 : 'Null', + 1 : 'Boolean', + 2 : 'Tiny Int', + 3 : 'Small Int', + 4 : 'Int', + 5 : 'Big Int', + 6 : 'Float', + 7 : 'Double', + 8 : 'Binary', + 9 : 'Timestamp', + 10 : 'Nchar', +} + +/** + * @function + * @param {number} typecode - The code to get the name of the type for + * @return {string} Name of the field type + */ +function getType(typecode) { + return typeCodesToName[typecode]; +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/cursor.js b/tests/connectorTest/nodejsTest/nodetaos/cursor.js new file mode 100644 index 0000000000000000000000000000000000000000..f879d89d487eae9290fd9fc70259699f27937928 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/cursor.js @@ -0,0 +1,476 @@ +const ref = require('ref-napi'); +require('./globalfunc.js') +const CTaosInterface = require('./cinterface') +const errors = require('./error') +const TaosQuery = require('./taosquery') +const { PerformanceObserver, performance } = require('perf_hooks'); +module.exports = TDengineCursor; + +/** + * @typedef {Object} Buffer - A Node.js buffer. Please refer to {@link https://nodejs.org/api/buffer.html} for more details + * @global + */ + +/** + * @class TDengineCursor + * @classdesc The TDengine Cursor works directly with the C Interface which works with TDengine. It refrains from + * returning parsed data and majority of functions return the raw data such as cursor.fetchall() as compared to the TaosQuery class which + * has functions that "prettify" the data and add more functionality and can be used through cursor.query("your query"). Instead of + * promises, the class and its functions use callbacks. + * @param {TDengineConnection} - The TDengine Connection this cursor uses to interact with TDengine + * @property {data} - Latest retrieved data from query execution. 
It is an empty array by default
+ * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
+ * @since 1.0.0
+ */
+function TDengineCursor(connection = null) {
+ //All parameters are stored for sync queries only.
+ this._rowcount = -1;
+ this._connection = null;
+ this._result = null;
+ this._fields = null;
+ this.data = [];
+ this.fields = null;
+ if (connection != null) {
+ this._connection = connection
+ this._chandle = connection._chandle //pass through, just need library loaded.
+ }
+ else {
+ throw new errors.ProgrammingError("A TDengineConnection object is required to be passed to the TDengineCursor");
+ }
+
+}
+/**
+ * Get the row counts of the latest query
+ * @since 1.0.0
+ * @return {number} Rowcount
+ */
+TDengineCursor.prototype.rowcount = function rowcount() {
+ return this._rowcount;
+}
+/**
+ * Close the cursor by setting its connection to null and freeing results from the connection and resetting the results it has stored
+ * @return {boolean} Whether or not the cursor was successfully closed
+ * @since 1.0.0
+ */
+TDengineCursor.prototype.close = function close() {
+ if (this._connection == null) {
+ return false;
+ }
+ this._connection._clearResultSet();
+ this._reset_result();
+ this._connection = null;
+ return true;
+}
+/**
+ * Create a TaosQuery object to perform a query to TDengine and retrieve data.
+ * @param {string} operation - The operation string to perform a query on
+ * @param {boolean} execute - Whether or not to immediately perform the query. Default is false.
+ * @return {TaosQuery | Promise} A TaosQuery object
+ * @example
+ * var query = cursor.query("select count(*) from meterinfo.meters");
+ * query.execute();
+ * @since 1.0.6
+ */
+TDengineCursor.prototype.query = function query(operation, execute = false) {
+ return new TaosQuery(operation, this, execute);
+}
+
+/**
+ * Execute a query. Also stores all the field meta data returned from the query into cursor.fields. It is preferable to use cursor.query() to create
+ * queries and execute them instead of using the cursor object directly.
+ * @param {string} operation - The query operation to execute in the taos shell
+ * @param {Object} options - Execution options object. quiet : true turns off logging from queries
+ * @param {boolean} options.quiet - True if you want to suppress logging such as "Query OK, 1 row(s) ..." 
+ * @param {function} callback - A callback function to execute after the query is made to TDengine + * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query + * @since 1.0.0 + */ +TDengineCursor.prototype.execute = function execute(operation, options, callback) { + if (operation == undefined) { + throw new errors.ProgrammingError('No operation passed as argument'); + return null; + } + + if (typeof options == 'function') { + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + + this._reset_result(); + + let stmt = operation; + let time = 0; + let res; + if (options['quiet'] != true) { + const obs = new PerformanceObserver((items) => { + time = items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + this._result = this._chandle.query(this._connection._conn, stmt); + performance.mark('B'); + performance.measure('query', 'A', 'B'); + } + else { + this._result = this._chandle.query(this._connection._conn, stmt); + } + res = this._chandle.errno(this._result); + if (res == 0) { + let fieldCount = this._chandle.fieldsCount(this._result); + if (fieldCount == 0) { + let affectedRowCount = this._chandle.affectedRows(this._result); + let response = this._createAffectedResponse(affectedRowCount, time) + if (options['quiet'] != true) { + console.log(response); + } + wrapCB(callback); + return affectedRowCount; //return num of affected rows, common with insert, use statements + } + else { + this._fields = this._chandle.useResult(this._result); + this.fields = this._fields; + wrapCB(callback); + + return this._result; //return a pointer to the result + } + } + else { + throw new errors.ProgrammingError(this._chandle.errStr(this._result)) + } + +} +TDengineCursor.prototype._createAffectedResponse = function (num, time) { + return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)"; +} +TDengineCursor.prototype._createSetResponse = function (num, time) { + return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)"; +} +TDengineCursor.prototype.executemany = function executemany() { + +} +TDengineCursor.prototype.fetchone = function fetchone() { + +} +TDengineCursor.prototype.fetchmany = function fetchmany() { + +} +/** + * Fetches all results from a query and also stores results into cursor.data. It is preferable to use cursor.query() to create + * queries and execute them instead of using the cursor object directly. + * @param {function} callback - callback function executing on the complete fetched data + * @return {Array} The resultant array, with entries corresponding to each retreived row from the query results, sorted in + * order by the field name ordering in the table. + * @since 1.0.0 + * @example + * cursor.execute('select * from db.table'); + * var data = cursor.fetchall(function(results) { + * results.forEach(row => console.log(row)); + * }) + */ +TDengineCursor.prototype.fetchall = function fetchall(options, callback) { + if (this._result == null || this._fields == null) { + throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. 
Execute a query first");
+ }
+
+ let num_of_rows = this._chandle.affectedRows(this._result);
+ let data = new Array(num_of_rows);
+
+ this._rowcount = 0;
+
+ let time = 0;
+ const obs = new PerformanceObserver((items) => {
+ time += items.getEntries()[0].duration;
+ performance.clearMarks();
+ });
+ obs.observe({ entryTypes: ['measure'] });
+ performance.mark('A');
+ while (true) {
+ let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
+ // console.log(blockAndRows);
+ // break;
+ let block = blockAndRows.blocks;
+ let num_of_rows = blockAndRows.num_of_rows;
+ if (num_of_rows == 0) {
+ break;
+ }
+ this._rowcount += num_of_rows;
+ let numoffields = this._fields.length;
+ for (let i = 0; i < num_of_rows; i++) {
+ // data.push([]);
+
+ let rowBlock = new Array(numoffields);
+ for (let j = 0; j < numoffields; j++) {
+ rowBlock[j] = block[j][i];
+ }
+ data[this._rowcount - num_of_rows + i] = (rowBlock);
+ // data.push(rowBlock);
+ }
+
+ }
+
+ performance.mark('B');
+ performance.measure('query', 'A', 'B');
+ let response = this._createSetResponse(this._rowcount, time)
+ console.log(response);
+
+ // this._connection._clearResultSet();
+ let fields = this.fields;
+ this._reset_result();
+ this.data = data;
+ this.fields = fields;
+
+ wrapCB(callback, data);
+
+ return data;
+}
+/**
+ * Asynchronously execute a query to TDengine. NOTE, insertion requests must be done in sync if on the same table.
+ * @param {string} operation - The query operation to execute in the taos shell
+ * @param {Object} options - Execution options object. quiet : true turns off logging from queries
+ * @param {boolean} options.quiet - True if you want to suppress logging such as "Query OK, 1 row(s) ..."
+ * @param {function} callback - A callback function to execute after the query is made to TDengine
+ * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
+ * @since 1.0.0
+ */
+TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) {
+ if (operation == undefined) {
+ throw new errors.ProgrammingError('No operation passed as argument');
+ return null;
+ }
+ if (typeof options == 'function') {
+ //we expect the parameter after callback to be param
+ param = callback;
+ callback = options;
+ }
+ if (typeof options != 'object') options = {}
+ if (this._connection == null) {
+ throw new errors.ProgrammingError('Cursor is not connected');
+ }
+ if (typeof callback != 'function') {
+ throw new errors.ProgrammingError("No callback function passed to execute_a function");
+ }
+ // Async wrapper for callback;
+ var cr = this;
+
+ let asyncCallbackWrapper = function (param2, res2, resCode) {
+ if (typeof callback == 'function') {
+ callback(param2, res2, resCode);
+ }
+
+ if (resCode >= 0) {
+ // let fieldCount = cr._chandle.numFields(res2);
+ // if (fieldCount == 0) {
+ // //cr._chandle.freeResult(res2);
+ // return res2;
+ // }
+ // else {
+ // return res2;
+ // }
+ return res2;
+
+ }
+ else {
+ throw new errors.ProgrammingError("Error occurring with use of execute_a async function. Status code was returned with failure");
+ }
+ }
+
+ let stmt = operation;
+ let time = 0;
+
+ // Use ref module to write to buffer in cursor.js instead of taosquery to maintain a difference in levels. 
Have taosquery stay high level
+ // through letting it pass an object as param
+ var buf = ref.alloc('Object');
+ ref.writeObject(buf, 0, param);
+ const obs = new PerformanceObserver((items) => {
+ time = items.getEntries()[0].duration;
+ performance.clearMarks();
+ });
+ obs.observe({ entryTypes: ['measure'] });
+ performance.mark('A');
+ this._chandle.query_a(this._connection._conn, stmt, asyncCallbackWrapper, buf);
+ performance.mark('B');
+ performance.measure('query', 'A', 'B');
+ return param;
+
+
+}
+/**
+ * Fetches all results from an async query. It is preferable to use cursor.query_a() to create
+ * async queries and execute them instead of using the cursor object directly.
+ * @param {Object} options - An options object containing options for this function
+ * @param {function} callback - callback function that is called back on the COMPLETE fetched data (it is called back only once!).
+ * Must be of form function (param, result, rowCount, rowData)
+ * @param {Object} param - A parameter that is also passed to the main callback function. Important! Param must be an object, and the key "data" cannot be used
+ * @return {{param:Object, result:Buffer}} An object with the passed parameters object and the buffer instance that is a pointer to the result handle.
+ * @since 1.2.0
+ * @example
+ * cursor.execute('select * from db.table');
+ * var data = cursor.fetchall(function(results) {
+ * results.forEach(row => console.log(row));
+ * })
+ */
+TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
+ if (typeof options == 'function') {
+ //we expect the parameter after callback to be param
+ param = callback;
+ callback = options;
+ }
+ if (typeof options != 'object') options = {}
+ if (this._connection == null) {
+ throw new errors.ProgrammingError('Cursor is not connected');
+ }
+ if (typeof callback != 'function') {
+ throw new errors.ProgrammingError('No callback function passed to fetchall_a function')
+ }
+ if (param.data) {
+ throw new errors.ProgrammingError("You aren't allowed to set the key 'data' for the parameters object");
+ }
+ let buf = ref.alloc('Object');
+ param.data = [];
+ var cr = this;
+
+ // This callback wrapper accumulates the data from the fetch_rows_a function from the cinterface. It is accumulated by passing the param2
+ // object which holds accumulated data in the data key.
+ let asyncCallbackWrapper = function asyncCallbackWrapper(param2, result2, numOfRows2, rowData) {
+ param2 = ref.readObject(param2); //return the object back from the pointer
+ if (numOfRows2 > 0 && rowData.length != 0) {
+ // Keep fetching until no rows are left.
+ let buf2 = ref.alloc('Object');
+ param2.data.push(rowData);
+ ref.writeObject(buf2, 0, param2);
+ cr._chandle.fetch_rows_a(result2, asyncCallbackWrapper, buf2);
+ }
+ else {
+ let finalData = param2.data;
+ let fields = cr._chandle.fetchFields_a(result2);
+ let data = [];
+ for (let i = 0; i < finalData.length; i++) {
+ let num_of_rows = finalData[i][0].length; //fetched block number i;
+ let block = finalData[i];
+ for (let j = 0; j < num_of_rows; j++) {
+ data.push([]);
+ let rowBlock = new Array(fields.length);
+ for (let k = 0; k < fields.length; k++) {
+ rowBlock[k] = block[k][j];
+ }
+ data[data.length - 1] = rowBlock;
+ }
+ }
+ cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks! 
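+ // (note) this final branch runs exactly once per query: every accumulated block in
+ // param2.data has been flattened into row-major `data`, so the user callback receives
+ // the complete result set together with the field metadata in a single call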
+ callback(param2, result2, numOfRows2, { data: data, fields: fields });
+
+ }
+ }
+ ref.writeObject(buf, 0, param);
+ param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
+ return { param: param, result: result };
+}
+/**
+ * Stop a query given the result handle.
+ * @param {Buffer} result - The buffer that acts as the result handle
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.stopQuery = function stopQuery(result) {
+ this._chandle.stopQuery(result);
+}
+TDengineCursor.prototype._reset_result = function _reset_result() {
+ this._rowcount = -1;
+ if (this._result != null) {
+ this._chandle.freeResult(this._result);
+ }
+ this._result = null;
+ this._fields = null;
+ this.data = [];
+ this.fields = null;
+}
+/**
+ * Get server info such as version number
+ * @return {string}
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.getServerInfo = function getServerInfo() {
+ return this._chandle.getServerInfo(this._connection._conn);
+}
+/**
+ * Get client info such as version number
+ * @return {string}
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.getClientInfo = function getClientInfo() {
+ return this._chandle.getClientInfo();
+}
+/**
+ * Subscribe to a table from a database in TDengine.
+ * @param {Object} config - A configuration object containing the configuration options for the subscription
+ * @param {string} config.restart - whether or not to continue a subscription if it already exists, otherwise start from beginning
+ * @param {string} config.topic - The unique identifier of a subscription
+ * @param {string} config.sql - A sql statement for data query
+ * @param {string} config.interval - The pulling interval
+ * @return {Buffer} A buffer pointing to the subscription session handle
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.subscribe = function subscribe(config) {
+ let restart = config.restart ? 1 : 0;
+ return this._chandle.subscribe(this._connection._conn, restart, config.topic, config.sql, config.interval);
+};
+/**
+ * An infinite loop that consumes the latest data and calls a callback function that is provided.
+ * @param {Buffer} subscription - A buffer object pointing to the subscription session handle
+ * @param {function} callback - The callback function that takes the row data, field/column meta data, and the subscription session handle as input
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
+ while (true) {
+ let { data, fields, result } = this._chandle.consume(subscription);
+ callback(data, fields, result);
+ }
+}
+/**
+ * Unsubscribe the provided buffer object pointing to the subscription session handle
+ * @param {Buffer} subscription - A buffer object pointing to the subscription session handle that is to be unsubscribed
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
+ this._chandle.unsubscribe(subscription);
+}
+/**
+ * Open a stream with TDengine to run the sql query periodically in the background
+ * @param {string} sql - The query to run
+ * @param {function} callback - The callback function to run after each query, accepting inputs as param, result handle, data, fields meta data
+ * @param {number} stime - The time the stream starts, in epoch milliseconds. If 0 is given, the start time is set as the current time.
+ * @param {function} stoppingCallback - The callback function to run when the continuous query stops. 
 It takes no inputs
+ * @param {object} param - A parameter that is passed to the main callback function
+ * @return {Buffer} A buffer pointing to the stream handle
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
+  let buf = ref.alloc('Object');
+  ref.writeObject(buf, 0, param);
+
+  let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
+    let data = [];
+    let num_of_rows = blocks[0].length;
+    for (let j = 0; j < num_of_rows; j++) {
+      data.push([]);
+      let rowBlock = new Array(fields.length);
+      for (let k = 0; k < fields.length; k++) {
+        rowBlock[k] = blocks[k][j];
+      }
+      data[data.length - 1] = rowBlock;
+    }
+    callback(param2, result2, data, fields); // pass the flattened row arrays, matching the documented callback signature
+  }
+  return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
+}
+/**
+ * Close a stream
+ * @param {Buffer} stream - A buffer pointing to the handle of the stream to be closed
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.closeStream = function closeStream(stream) {
+  this._chandle.closeStream(stream);
+}
diff --git a/tests/connectorTest/nodejsTest/nodetaos/error.js b/tests/connectorTest/nodejsTest/nodetaos/error.js
new file mode 100644
index 0000000000000000000000000000000000000000..8ab91a50c7d81a4675246617e0969ee8c81c514e
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/error.js
@@ -0,0 +1,96 @@
+
+/**
+ * TDengine Error Class
+ * @ignore
+ */
+class TDError extends Error {
+  constructor(args) {
+    super(args)
+    this.name = "TDError";
+  }
+}
+/** Exception raised for important warnings like data truncations while inserting.
+ * @ignore
+ */
+class Warning extends Error {
+  constructor(args) {
+    super(args)
+    this.name = "Warning";
+  }
+}
+/** Exception raised for errors that are related to the database interface rather than the database itself.
+ * @ignore
+ */
+class InterfaceError extends TDError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.InterfaceError";
+  }
+}
+/** Exception raised for errors that are related to the database.
+ * @ignore
+ */
+class DatabaseError extends TDError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError";
+  }
+}
+/** Exception raised for errors that are due to problems with the processed data, like division by zero or a numeric value out of range.
+ * @ignore
+ */
+class DataError extends DatabaseError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError.DataError";
+  }
+}
+/** Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer.
+ * @ignore
+ */
+class OperationalError extends DatabaseError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError.OperationalError";
+  }
+}
+/** Exception raised when the relational integrity of the database is affected.
+ * @ignore
+ */
+class IntegrityError extends DatabaseError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError.IntegrityError";
+  }
+}
+/** Exception raised when the database encounters an internal error.
+ * @ignore
+ */
+class InternalError extends DatabaseError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError.InternalError";
+  }
+}
+/** Exception raised for programming errors.
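+ * For example: passing a non-function callback to fetchall_a, or setting the reserved "data" key on a params object.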
+ * @ignore
+ */
+class ProgrammingError extends DatabaseError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError.ProgrammingError";
+  }
+}
+/** Exception raised in case a method or database API was used which is not supported by the database.
+ * @ignore
+ */
+class NotSupportedError extends DatabaseError {
+  constructor(args) {
+    super(args)
+    this.name = "TDError.DatabaseError.NotSupportedError";
+  }
+}
+
+module.exports = {
+  TDError, Warning, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError
+};
diff --git a/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js
new file mode 100644
index 0000000000000000000000000000000000000000..cf7344c868ee94831eba47ff55369a684e34b02f
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js
@@ -0,0 +1,14 @@
+/* Wrap a callback, reduce code amount */
+function wrapCB(callback, input) {
+  if (typeof callback === 'function') {
+    callback(input);
+  }
+  return;
+}
+global.wrapCB = wrapCB;
+function toTaosTSString(date) {
+  date = new Date(date);
+  let tsArr = date.toISOString().split("T")
+  return tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1);
+}
+global.toTaosTSString = toTaosTSString;
diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js
new file mode 100644
index 0000000000000000000000000000000000000000..3bc0fe0aca060a32daa7a5cebd2dbfb99ac29a7c
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js
@@ -0,0 +1,152 @@
+const FieldTypes = require('./constants');
+const util = require('util');
+/**
+ * Various objects such as TaosRow and TaosField that help make parsing data easier
+ * @module TaosObjects
+ *
+ */
+
+/**
+ * The TaosRow object. Contains the data from a retrieved row from a database and functions that parse the data.
+ * @typedef {Object} TaosRow - A row of data retrieved from a table.
+ * @global
+ * @example
+ * var trow = new TaosRow(row);
+ * console.log(trow.data);
+ */
+function TaosRow(row) {
+  this.data = row;
+  this.length = row.length;
+  return this;
+}
+
+/**
+ * @typedef {Object} TaosField - A field/column's metadata from a table.
+ * @global
+ * @example
+ * var tfield = new TaosField(field);
+ * console.log(tfield.name);
+ */
+
+function TaosField(field) {
+  this._field = field;
+  this.name = field.name;
+  this.type = FieldTypes.getType(field.type);
+  return this;
+}
+
+/**
+ * A TaosTimestamp object, which is the standard date object with added functionality
+ * @global
+ * @memberof TaosObjects
+ * @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000
+ * @param {number} precision - The timestamp precision: 0 for milliseconds, 1 for microseconds, 2 for nanoseconds
+ */
+class TaosTimestamp extends Date {
+  constructor(date, precision = 0) {
+    if (precision === 1) {
+      super(Math.floor(date / 1000));
+      this.precisionExtras = date % 1000;
+    } else if (precision === 2) {
+      // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000, which is not expected
+      super(parseInt(BigInt(date) / 1000000n));
+      // use BigInt to fix: 1625801548423914405 % 1000000 = 914496, which is not expected (914405)
+      this.precisionExtras = parseInt(BigInt(date) % 1000000n);
+    } else {
+      super(parseInt(date));
+    }
+    this.precision = precision;
+  }
+
+  /**
+   * TDengine raw timestamp.
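+   * For precision 1 (microseconds) and 2 (nanoseconds), the sub-millisecond remainder stored in precisionExtras is added back onto the base Date value.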
+   * @returns raw taos timestamp (int64)
+   */
+  taosTimestamp() {
+    if (this.precision == 1) {
+      return (this * 1000 + this.precisionExtras);
+    } else if (this.precision == 2) {
+      return (this * 1000000 + this.precisionExtras);
+    } else {
+      return Math.floor(this);
+    }
+  }
+
+  /**
+   * Gets the microseconds of a Date.
+   * @return {Int} A microseconds integer
+   */
+  getMicroseconds() {
+    if (this.precision == 1) {
+      return this.getMilliseconds() * 1000 + this.precisionExtras;
+    } else if (this.precision == 2) {
+      return this.getMilliseconds() * 1000 + this.precisionExtras / 1000;
+    } else {
+      return 0;
+    }
+  }
+  /**
+   * Gets the nanoseconds of a TaosTimestamp.
+   * @return {Int} A nanoseconds integer
+   */
+  getNanoseconds() {
+    if (this.precision == 1) {
+      return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000;
+    } else if (this.precision == 2) {
+      return this.getMilliseconds() * 1000000 + this.precisionExtras;
+    } else {
+      return 0;
+    }
+  }
+
+  /**
+   * @returns {String} the sub-millisecond part of the timestamp as a zero-padded string
+   */
+  _precisionExtra() {
+    if (this.precision == 1) {
+      return String(this.precisionExtras).padStart(3, '0');
+    } else if (this.precision == 2) {
+      return String(this.precisionExtras).padStart(6, '0');
+    } else {
+      return '';
+    }
+  }
+  /**
+   * @function Returns the date as a string usable by TDengine
+   * @return {string} A Taos Timestamp String
+   */
+  toTaosString() {
+    var tzo = -this.getTimezoneOffset(),
+      dif = tzo >= 0 ? '+' : '-',
+      pad = function (num) {
+        var norm = Math.floor(Math.abs(num));
+        return (norm < 10 ? '0' : '') + norm;
+      },
+      pad2 = function (num) {
+        var norm = Math.floor(Math.abs(num));
+        if (norm < 10) return '00' + norm;
+        if (norm < 100) return '0' + norm;
+        if (norm < 1000) return norm;
+      };
+    return this.getFullYear() +
+      '-' + pad(this.getMonth() + 1) +
+      '-' + pad(this.getDate()) +
+      ' ' + pad(this.getHours()) +
+      ':' + pad(this.getMinutes()) +
+      ':' + pad(this.getSeconds()) +
+      '.' + pad2(this.getMilliseconds()) +
+      '' + this._precisionExtra();
+  }
+
+  /**
+   * Custom console.log
+   * @returns {String} string format for debugging
+   */
+  [util.inspect.custom](depth, opts) {
+    return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts);
+  }
+  toString() {
+    return this.toTaosString();
+  }
+}
+
+module.exports = { TaosRow, TaosField, TaosTimestamp }
diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosquery.js b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js
new file mode 100644
index 0000000000000000000000000000000000000000..eeede3ff6885e27c1d1c569a7a410f88109c9acd
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js
@@ -0,0 +1,112 @@
+var TaosResult = require('./taosresult')
+require('./globalfunc.js')
+module.exports = TaosQuery;
+
+
+/**
+ * @class TaosQuery
+ * @classdesc The TaosQuery class sits one level above the TDengine Cursor: it generally returns promises from its functions and wraps
+ * all data with objects, such as wrapping a row of data with TaosRow. This enables a higher-level API that adds
+ * functionality and saves time, while also making it easier to debug and run into fewer problems through the use of promises.
+ * @param {string} query - Query to construct object from
+ * @param {TDengineCursor} cursor - The cursor through which this query will execute
+ * @param {boolean} execute - Whether or not to immediately execute the query synchronously and fetch all results. Default is false.
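+ * Note: when execute is true, the constructor returns the Promise produced by execute() rather than the TaosQuery instance.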
+ * @property {string} query - The current query, in string format, that the TaosQuery object represents
+ * @return {TaosQuery}
+ * @since 1.0.6
+ */
+function TaosQuery(query = "", cursor = null, execute = false) {
+  this.query = query;
+  this._cursor = cursor;
+  if (execute == true) {
+    return this.execute();
+  }
+  return this;
+}
+
+/**
+ * Executes the query object and returns a Promise
+ * @memberof TaosQuery
+ * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error
+ * @since 1.0.6
+ */
+TaosQuery.prototype.execute = async function execute() {
+  var taosQuery = this; // store the current instance for use inside the promise executor
+  var executionPromise = new Promise(function(resolve, reject) {
+    let data = [];
+    let fields = [];
+    let result;
+    try {
+      taosQuery._cursor.execute(taosQuery.query);
+      if (taosQuery._cursor._fields) fields = taosQuery._cursor._fields;
+      if (taosQuery._cursor._result != null) data = taosQuery._cursor.fetchall();
+      result = new TaosResult(data, fields)
+      resolve(result)
+    }
+    catch(err) {
+      reject(err);
+    }
+
+  });
+  return executionPromise;
+}
+
+/**
+ * Executes the query object asynchronously and returns a Promise that resolves only after all results have been fetched.
+ * @memberof TaosQuery
+ * @param {Object} options - Execution options
+ * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error
+ * @since 1.2.0
+ */
+TaosQuery.prototype.execute_a = async function execute_a(options = {}) {
+  var fres;
+  var frej;
+  var fetchPromise = new Promise( (resolve, reject) => {
+    fres = resolve;
+    frej = reject;
+  });
+  let asyncCallbackFetchall = async function(param, res, numOfRows, blocks) {
+    if (numOfRows > 0) {
+      // no result blocks to wrap (e.g. a write statement such as insert); resolve with no data
+      fres();
+    }
+    else {
+      fres(new TaosResult(blocks.data, blocks.fields));
+    }
+  }
+  let asyncCallback = async function(param, res, code) {
+    // upon success, fetch all of the results
+    this._cursor.fetchall_a(res, options, asyncCallbackFetchall, {});
+  }
+  this._cursor.execute_a(this.query, asyncCallback.bind(this), {});
+  return fetchPromise;
+}
+
+/**
+ * Bind arguments to the query; they are automatically parsed into the right format
+ * @param {array | ...args} args - A number of arguments to bind to each ? in the query
+ * @return {TaosQuery}
+ * @example
+ * // An example of binding a javascript date and a number to a query
+ * var query = cursor.query("select count(*) from meterinfo.meters where ts <= ? and areaid = ?").bind(new Date(), 3);
+ * var promise1 = query.execute();
+ * promise1.then(function(result) {
+ *   result.pretty(); // Log the prettified version of the results.
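+ *   console.log(result.rowcount); // a TaosResult also exposes rowcount and its fields metadata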
+ * });
+ * @since 1.0.6
+ */
+TaosQuery.prototype.bind = function bind(f, ...args) {
+  if (typeof f == 'object' && f.constructor.name != 'Array') args.unshift(f); //param is not an array object
+  else if (typeof f != 'object') args.unshift(f);
+  else { args = f; }
+  args.forEach(function(arg) {
+    if (arg.constructor.name == 'TaosTimestamp') arg = "\"" + arg.toTaosString() + "\"";
+    else if (arg.constructor.name == 'Date') arg = "\"" + toTaosTSString(arg) + "\"";
+    else if (typeof arg == 'string') arg = "\"" + arg + "\"";
+    this.query = this.query.replace(/\?/,arg);
+  }, this);
+  return this;
+}
diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosresult.js b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js
new file mode 100644
index 0000000000000000000000000000000000000000..4138ebbec6e1b792691d17a25b7c18d35b6a922a
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js
@@ -0,0 +1,85 @@
+require('./globalfunc.js')
+const TaosObjects = require('./taosobjects');
+const TaosRow = TaosObjects.TaosRow;
+const TaosField = TaosObjects.TaosField;
+
+module.exports = TaosResult;
+/**
+ * @class TaosResult
+ * @classdesc A TaosResult class consists of the row data and the field metadata, all wrapped under various objects for higher functionality.
+ * @param {Array} data - Array of result rows
+ * @param {Array} fields - Array of field meta data
+ * @property {Array} data - Array of TaosRows forming the result data (this does not include field meta data)
+ * @property {Array} fields - Array of TaosFields forming the fields meta data array.
+ * @return {TaosResult}
+ * @since 1.0.6
+ */
+function TaosResult(data, fields) {
+  this.data = data.map(row => new TaosRow(row));
+  this.rowcount = this.data.length;
+  this.fields = fields.map(field => new TaosField(field));
+}
+/**
+ * Pretty print the data and the field meta data as if you were using the taos shell
+ * @memberof TaosResult
+ * @function pretty
+ * @since 1.0.6
+ */
+
+TaosResult.prototype.pretty = function pretty() {
+  let fieldsStr = "";
+  let sizing = [];
+  this.fields.forEach((field,i) => {
+    if (field._field.type == 8 || field._field.type == 10){
+      sizing.push(Math.max(field.name.length, field._field.bytes));
+    }
+    else {
+      sizing.push(Math.max(field.name.length, suggestedMinWidths[field._field.type]));
+    }
+    fieldsStr += fillEmpty(Math.floor(sizing[i]/2 - field.name.length / 2)) + field.name + fillEmpty(Math.ceil(sizing[i]/2 - field.name.length / 2)) + " | ";
+  });
+  var sumLengths = sizing.reduce((a,b)=> a+=b,(0)) + sizing.length * 3;
+
+  console.log("\n" + fieldsStr);
+  console.log(printN("=",sumLengths));
+  this.data.forEach(row => {
+    let rowStr = "";
+    row.data.forEach((entry, i) => {
+      if (this.fields[i]._field.type == 9) {
+        entry = entry.toTaosString();
+      } else {
+        entry = entry == null ?
'null' : entry.toString();
+      }
+      rowStr += entry
+      rowStr += fillEmpty(sizing[i] - entry.length) + " | ";
+    });
+    console.log(rowStr);
+  });
+}
+const suggestedMinWidths = {
+  0: 4,
+  1: 4,
+  2: 4,
+  3: 6,
+  4: 11,
+  5: 12,
+  6: 24,
+  7: 24,
+  8: 10,
+  9: 25,
+  10: 10,
+}
+function printN(s, n) {
+  let f = "";
+  for (let i = 0; i < n; i ++) {
+    f += s;
+  }
+  return f;
+}
+function fillEmpty(n) {
+  let str = "";
+  for (let i = 0; i < n; i++) {
+    str += " ";
+  }
+  return str;
+}
diff --git a/tests/connectorTest/nodejsTest/readme.md b/tests/connectorTest/nodejsTest/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..26a28afbdd514ad97e969302e7d790f6240bb770
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/readme.md
@@ -0,0 +1,161 @@
+# TDengine Node.js connector
+[![minzip](https://img.shields.io/bundlephobia/minzip/td2.0-connector.svg)](https://github.com/taosdata/TDengine/tree/master/src/connector/nodejs) [![NPM](https://img.shields.io/npm/l/td2.0-connector.svg)](https://github.com/taosdata/TDengine/#what-is-tdengine)
+
+This is the Node.js library that lets you connect to [TDengine](https://www.github.com/taosdata/tdengine) 2.0. It provides an extensive API, so you can use as much or as little of it as you want. If you want the raw data in the form of an array of arrays for the row data retrieved from a table, you can do that. If you want to wrap that data with objects that let you easily manipulate and display data, such as through a prettifier function, you can do that too!
+
+## Installation
+
+To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/)
+
+```cmd
+npm install td2.0-connector
+```
+
+To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp)
+
+### On Linux
+
+- `python` (`v2.7` recommended, `v3.x.x` is **not** supported)
+- `make`
+- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org)
+- `node` (between `v10.x` and `v11.x`; other versions have dependency compatibility problems)
+
+### On macOS
+
+- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS)
+
+- Xcode
+
+  - You also need to install the `Command Line Tools` via Xcode. You can find this under the menu `Xcode -> Preferences -> Locations` (or by running `xcode-select --install` in your Terminal)
+
+  - This step will install `gcc` and the related toolchain containing `make`
+
+### On Windows
+
+#### Option 1
+
+Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator).
+
+#### Option 2
+
+Install tools and configuration manually:
+
+- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload)
+- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.)
+- Launch cmd, `npm config set msvs_version 2017`
+
+If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips.
+
+To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
+
+## Usage
+
+The following is a short summary of the basic usage of the connector; the full API and documentation can be found [here](http://docs.taosdata.com/node)
+
+### Connection
+
+To use the connector, first require the library ```td2.0-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The required connection option is ```host```; other options, if not set, take the default values shown below.
+
+A cursor also needs to be initialized in order to interact with TDengine from Node.js.
+
+```javascript
+const taos = require('td2.0-connector');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
+var cursor = conn.cursor(); // Initializing a new cursor
+```
+
+Close a connection
+
+```javascript
+conn.close();
+```
+
+### Queries
+
+We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object.
+
+```javascript
+var query = cursor.query('show databases;')
+```
+
+We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object containing the raw data and additional functionality such as pretty printing the results.
+
+```javascript
+var promise = query.execute();
+promise.then(function(result) {
+  result.pretty(); //logs the results to the console as if you were in the taos shell
+});
+```
+
+You can also query by binding parameters to a query by filling in the question marks in a string, as shown below. The query will automatically parse what was bound and convert it to the proper format for use with TDengine.
+
+```javascript
+var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
+query.execute().then(function(result) {
+  result.pretty();
+})
+```
+
+The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.
+
+```javascript
+var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
+promise.then(function(result) {
+  result.pretty();
+})
+```
+
+If you want to execute queries without objects being wrapped around the data, use ```cursor.execute()``` directly and ```cursor.fetchall()``` to retrieve data if there is any.
+```javascript
+cursor.execute('select count(*), avg(v1), min(v2) from meterinfo.meters where ts >= \"2019-07-20 00:00:00.000\";');
+var data = cursor.fetchall();
+console.log(cursor.fields); // Latest query's Field metadata is stored in cursor.fields
+console.log(cursor.data); // Latest query's result data is stored in cursor.data, also returned by fetchall.
+```
+
+### Async functionality
+
+Async queries can be performed using the same functions, such as `cursor.execute` and `TaosQuery.query`, with `_a` appended to them.
+
+Say you want to execute two async queries on two separate tables. Using `cursor.query`, you can get a TaosQuery object for each, which, upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object.
+
+```javascript
+var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
+var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
+promise1.then(function(result) {
+  result.pretty();
+})
+promise2.then(function(result) {
+  result.pretty();
+})
+```
+
+## Example
+
+An example of using the NodeJS connector to create a table with weather data and to create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (the preferred way of using the connector).
+
+An example that achieves the same things, but without the object wrappers around the returned data, can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js).
+
+## Contributing to TDengine
+
+Please follow the [contribution guidelines](https://github.com/taosdata/TDengine/blob/master/CONTRIBUTING.md) to contribute to the project.
+ +## License + +[GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html) diff --git a/tests/connectorTest/nodejsTest/tdengine.js b/tests/connectorTest/nodejsTest/tdengine.js new file mode 100644 index 0000000000000000000000000000000000000000..047c744a4fc90c6306e851eaa529a7f9f578fe12 --- /dev/null +++ b/tests/connectorTest/nodejsTest/tdengine.js @@ -0,0 +1,4 @@ +var TDengineConnection = require('./nodetaos/connection.js') +module.exports.connect = function (connection={}) { + return new TDengineConnection(connection); +} diff --git a/tests/connectorTest/nodejsTest/test/performance.js b/tests/connectorTest/nodejsTest/test/performance.js new file mode 100644 index 0000000000000000000000000000000000000000..ea197f034435e28edd67df8d5f4b141f410fed81 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/performance.js @@ -0,0 +1,89 @@ +function memoryUsageData() { + let s = process.memoryUsage() + for (key in s) { + s[key] = (s[key]/1000000).toFixed(3) + "MB"; + } + return s; +} +console.log("initial mem usage:", memoryUsageData()); + +const { PerformanceObserver, performance } = require('perf_hooks'); +const taos = require('../tdengine'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}); +var c1 = conn.cursor(); + +// Initialize env +c1.execute('create database if not exists td_connector_test;'); +c1.execute('use td_connector_test;') +c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));'); +c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));') + + +// Insertion into single table Performance Test +var dataPrepTime = 0; +var insertTime = 0; +var insertTime5000 = 0; +var avgInsert5ktime = 0; +const obs = new PerformanceObserver((items) => { + let entry = items.getEntries()[0]; + + if (entry.name == 'Data Prep') { + dataPrepTime += entry.duration; + } + else if (entry.name == 'Insert'){ + insertTime += entry.duration + } + else { + console.log(entry.name + ': ' + (entry.duration/1000).toFixed(8) + 's'); + } + performance.clearMarks(); +}); +obs.observe({ entryTypes: ['measure'] }); + +function R(l,r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} +function insertN(n) { + for (let i = 0; i < n; i++) { + performance.mark('A3'); + let insertData = ["now + " + i + "m", // Timestamp + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt + parseFloat( R(-3.4E38, 3.4E38) ), // Float + parseFloat( R(-1.7E308, 1.7E308) ), // Double + "\"Long Binary\"", // Binary + parseInt( R(-32767, 32767) ), // Small Int + parseInt( R(-127, 127) ), // Tiny Int + randomBool(), + "\"Nchars 一些中文字幕\""]; // Bool + let query = 'insert into td_connector_test.all_types values(' + insertData.join(',') + ' );'; + performance.mark('B3'); + performance.measure('Data Prep', 'A3', 'B3'); + performance.mark('A2'); + c1.execute(query, {quiet:true}); + performance.mark('B2'); + performance.measure('Insert', 'A2', 'B2'); + if ( i % 5000 == 4999) { + console.log("Insert # " + (i+1)); + console.log('Insert 5k records: ' + ((insertTime - insertTime5000)/1000).toFixed(8) + 's'); + insertTime5000 = insertTime; + avgInsert5ktime = (avgInsert5ktime/1000 * Math.floor(i / 5000) + 
insertTime5000/1000) / Math.ceil( i / 5000);
+      console.log('DataPrepTime So Far: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time So Far: ' + (insertTime/1000).toFixed(8) + 's | Avg. Insert 5k time: ' + avgInsert5ktime.toFixed(8));
+
+
+    }
+  }
+}
+performance.mark('insert 1E5')
+insertN(1E5);
+performance.mark('insert 1E5 2')
+performance.measure('Insert With Logs', 'insert 1E5', 'insert 1E5 2');
+console.log('DataPrepTime: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time: ' + (insertTime/1000).toFixed(8) + 's');
+dataPrepTime = 0; insertTime = 0;
+//'insert into td_connector_test.all_types values (now, null,null,null,null,null,null,null,null,null);'
diff --git a/tests/connectorTest/nodejsTest/test/test.js b/tests/connectorTest/nodejsTest/test/test.js
new file mode 100644
index 0000000000000000000000000000000000000000..caf05955da4c960ebedc872f400c17d18be767dd
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/test.js
@@ -0,0 +1,170 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+  let tsArr = date.toISOString().split("T")
+  return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1) + "\"";
+}
+function R(l,r) {
+  return Math.random() * (r - l) - r;
+}
+function randomBool() {
+  if (Math.random() < 0.5) {
+    return true;
+  }
+  return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+c1.execute('create database if not exists td_connector_test;');
+c1.execute('use td_connector_test;')
+c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));');
+c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));')
+
+// Shell Test : The following uses the cursor to imitate the taos shell
+
+// Insert
+for (let i = 0; i < 10000; i++) {
+  let insertData = ["now+" + i + "s", // Timestamp
+    parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int
+    parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt
+    parseFloat( R(-3.4E38, 3.4E38) ), // Float
+    parseFloat( R(-1.7E30, 1.7E30) ), // Double
+    "\"Long Binary\"", // Binary
+    parseInt( R(-32767, 32767) ), // Small Int
+    parseInt( R(-127, 127) ), // Tiny Int
+    randomBool(), // Bool
+    "\"Nchars\""]; // NChar
+  c1.execute('insert into td_connector_test.all_types values(' + insertData.join(',') + ' );', {quiet:true});
+  if (i % 1000 == 0) {
+    console.log("Insert # " , i);
+  }
+}
+
+// Select
+console.log('select * from td_connector_test.all_types limit 2 offset 100;');
+c1.execute('select * from td_connector_test.all_types limit 2 offset 100;');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+console.log(d);
+
+// Functions
+console.log('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;')
+c1.execute('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;');
+var d = c1.fetchall();
+console.log(c1.fields);
+console.log(d);
+
+// Immediate Execution like the Shell
+
+c1.query('select count(*), stddev(_double), min(_tinyint) from all_types where _tinyint > 50 and _int < 0;', true).then(function(result){
+  result.pretty();
+})
+
+c1.query('select _tinyint, _bool from all_types where _tinyint > 50 and _int < 0 limit 50;',
true).then(function(result){
+  result.pretty();
+})
+
+c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types;', true).then(function(result){
+  result.pretty();
+})
+c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types interval(1m) limit 100;', true).then(function(result){
+  result.pretty();
+})
+
+// Binding arguments, and then using promise
+var q = c1.query('select _nchar from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100)
+console.log(q.query);
+q.execute().then(function(r) {
+  r.pretty();
+});
+
+
+// test query null value
+c1.execute("create table if not exists td_connector_test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64))");
+c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)");
+c1.execute("insert into t1(ts, temperature) values(now, 22.22)");
+c1.execute("insert into t1(ts, humidity) values(now, 33)");
+c1.query('select * from td_connector_test.t1', true).then(function (result) {
+  result.pretty();
+});
+
+var q = c1.query('select * from td_connector_test.weather');
+console.log(q.query);
+q.execute().then(function(r) {
+  r.pretty();
+});
+
+function sleep(sleepTime) {
+  for(var start = +new Date; +new Date - start <= sleepTime; ) { }
+}
+
+sleep(10000);
+
+// Raw Async Testing (Callbacks, not promises)
+function cb2(param, result, rowCount, rd) {
+  console.log('CB2 Callbacked!');
+  console.log("RES *", result);
+  console.log("Async fetched", rowCount, " rows");
+  console.log("Passed Param: ", param);
+  console.log("Fields ", rd.fields);
+  console.log("Data ", rd.data);
+}
+function cb1(param,result,code) {
+  console.log('CB1 Callbacked!');
+  console.log("RES * ", result);
+  console.log("Status: ", code);
+  console.log("Passed Param ", param);
+  c1.fetchall_a(result, cb2, param);
+}
+
+c1.execute_a("describe td_connector_test.all_types;", cb1, {myparam:3.141});
+
+function cb4(param, result, rowCount, rd) {
+  console.log('CB4 Callbacked!');
+  console.log("RES *", result);
+  console.log("Async fetched", rowCount, "rows");
+  console.log("Passed Param: ", param);
+  console.log("Fields", rd.fields);
+  console.log("Data", rd.data);
+}
+// Without directly calling fetchall_a
+var thisRes;
+function cb3(param,result,code) {
+  console.log('CB3 Callbacked!');
+  console.log("RES *", result);
+  console.log("Status:", code);
+  console.log("Passed Param", param);
+  thisRes = result;
+}
+// Test calling execute and fetchall separately and not through callbacks
+var param = c1.execute_a("describe td_connector_test.all_types;", cb3, {e:2.718});
+console.log("Passed Param outside of callback: ", param);
+console.log(param);
+setTimeout(function(){
+  c1.fetchall_a(thisRes, cb4, param);
+},100);
+
+
+// Async through promises
+var aq = c1.query('select count(*) from td_connector_test.all_types;',false);
+aq.execute_a().then(function(data) {
+  data.pretty();
+});
+
+c1.query('describe td_connector_test.stabletest').execute_a().then(function(r){
+  r.pretty()
+});
+
+setTimeout(function(){
+  c1.query('drop database td_connector_test;');
+},200);
+
+setTimeout(function(){
+  conn.close();
+},2000);
diff --git a/tests/connectorTest/nodejsTest/test/testMicroseconds.js b/tests/connectorTest/nodejsTest/test/testMicroseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..cc65b3d919f92b3b4d7e0e216c6c8ac64a294d7f
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/testMicroseconds.js
@@ -0,0 +1,49 @@
+const taos =
require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+  let tsArr = date.toISOString().split("T")
+  return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+  return Math.random() * (r - l) - r;
+}
+function randomBool() {
+  if (Math.random() < 0.5) {
+    return true;
+  }
+  return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_us';
+c1.execute('create database if not exists ' + dbname + ' precision "us"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
+if (ts.taosTimestamp() != 1625801548423914) {
+  throw "microseconds do not match!";
+}
+if (ts.getMicroseconds() % 1000 !== 914) {
+  throw "microsecond precision error";
+}
+setTimeout(function () {
+  c1.query('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+  conn.close();
+}, 2000);
diff --git a/tests/connectorTest/nodejsTest/test/testNanoseconds.js b/tests/connectorTest/nodejsTest/test/testNanoseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..85a7600b01f2c908f22e621488f22678083149ea
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/testNanoseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+  let tsArr = date.toISOString().split("T")
+  return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+  return Math.random() * (r - l) - r;
+}
+function randomBool() {
+  if (Math.random() < 0.5) {
+    return true;
+  }
+  return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_ns';
+c1.execute('create database if not exists ' + dbname + ' precision "ns"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914405, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
+if (ts.taosTimestamp() != 1625801548423914405) {
+  throw "nanoseconds do not match!";
+}
+if (ts.getNanoseconds() % 1000000 !== 914405) {
+  throw "nanosecond precision error";
+}
+setTimeout(function () {
+  c1.query('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+  conn.close();
+}, 2000);
diff --git a/tests/connectorTest/nodejsTest/test/testSubscribe.js b/tests/connectorTest/nodejsTest/test/testSubscribe.js
new file mode 100644
index 0000000000000000000000000000000000000000..30fb3f425683f0113873534f2b67255db811edcc
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/testSubscribe.js
@@ -0,0 +1,16 @@
+const taos = require('../tdengine');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10});
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+c1.execute('use td_connector_test');
+let sub = c1.subscribe({
+  restart: true,
+  sql: "select AVG(_int) from
td_connector_test.all_Types;", + topic: 'all_Types', + interval: 1000 +}); + +c1.consumeData(sub, (data, fields) => { + console.log(data); +}); \ No newline at end of file diff --git a/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a4bc73aef3e19bc56e817325acd62d21156d67 --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py @@ -0,0 +1,111 @@ +import pyodbc +import argparse +import sys + +parser = argparse.ArgumentParser(description='Access TDengine via ODBC.') +parser.add_argument('--DSN', help='DSN to use') +parser.add_argument('--UID', help='UID to use') +parser.add_argument('--PWD', help='PWD to use') +parser.add_argument('--Server', help='Server to use') +parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use') + +args = parser.parse_args() + +a = 'DSN=%s'%args.DSN if args.DSN else None +b = 'UID=%s'%args.UID if args.UID else None +c = 'PWD=%s'%args.PWD if args.PWD else None +d = 'Server=%s'%args.Server if args.Server else None +conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None +conn_str = conn_str if conn_str else args.C +if not conn_str: + parser.print_help(file=sys.stderr) + exit() + +print('connecting: [%s]' % conn_str) +cnxn = pyodbc.connect(conn_str, autocommit=True) +cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129") +##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute(""" +INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?) 
+""", +"2020-12-12 00:00:00", +'true', +'-127', +'-32767', +'-2147483647', +'-9223372036854775807', +'-1.23e10', +'-11.23e6', +'abcdefghij'.encode('utf-8'), +"人啊大发测试及abc") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)") +cursor.close() + +params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'), + ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'), + ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'), + ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +print('py:...................') +cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params) +print('py:...................') +cursor.close() + +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", 4) +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() +## +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", '5') +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() + diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.go b/tests/connectorTest/odbcTest/nanosupport/odbc.go new file mode 100644 index 0000000000000000000000000000000000000000..4d9c760c4e87a4a899051edc74692ecca8a19d15 --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/odbc.go @@ -0,0 +1,84 @@ +package main + +import ( + "context" + "database/sql" + "flag" + "log" + "os" + "os/signal" + "time" + _ "github.com/alexbrainman/odbc" +) + +var pool *sql.DB // Database connection pool. + +func main() { + id := flag.Int64("id", 32768, "person ID to find") + dsn := flag.String("dsn", os.Getenv("DSN"), "connection data source name") + flag.Parse() + + if len(*dsn) == 0 { + log.Fatal("missing dsn flag") + } + if *id == 0 { + log.Fatal("missing person ID") + } + var err error + + // Opening a driver typically will not attempt to connect to the database. + pool, err = sql.Open("odbc", *dsn) + if err != nil { + // This will not be a connection error, but a DSN parse error or + // another initialization error. + log.Fatal("unable to use data source name", err) + } + defer pool.Close() + + pool.SetConnMaxLifetime(0) + pool.SetMaxIdleConns(3) + pool.SetMaxOpenConns(3) + + ctx, stop := context.WithCancel(context.Background()) + defer stop() + + appSignal := make(chan os.Signal, 3) + signal.Notify(appSignal, os.Interrupt) + + go func() { + select { + case <-appSignal: + stop() + } + }() + + Ping(ctx) + + Query(ctx, *id) +} + +// Ping the database to verify DSN provided by the user is valid and the +// server accessible. If the ping fails exit the program with an error. 
+func Ping(ctx context.Context) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + if err := pool.PingContext(ctx); err != nil { + log.Fatalf("unable to connect to database: %v", err) + } +} + +// Query the database for the information requested and prints the results. +// If the query fails exit the program with an error. +func Query(ctx context.Context, id int64) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var name string + err := pool.QueryRowContext(ctx, "select name from m.t").Scan(&name) + if err != nil { + log.Fatal("unable to execute search query", err) + } + log.Println("name=", name) +} + diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.py b/tests/connectorTest/odbcTest/nanosupport/odbc.py new file mode 100644 index 0000000000000000000000000000000000000000..cee0cf1a13f6360790de368637e2b6a05de3564f --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/odbc.py @@ -0,0 +1,115 @@ +import pyodbc +import argparse +import sys + +parser = argparse.ArgumentParser(description='Access TDengine via ODBC.') +parser.add_argument('--DSN', help='DSN to use') +parser.add_argument('--UID', help='UID to use') +parser.add_argument('--PWD', help='PWD to use') +parser.add_argument('--Server', help='Server to use') +parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use') + +args = parser.parse_args() + +a = 'DSN=%s'%args.DSN if args.DSN else None +b = 'UID=%s'%args.UID if args.UID else None +c = 'PWD=%s'%args.PWD if args.PWD else None +d = 'Server=%s'%args.Server if args.Server else None +conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None +conn_str = conn_str if conn_str else args.C +if not conn_str: + parser.print_help(file=sys.stderr) + exit() + +print('connecting: [%s]' % conn_str) +cnxn = pyodbc.connect(conn_str, autocommit=True) +cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129") +##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute(""" +INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?) 
+""", +"2020-12-12 00:00:00", +'true', +'-127', +'-32767', +'-2147483647', +'-9223372036854775807', +'-1.23e10', +'-11.23e6', +'abcdefghij'.encode('utf-8'), +"人啊大发测试及abc") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("select * from db.v") +cursor.close() + +params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'), + ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'), + ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'), + ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +print('py:...................') +cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params) +print('py:...................') +cursor.close() + +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", 4) +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() +## +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", '5') +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() + diff --git a/tests/examples/C#/taosdemo/Dockerfile b/tests/examples/C#/taosdemo/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4eefc6c75248b1e1e1d6daf305386cca5b11e606 --- /dev/null +++ b/tests/examples/C#/taosdemo/Dockerfile @@ -0,0 +1,24 @@ +FROM tdengine/tdengine-beta:latest + +ENV DEBIAN_FRONTEND=noninteractive +ARG MIRROR=archive.ubuntu.com +RUN sed -Ei 's/\w+.ubuntu.com/'${MIRROR}'/' /etc/apt/sources.list && apt update && apt install mono-devel -y +RUN apt-get install wget -y \ + && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \ + && dpkg -i packages-microsoft-prod.deb \ + && rm packages-microsoft-prod.deb \ + && apt-get update && apt-get install -y dotnet-sdk-5.0 +COPY ./*.cs *.csproj /tmp/ +WORKDIR /tmp/ +RUN dotnet build -c Release && cp bin/Release/net5.0/taosdemo bin/Release/net5.0/taosdemo.* /usr/local/bin/ && rm -rf /tmp/* + +FROM tdengine/tdengine-beta:latest + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install wget -y \ + && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \ + && dpkg -i packages-microsoft-prod.deb \ + && rm packages-microsoft-prod.deb \ + && apt-get update && apt-get install -y dotnet-runtime-5.0 +COPY --from=0 /usr/local/bin/taosdemo* /usr/local/bin/ +CMD ["/usr/local/bin/taosdemo"] diff --git a/tests/examples/C#/taosdemo/README.md b/tests/examples/C#/taosdemo/README.md index 2d125fb140076c46c9abc4c60db330b28b494802..3cba3529bf513e2bf3d4ab0c169e7f3d03b2e6a8 100644 --- a/tests/examples/C#/taosdemo/README.md +++ b/tests/examples/C#/taosdemo/README.md @@ -1,13 +1,41 @@ +# C# Taosdemo + +## 
For Mono
+
 install build environment
-===
+
+```sh
 yum/apt install mono-complete
+```
 
-build C# version taosdemo
-===
+Build the C# version of taosdemo.
+
+```sh
 mcs -out:taosdemo *.cs
+./taosdemo --help
+```
+
+## For DotNet
+
+Install the dotnet environment.
+
+```sh
+wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
+    && dpkg -i packages-microsoft-prod.deb \
+    && rm packages-microsoft-prod.deb \
+    && apt-get update && apt-get install -y dotnet-sdk-5.0
+```
+
+Build the DotNet version of taosdemo.
+
+```sh
+dotnet build -c Release
+./bin/Release/net5.0/taosdemo --help
+```
+
+## Usage
 
-run C# version taosdemo
-===
+```
 Usage: mono taosdemo.exe [OPTION...]
 
     --help                  Show usage.
@@ -34,3 +62,4 @@ Usage: mono taosdemo.exe [OPTION...]
     -v                      Print verbose output
     -g                      Print debug output
     -y                      Skip read key for continous test, default is not skip
+```
diff --git a/tests/examples/C#/taosdemo/taosdemo.csproj b/tests/examples/C#/taosdemo/taosdemo.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..15ec155d45e34aae7276fe596c177619dfddd3e9
--- /dev/null
+++ b/tests/examples/C#/taosdemo/taosdemo.csproj
@@ -0,0 +1,9 @@
+
+
+    Exe
+    net5.0
+    false
+
+
+
diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml
index fed00c147b87621c70d60ea206b06f1b0f3e8d8f..8cf0356721f8ffd568e87fa4a77c86eb0f90a62b 100644
--- a/tests/examples/JDBC/JDBCDemo/pom.xml
+++ b/tests/examples/JDBC/JDBCDemo/pom.xml
@@ -17,7 +17,7 @@
         <dependency>
             <groupId>com.taosdata.jdbc</groupId>
             <artifactId>taos-jdbcdriver</artifactId>
-            <version>2.0.31</version>
+            <version>2.0.34</version>
         </dependency>
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
index d4ea5f919d2882e4f82b817380172eff20d7c611..5bc23403087578c0791b0a5e6fca74a47aad8184 100644
--- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
@@ -7,6 +7,9 @@ public class JdbcDemo {
     private static String host;
     private static final String dbName = "test";
     private static final String tbName = "weather";
+    private static final String user = "root";
+    private static final String password = "taosdata";
+
     private Connection connection;
 
     public static void main(String[] args) {
@@ -30,10 +33,9 @@ public class JdbcDemo {
     }
 
     private void init() {
-        final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+        final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password;
         // get connection
         try {
-            Class.forName("com.taosdata.jdbc.TSDBDriver");
             Properties properties = new Properties();
             properties.setProperty("charset", "UTF-8");
             properties.setProperty("locale", "en_US.UTF-8");
@@ -42,8 +44,7 @@ public class JdbcDemo {
             connection = DriverManager.getConnection(url, properties);
             if (connection != null)
                 System.out.println("[ OK ] Connection established.");
-        } catch (ClassNotFoundException | SQLException e) {
-            System.out.println("[ ERROR! ] Connection establish failed.");
+        } catch (SQLException e) {
             e.printStackTrace();
         }
     }
@@ -74,7 +75,7 @@ public class JdbcDemo {
     }
 
     private void select() {
-        final String sql = "select * from "+ dbName + "." + tbName;
+        final String sql = "select * from " + dbName + "."
+ tbName; executeQuery(sql); } @@ -89,8 +90,6 @@ public class JdbcDemo { } } - /************************************************************************/ - private void executeQuery(String sql) { long start = System.currentTimeMillis(); try (Statement statement = connection.createStatement()) { @@ -117,7 +116,6 @@ public class JdbcDemo { } } - private void printSql(String sql, boolean succeed, long cost) { System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); } @@ -132,7 +130,6 @@ public class JdbcDemo { long end = System.currentTimeMillis(); printSql(sql, false, (end - start)); e.printStackTrace(); - } } @@ -141,5 +138,4 @@ public class JdbcDemo { System.exit(0); } - -} \ No newline at end of file +} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java index 5bf980f6d84e53438573812aa9f07d8d463f08c3..d89476b8ca718dab24202e2320e842366533a763 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java @@ -4,14 +4,15 @@ import java.sql.*; import java.util.Properties; public class JdbcRestfulDemo { - private static final String host = "127.0.0.1"; + private static final String host = "localhost"; + private static final String dbname = "test"; + private static final String user = "root"; + private static final String password = "taosdata"; public static void main(String[] args) { try { - // load JDBC-restful driver - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); // use port 6041 in url when use JDBC-restful - String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password; Properties properties = new Properties(); properties.setProperty("charset", "UTF-8"); @@ -21,12 +22,12 @@ public class JdbcRestfulDemo { Connection conn = DriverManager.getConnection(url, properties); Statement stmt = conn.createStatement(); - stmt.execute("drop database if exists restful_test"); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - stmt.execute("create table restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))"); - stmt.executeUpdate("insert into t1 using restful_test.weather tags('北京') values(now, 18.2)"); - ResultSet rs = stmt.executeQuery("select * from restful_test.weather"); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table " + dbname + ".weather(ts timestamp, temperature float) tags(location nchar(64))"); + stmt.executeUpdate("insert into t1 using " + dbname + ".weather tags('北京') values(now, 18.2)"); + ResultSet rs = stmt.executeQuery("select * from " + dbname + ".weather"); ResultSetMetaData meta = rs.getMetaData(); while (rs.next()) { for (int i = 1; i <= meta.getColumnCount(); i++) { @@ -38,8 +39,6 @@ public class JdbcRestfulDemo { rs.close(); stmt.close(); conn.close(); - } catch (ClassNotFoundException e) { - e.printStackTrace(); } catch (SQLException e) { e.printStackTrace(); } diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java index 
def4c649027034028d222bfedb71e37d82b99380..4c499b0b3abb518b48b222eca9bbbcb388bd2008 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java @@ -34,9 +34,8 @@ public class SubscribeDemo { System.out.println(usage); return; } - /*********************************************************************************************/ + try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 6c83718896cc2e5716f599ba08212d3dc8292133..9126813b67e71691692109920f891a6fb4cc5ab5 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -60,12 +60,15 @@ + + org.springframework.boot + spring-boot-starter-aop + + com.taosdata.jdbc taos-jdbcdriver - 2.0.28 - - + 2.0.34 diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java index fa10f3b0929e4c25c1379f489f73fc12ad9c1917..53edaa5796cccc7e4a4f274048c83a9ca7bbc7bb 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java @@ -4,7 +4,7 @@ import org.mybatis.spring.annotation.MapperScan; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; -@MapperScan(basePackages = {"com.taosdata.example.springbootdemo.dao"}) +@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"}) @SpringBootApplication public class SpringbootdemoApplication { diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java index cf14f5d84ace6348f38709ac3d3668ee8d2a0797..ed720fe6c02dd3a7eba6e645ea1e76d704c04d0c 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java @@ -15,35 +15,21 @@ public class WeatherController { @Autowired private WeatherService weatherService; - /** - * create database and table - * - * @return - */ + @GetMapping("/lastOne") + public Weather lastOne() { + return weatherService.lastOne(); + } + @GetMapping("/init") public int init() { return weatherService.init(); } - /** - * Pagination Query - * - * @param limit - * @param offset - * @return - */ @GetMapping("/{limit}/{offset}") public List queryWeather(@PathVariable Long limit, @PathVariable Long offset) { return weatherService.query(limit, offset); } - /** - * upload single weather info - * - * @param temperature - * @param humidity - * @return - */ @PostMapping("/{temperature}/{humidity}") public int saveWeather(@PathVariable float temperature, @PathVariable float humidity) { return weatherService.save(temperature, humidity); diff 
--git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java index ad6733558a9d548be196cf8c9c0c63dc96227b39..d9202b45b4cc3dddf8e5a082ac339c1f88d4ec01 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java @@ -8,6 +8,8 @@ import java.util.Map; public interface WeatherMapper { + Map lastOne(); + void dropDB(); void createDB(); diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml index 2d3e0540650f35c1018992795ac33fb6cb7c4837..91938ca24e3cf9c3e0f2895cf40f214d484c55d5 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml @@ -9,20 +9,48 @@ + + - drop database if exists test + drop + database if exists test - create database if not exists test + create + database if not exists test - create table if not exists test.weather(ts timestamp, temperature float, humidity float) tags(location nchar(64), groupId int) + create table if not exists test.weather + ( + ts + timestamp, + temperature + float, + humidity + float, + note + binary + ( + 64 + )) tags + ( + location nchar + ( + 64 + ), groupId int) - create table if not exists test.t#{groupId} using test.weather tags(#{location}, #{groupId}) + create table if not exists test.t#{groupId} using test.weather tags + ( + #{location}, + #{groupId} + ) - insert into test.t#{groupId} (ts, temperature, humidity) values (#{ts}, ${temperature}, ${humidity}) + insert into test.t#{groupId} (ts, temperature, humidity, note) + values (#{ts}, ${temperature}, ${humidity}, #{note}) - - - + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java index c11b9a6f50655788d1e35eb9607a101d2d06c872..e4238127bd32b0f6ad21a514f3a1f07f6069b6d5 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java @@ -11,6 +11,7 @@ public class Weather { private Float temperature; private Float humidity; private String location; + private String note; private int groupId; public Weather() { @@ -61,4 +62,12 @@ public class Weather { public void setGroupId(int groupId) { this.groupId = groupId; } + + public String getNote() { + return note; + } + + public void setNote(String note) { + this.note = note; + } } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java index 26d09c7d128015739cdb0a87956affa4910b4b4e..2264b200afc3e0c2b7dd8e496e607649f940581d 100644 --- 
a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java @@ -29,6 +29,7 @@ public class WeatherService { Weather weather = new Weather(new Timestamp(ts + (thirtySec * i)), 30 * random.nextFloat(), random.nextInt(100)); weather.setLocation(locations[random.nextInt(locations.length)]); weather.setGroupId(i % locations.length); + weather.setNote("note-" + i); weatherMapper.createTable(weather); count += weatherMapper.insert(weather); } @@ -58,4 +59,21 @@ public class WeatherService { public List avg() { return weatherMapper.avg(); } + + public Weather lastOne() { + Map result = weatherMapper.lastOne(); + + long ts = (long) result.get("ts"); + float temperature = (float) result.get("temperature"); + float humidity = (float) result.get("humidity"); + String note = (String) result.get("note"); + int groupId = (int) result.get("groupid"); + String location = (String) result.get("location"); + + Weather weather = new Weather(new Timestamp(ts), temperature, humidity); + weather.setNote(note); + weather.setGroupId(groupId); + weather.setLocation(location); + return weather; + } } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java new file mode 100644 index 0000000000000000000000000000000000000000..80dad1bd7d669ba6b912c7e5fa816c29b7e37c87 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java @@ -0,0 +1,36 @@ +package com.taosdata.example.springbootdemo.util; + +import org.aspectj.lang.ProceedingJoinPoint; +import org.aspectj.lang.annotation.Around; +import org.aspectj.lang.annotation.Aspect; +import org.springframework.stereotype.Component; + +import java.sql.Timestamp; +import java.util.Map; + +@Aspect +@Component +public class TaosAspect { + + @Around("execution(java.util.Map com.taosdata.example.springbootdemo.dao.*.*(..))") + public Object handleType(ProceedingJoinPoint joinPoint) { + Map result = null; + try { + result = (Map) joinPoint.proceed(); + for (String key : result.keySet()) { + Object obj = result.get(key); + if (obj instanceof byte[]) { + obj = new String((byte[]) obj); + result.put(key, obj); + } + if (obj instanceof Timestamp) { + obj = ((Timestamp) obj).getTime(); + result.put(key, obj); + } + } + } catch (Throwable e) { + e.printStackTrace(); + } + return result; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties index 4d7e64d10576388827502a459df9e68da2721dbb..06daa81bbb06450d99ab3f6e640c9795c0ad5d2e 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -1,22 +1,20 @@ # datasource config - JDBC-JNI #spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver -#spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 +#spring.datasource.url=jdbc:TAOS://localhost:6030/?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 #spring.datasource.username=root #spring.datasource.password=taosdata - # datasource config - JDBC-RESTful 
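# note: the JDBC-RESTful driver below connects over HTTP on port 6041, while the JDBC-JNI driver above uses the native port 6030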
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver -spring.datasource.url=jdbc:TAOS-RS://master:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 +spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 spring.datasource.username=root spring.datasource.password=taosdata - spring.datasource.druid.initial-size=5 spring.datasource.druid.min-idle=5 spring.datasource.druid.max-active=5 spring.datasource.druid.max-wait=30000 spring.datasource.druid.validation-query=select server_status(); - +spring.aop.auto=true +spring.aop.proxy-target-class=true #mybatis mybatis.mapper-locations=classpath:mapper/*.xml - logging.level.com.taosdata.jdbc.springbootdemo.dao=debug diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index 01169715f3e8b5b9d6e212b4b317ecca5fa4dbcd..28e8d3f012dd4d6b4b12f2a01132b00a2ae7225c 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -1,12 +1,16 @@ // sample code to verify all TDengine API // to compile: gcc -o apitest apitest.c -ltaos +#include "taoserror.h" +#include "cJSON.h" + #include #include #include #include #include + static void prepare_data(TAOS* taos) { TAOS_RES *result; result = taos_query(taos, "drop database if exists test;"); @@ -1014,6 +1018,919 @@ int32_t verify_schema_less(TAOS* taos) { return (code); } +void verify_telnet_insert(TAOS* taos) { + TAOS_RES *result; + + result = taos_query(taos, "drop database if exists db;"); + taos_free_result(result); + usleep(100000); + result = taos_query(taos, "create database db precision 'ms';"); + taos_free_result(result); + usleep(100000); + + (void)taos_select_db(taos, "db"); + int32_t code = 0; + + /* metric */ + char* lines0[] = { + "stb0_0 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + "stb0_1 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + "stb0_2 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + }; + code = taos_insert_telnet_lines(taos, lines0, 3); + if (code) { + printf("lines0 code: %d, %s.\n", code, tstrerror(code)); + } + + /* timestamp */ + char* lines1[] = { + "stb1 1626006833s 1i8 host=\"host0\"", + "stb1 1626006833639000000ns 2i8 host=\"host0\"", + "stb1 1626006833640000us 3i8 host=\"host0\"", + "stb1 1626006833641123 4i8 host=\"host0\"", + "stb1 1626006833651ms 5i8 host=\"host0\"", + "stb1 0 6i8 host=\"host0\"", + }; + code = taos_insert_telnet_lines(taos, lines1, 6); + if (code) { + printf("lines1 code: %d, %s.\n", code, tstrerror(code)); + } + + /* metric value */ + //tinyint + char* lines2_0[] = { + "stb2_0 1626006833651ms -127i8 host=\"host0\"", + "stb2_0 1626006833652ms 127i8 host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_0, 2); + if (code) { + printf("lines2_0 code: %d, %s.\n", code, tstrerror(code)); + } + + //smallint + char* lines2_1[] = { + "stb2_1 1626006833651ms -32767i16 host=\"host0\"", + "stb2_1 1626006833652ms 32767i16 host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_1, 2); + if (code) { + printf("lines2_1 code: %d, %s.\n", code, tstrerror(code)); + } + + //int + char* lines2_2[] = { + "stb2_2 1626006833651ms -2147483647i32 host=\"host0\"", + "stb2_2 1626006833652ms 2147483647i32 host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_2, 2); + if (code) { + printf("lines2_2 code: %d, %s.\n", code, tstrerror(code)); + } + + //bigint + char* lines2_3[] = { + "stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"", + "stb2_3 1626006833652ms 9223372036854775807i64
host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_3, 2); + if (code) { + printf("lines2_3 code: %d, %s.\n", code, tstrerror(code)); + } + + //float + char* lines2_4[] = { + "stb2_4 1626006833610ms 3f32 host=\"host0\"", + "stb2_4 1626006833620ms -3f32 host=\"host0\"", + "stb2_4 1626006833630ms 3.4f32 host=\"host0\"", + "stb2_4 1626006833640ms -3.4f32 host=\"host0\"", + "stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"", + "stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"", + "stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"", + "stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"", + "stb2_4 1626006833690ms 3.15 host=\"host0\"", + "stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"", + "stb2_4 1626006833710ms -3.4E38f32 host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_4, 11); + if (code) { + printf("lines2_4 code: %d, %s.\n", code, tstrerror(code)); + } + + //double + char* lines2_5[] = { + "stb2_5 1626006833610ms 3f64 host=\"host0\"", + "stb2_5 1626006833620ms -3f64 host=\"host0\"", + "stb2_5 1626006833630ms 3.4f64 host=\"host0\"", + "stb2_5 1626006833640ms -3.4f64 host=\"host0\"", + "stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"", + "stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"", + "stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"", + "stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"", + "stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"", + "stb2_5 1626006833700ms -1.7E308f64 host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_5, 10); + if (code) { + printf("lines2_5 code: %d, %s.\n", code, tstrerror(code)); + } + + //bool + char* lines2_6[] = { + "stb2_6 1626006833610ms t host=\"host0\"", + "stb2_6 1626006833620ms T host=\"host0\"", + "stb2_6 1626006833630ms true host=\"host0\"", + "stb2_6 1626006833640ms True host=\"host0\"", + "stb2_6 1626006833650ms TRUE host=\"host0\"", + "stb2_6 1626006833660ms f host=\"host0\"", + "stb2_6 1626006833670ms F host=\"host0\"", + "stb2_6 1626006833680ms false host=\"host0\"", + "stb2_6 1626006833690ms False host=\"host0\"", + "stb2_6 1626006833700ms FALSE host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_6, 10); + if (code) { + printf("lines2_6 code: %d, %s.\n", code, tstrerror(code)); + } + + //binary + char* lines2_7[] = { + "stb2_7 1626006833610ms \"binary_val.!@#$%^&*\" host=\"host0\"", + "stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"", + "stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\"" + }; + code = taos_insert_telnet_lines(taos, lines2_7, 3); + if (code) { + printf("lines2_7 code: %d, %s.\n", code, tstrerror(code)); + } + + //nchar + char* lines2_8[] = { + "stb2_8 1626006833610ms L\"nchar_val数值一\" host=\"host0\"", + "stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\"", + }; + code = taos_insert_telnet_lines(taos, lines2_8, 2); + if (code) { + printf("lines2_8 code: %d, %s.\n", code, tstrerror(code)); + } + + /* tags */ + //tag value types + char* lines3_0[] = { + "stb3_0 1626006833610ms 1 t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=3.4E38f32,t6=1.7E308f64,t7=true,t8=\"binary_val_1\",t9=L\"标签值1\"", + "stb3_0 1626006833610ms 2 t1=-127i8,t2=-32767i16,t3=-2147483647i32,t4=-9223372036854775807i64,t5=-3.4E38f32,t6=-1.7E308f64,t7=false,t8=\"binary_val_2\",t9=L\"标签值2\"" + }; + code = taos_insert_telnet_lines(taos, lines3_0, 2); + if (code) { + printf("lines3_0 code: %d, %s.\n", code, tstrerror(code)); + } + + //tag ID as child table name + char* lines3_1[] = { + "stb3_1 1626006833610ms 1 id=\"child_table1\",host=\"host1\"", + 
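// note: the "id" tag key is matched case-insensitively (id/iD/ID below) and supplies the child table name +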
"stb3_1 1626006833610ms 2 host=\"host2\",iD=\"child_table2\"", + "stb3_1 1626006833610ms 3 ID=\"child_table3\",host=\"host3\"" + }; + code = taos_insert_telnet_lines(taos, lines3_1, 3); + if (code) { + printf("lines3_1 code: %d, %s.\n", code, tstrerror(code)); + } + + return; +} + +void verify_json_insert(TAOS* taos) { + TAOS_RES *result; + + result = taos_query(taos, "drop database if exists db;"); + taos_free_result(result); + usleep(100000); + result = taos_query(taos, "create database db precision 'ms';"); + taos_free_result(result); + usleep(100000); + + (void)taos_select_db(taos, "db"); + int32_t code = 0; + + char *message = + "{ \ + \"metric\":\"cpu_load_0\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 55.5, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface1\": \"eth0\", \ + \"Id\": \"tb0\" \ + } \ + }"; + + code = taos_insert_json_payload(taos, message); + if (code) { + printf("payload_0 code: %d, %s.\n", code, tstrerror(code)); + } + + char *message1 = + "[ \ + { \ + \"metric\":\"cpu_load_1\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 55.5, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface\": \"eth1\", \ + \"Id\": \"tb1\" \ + } \ + }, \ + { \ + \"metric\":\"cpu_load_2\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 55.5, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface\": \"eth2\", \ + \"Id\": \"tb2\" \ + } \ + } \ + ]"; + + code = taos_insert_json_payload(taos, message1); + if (code) { + printf("payload_1 code: %d, %s.\n", code, tstrerror(code)); + } + + char *message2 = + "[ \ + { \ + \"metric\":\"cpu_load_3\", \ + \"timestamp\": \ + { \ + \"value\": 1626006833610123, \ + \"type\": \"us\" \ + }, \ + \"value\": \ + { \ + \"value\": 55, \ + \"type\": \"int\" \ + }, \ + \"tags\": \ + { \ + \"host\": \ + { \ + \"value\": \"ubuntu\", \ + \"type\": \"binary\" \ + }, \ + \"interface\": \ + { \ + \"value\": \"eth3\", \ + \"type\": \"nchar\" \ + }, \ + \"ID\": \"tb3\", \ + \"port\": \ + { \ + \"value\": 4040, \ + \"type\": \"int\" \ + } \ + } \ + }, \ + { \ + \"metric\":\"cpu_load_4\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 66.6, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface\": \"eth4\", \ + \"Id\": \"tb4\" \ + } \ + } \ + ]"; + code = taos_insert_json_payload(taos, message2); + if (code) { + printf("payload_2 code: %d, %s.\n", code, tstrerror(code)); + } + + + cJSON *payload, *tags; + char *payload_str; + + /* Default format */ + //number + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_0"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //true + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_1"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddTrueToObject(payload, "value"); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + 
cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_1 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //false + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_2"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddFalseToObject(payload, "value"); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_2 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //string + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_3"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddStringToObject(payload, "value", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_3 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //timestamp 0 -> current time + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_4"); + cJSON_AddNumberToObject(payload, "timestamp", 0); + cJSON_AddNumberToObject(payload, "value", 123); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_4 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //ID + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_5"); + cJSON_AddNumberToObject(payload, "timestamp", 0); + cJSON_AddNumberToObject(payload, "value", 123); + tags = cJSON_CreateObject(); + cJSON_AddStringToObject(tags, "ID", "tb0_5"); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddStringToObject(tags, "iD", "tb000"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddStringToObject(tags, "id", "tb555"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_5 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + 
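// cJSON_Print returns heap-allocated text: free the string, then release the object tree +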
cJSON_Delete(payload); + + /* Nested format */ + //timestamp + cJSON *timestamp; + //seconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_0"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //milliseconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_1"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833610); + cJSON_AddStringToObject(timestamp, "type", "ms"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_1 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //microseconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_2"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833610123); + cJSON_AddStringToObject(timestamp, "type", "us"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_2 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //nanoseconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_3"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833610123321); + cJSON_AddStringToObject(timestamp, "type", "ns"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if
(code) { + printf("payload1_3 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //now + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_4"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 0); + cJSON_AddStringToObject(timestamp, "type", "ns"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_4 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //metric value + cJSON *metric_val; + //bool + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_0"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddTrueToObject(metric_val, "value"); + cJSON_AddStringToObject(metric_val, "type", "bool"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //tinyint + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_1"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 127); + cJSON_AddStringToObject(metric_val, "type", "tinyint"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_1 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //smallint + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_2"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 32767); + 
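// 32767 is the SMALLINT maximum; the "type" field below selects the column type explicitly +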
cJSON_AddStringToObject(metric_val, "type", "smallint"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_2 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //int + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_3"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 2147483647); + cJSON_AddStringToObject(metric_val, "type", "int"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_3 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //bigint + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_4"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 9223372036854775807); + cJSON_AddStringToObject(metric_val, "type", "bigint"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_4 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //float + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_5"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 11.12345); + cJSON_AddStringToObject(metric_val, "type", "float"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str 
= cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_5 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //double + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_6"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 22.123456789); + cJSON_AddStringToObject(metric_val, "type", "double"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_6 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //binary + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_7"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddStringToObject(metric_val, "value", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddStringToObject(metric_val, "type", "binary"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_7 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //nchar + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_8"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddStringToObject(metric_val, "value", "你好"); + cJSON_AddStringToObject(metric_val, "type", "nchar"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_8 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //tag value + cJSON *tag; + + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb3_0"); + + timestamp = 
cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddStringToObject(metric_val, "value", "hello"); + cJSON_AddStringToObject(metric_val, "type", "nchar"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + + tag = cJSON_CreateObject(); + cJSON_AddTrueToObject(tag, "value"); + cJSON_AddStringToObject(tag, "type", "bool"); + cJSON_AddItemToObject(tags, "t1", tag); + + tag = cJSON_CreateObject(); + cJSON_AddFalseToObject(tag, "value"); + cJSON_AddStringToObject(tag, "type", "bool"); + cJSON_AddItemToObject(tags, "t2", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 127); + cJSON_AddStringToObject(tag, "type", "tinyint"); + cJSON_AddItemToObject(tags, "t3", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 32767); + cJSON_AddStringToObject(tag, "type", "smallint"); + cJSON_AddItemToObject(tags, "t4", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 2147483647); + cJSON_AddStringToObject(tag, "type", "int"); + cJSON_AddItemToObject(tags, "t5", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 9223372036854775807); + cJSON_AddStringToObject(tag, "type", "bigint"); + cJSON_AddItemToObject(tags, "t6", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 11.12345); + cJSON_AddStringToObject(tag, "type", "float"); + cJSON_AddItemToObject(tags, "t7", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 22.1234567890); + cJSON_AddStringToObject(tag, "type", "double"); + cJSON_AddItemToObject(tags, "t8", tag); + + tag = cJSON_CreateObject(); + cJSON_AddStringToObject(tag, "value", "binary_val"); + cJSON_AddStringToObject(tag, "type", "binary"); + cJSON_AddItemToObject(tags, "t9", tag); + + tag = cJSON_CreateObject(); + cJSON_AddStringToObject(tag, "value", "你好"); + cJSON_AddStringToObject(tag, "type", "nchar"); + cJSON_AddItemToObject(tags, "t10", tag); + + cJSON_AddItemToObject(payload, "tags", tags); + + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload3_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); +} + int main(int argc, char *argv[]) { const char* host = "127.0.0.1"; const char* user = "root"; @@ -1034,6 +1951,11 @@ int main(int argc, char *argv[]) { printf("************ verify schema-less *************\n"); verify_schema_less(taos); + printf("************ verify telnet-insert *************\n"); + verify_telnet_insert(taos); + + printf("************ verify json-insert *************\n"); + verify_json_insert(taos); printf("************ verify query *************\n"); verify_query(taos); @@ -1051,7 +1973,7 @@ int main(int argc, char *argv[]) { verify_prepare2(taos); printf("************ verify prepare3 *************\n"); verify_prepare3(taos); - + printf("************ verify stream *************\n"); verify_stream(taos); printf("done\n"); diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index 304623c27af27cd23a301af134647fb3b9746d64..c85eb4adc515e5fb4e875b8e8e955222bc09190e 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -6,8 +6,8 @@ TARGET=exe LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt 
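# apitest.c now includes cJSON.h, so the vendored cJSON headers are added to the include path below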
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ - -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 - + -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \ + -I../../../deps/cJson/inc all: $(TARGET) exe: diff --git a/tests/examples/nodejs/test1970.js b/tests/examples/nodejs/test1970.js new file mode 100644 index 0000000000000000000000000000000000000000..5177a7371e9a07fa7b548936ff038c1f2a29bc1f --- /dev/null +++ b/tests/examples/nodejs/test1970.js @@ -0,0 +1,125 @@ +const taos = require('td2.0-connector'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) +var c1 = conn.cursor(); // Initializing a new cursor + +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} + +function R(l, r) { + return Math.random() * (r - l) + l; +} + +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +const dbname = "nodejs_1970_db"; +const tbname = "t1"; + +let dropDB = "drop database if exists " + dbname +console.log(dropDB); +c1.execute(dropDB); + +let createDB = "create database " + dbname + " keep 36500" +console.log(createDB); +c1.execute(createDB); + +let useTbl = "use " + dbname +console.log(useTbl) +c1.execute(useTbl); + +let createTbl = "create table if not exists " + tbname + "(ts timestamp,id int)" +console.log(createTbl); +c1.execute(createTbl); + +//1969-12-31 23:59:59.999 +//1970-01-01 00:00:00.000 +//1970-01-01 07:59:59.999 +//1970-01-01 08:00:00.000 +//1628928479484 2021-08-14 08:07:59.484 +let sql1 = "insert into " + dbname + "." + tbname + " values('1969-12-31 23:59:59.999',1)" +console.log(sql1); +c1.execute(sql1); + +let sql2 = "insert into " + dbname + "." + tbname + " values('1970-01-01 00:00:00.000',2)" +console.log(sql2); +c1.execute(sql2); + +let sql3 = "insert into " + dbname + "." + tbname + " values('1970-01-01 07:59:59.999',3)" +console.log(sql3); +c1.execute(sql3); + +let sql4 = "insert into " + dbname + "." + tbname + " values('1970-01-01 08:00:00.000',4)" +console.log(sql4); +c1.execute(sql4); + +let sql5 = "insert into " + dbname + "." + tbname + " values('2021-08-14 08:07:59.484',5)" +console.log(sql5); +c1.execute(sql5); + +// Select +let query1 = "select * from " + dbname + "." + tbname +console.log(query1); +c1.execute(query1); + +var d = c1.fetchall(); +console.log(c1.fields); +for (let i = 0; i < d.length; i++) + console.log(d[i][0].valueOf()); + +//initialize +let initSql1 = "drop table if exists " + tbname +console.log(initSql1); +c1.execute(initSql1); + +console.log(createTbl); +c1.execute(createTbl); +c1.execute(useTbl) + +//-28800001 1969-12-31 23:59:59.999 +//-28800000 1970-01-01 00:00:00.000 +//-1 1970-01-01 07:59:59.999 +//0 1970-01-01 08:00:00.000 +//1628928479484 2021-08-14 08:07:59.484 +let sql11 = "insert into " + dbname + "." + tbname + " values(-28800001,11)"; +console.log(sql11); +c1.execute(sql11); + +let sql12 = "insert into " + dbname + "." + tbname + " values(-28800000,12)" +console.log(sql12); +c1.execute(sql12); + +let sql13 = "insert into " + dbname + "." + tbname + " values(-1,13)" +console.log(sql13); +c1.execute(sql13); + +let sql14 = "insert into " + dbname + "."
+ tbname + " values(0,14)" +console.log(sql14); +c1.execute(sql14); + +let sql15 = "insert into " + dbname + "." + tbname + " values(1628928479484,15)" +console.log(sql15); +c1.execute(sql15); + +// Select +console.log(query1); +c1.execute(query1); + +var d = c1.fetchall(); +console.log(c1.fields); +for (let i = 0; i < d.length; i++) + console.log(d[i][0].valueOf()); + +setTimeout(function () { + conn.close(); +}, 2000); + diff --git a/tests/gotest/batchtest.bat b/tests/gotest/batchtest.bat index efd8961bb0be2eb6f20e291114b92b00469b984f..2a96ee31eb6211dbc5f300fbb2f3d62c03df3061 100755 --- a/tests/gotest/batchtest.bat +++ b/tests/gotest/batchtest.bat @@ -1,3 +1,4 @@ + @echo off echo ==== start Go connector test cases test ==== cd /d %~dp0 @@ -18,3 +19,10 @@ rem case002.bat :: cd case002 :: case002.bat + + +rem cd nanosupport +rem nanoCase.bat + +:: cd nanosupport +:: nanoCase.bat \ No newline at end of file diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh index 0fbbf40714b3349651beea9302e66628b31a22ac..503d77b226885b10e3874a3e0718789bed34b200 100755 --- a/tests/gotest/batchtest.sh +++ b/tests/gotest/batchtest.sh @@ -17,5 +17,6 @@ go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.io,direct bash ./case001/case001.sh $severIp $serverPort -#bash ./case002/case002.sh $severIp $serverPort +bash ./case002/case002.sh $severIp $serverPort #bash ./case003/case003.sh $severIp $serverPort +bash ./nanosupport/nanoCase.sh $severIp $serverPort diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go index 9e912aab99e2aa0da1e1490741f04e67ab1d0c8a..29bc92f2a0668b3f576145d5bd6d08ed37c82f1b 100644 --- a/tests/gotest/case001/case001.go +++ b/tests/gotest/case001/case001.go @@ -12,14 +12,13 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - package main import ( "database/sql" "flag" "fmt" - _ "github.com/taosdata/driver-go/taosSql" + _ "github.com/taosdata/driver-go/v2/taosSql" "log" "strconv" "time" @@ -63,6 +62,7 @@ func main() { url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" // open connect to taos server + fmt.Printf("url:%s",url) db, err := sql.Open(taosDriverName, url) if err != nil { log.Fatalf("Open database error: %s\n", err) @@ -168,17 +168,18 @@ func insert_data(db *sql.DB, demot string) { func select_data(db *sql.DB, demot string) { st := time.Now().Nanosecond() - + fmt.Println(demot) rows, err := db.Query("select * from ? 
", demot) // go text mode + fmt.Println("end query",err) checkErr(err, "select db.Query") fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv") var affectd int //decoder := mahonia.NewDecoder("gbk") // 把原来ANSI格式的文本文件里的字符,用gbk进行解码。 - + fmt.Println("start next") for rows.Next() { - var ts string + var ts time.Time var name string var id int var len int8 @@ -188,6 +189,7 @@ func select_data(db *sql.DB, demot string) { var dv float64 err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) + fmt.Println("rows:",err) checkErr(err, "select rows.Scan") fmt.Printf("%s|\t", ts) diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 94e5bb44e03a1f7d2704752fcf9c080abcb4f23f..831e9f83ac482c0a2c668e2ad0d16c4bf59f19aa 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,8 +15,7 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest > /dev/null 2>&1 -go mod tidy > /dev/null 2>&1 -go build > /dev/null 2>&1 +go mod init demotest +go build sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/gotest/case002/case002.bat b/tests/gotest/case002/case002.bat new file mode 100644 index 0000000000000000000000000000000000000000..385677acae826e248a410472bfc7a022ff3003ab --- /dev/null +++ b/tests/gotest/case002/case002.bat @@ -0,0 +1,9 @@ +@echo off +echo ==== start run cases002.go + +del go.* +go mod init demotest +go build +demotest.exe -h %1 -p %2 +cd .. + diff --git a/tests/gotest/case002/case002.go b/tests/gotest/case002/case002.go new file mode 100644 index 0000000000000000000000000000000000000000..e2ba5ea28ee4f92cfbdca27c78d47268a387c693 --- /dev/null +++ b/tests/gotest/case002/case002.go @@ -0,0 +1,80 @@ +package main + +import ( + "database/sql/driver" + "fmt" + "io" + "os" + "time" + + taos "github.com/taosdata/driver-go/v2/af" +) + +func Subscribe_check(topic taos.Subscriber, check int) bool { + count := 0 + rows, err := topic.Consume() + defer func() { rows.Close(); time.Sleep(time.Second) }() + if err != nil { + fmt.Println(err) + os.Exit(3) + } + for { + values := make([]driver.Value, 2) + err := rows.Next(values) + if err == io.EOF { + break + } else if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(4) + } + count++ + } + if count == check { + return false + } else { + return true + } +} +func main() { + ts := 1630461600000 + db, err := taos.Open("127.0.0.1", "", "", "", 0) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer db.Close() + db.Exec("drop database if exists test") + db.Exec("create database if not exists test ") + db.Exec("use test") + db.Exec("create table test (ts timestamp ,level int)") + for i := 0; i < 10; i++ { + sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i) + db.Exec(sqlcmd) + } + + fmt.Println("consumption 01.") + topic, err := db.Subscribe(false, "test", "select ts, level from test", time.Second) + if Subscribe_check(topic, 10) { + os.Exit(3) + } + + fmt.Println("consumption 02: no new rows inserted") + if Subscribe_check(topic, 0) { + os.Exit(3) + } + + fmt.Println("consumption 03: after one new rows inserted") + sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+10, 10) + db.Exec(sqlcmd) + if Subscribe_check(topic, 1) { + os.Exit(3) + } + + fmt.Println("consumption 04: keep progress and continue previous subscription") + topic.Unsubscribe(true) + topic, err = db.Subscribe(false, 
"test", "select ts, level from test", time.Second) + if Subscribe_check(topic, 0) { + os.Exit(3) + } + +} diff --git a/tests/gotest/case002/case002.sh b/tests/gotest/case002/case002.sh new file mode 100644 index 0000000000000000000000000000000000000000..d98337cce7cfeb51ec9305226b20abdd7b360a46 --- /dev/null +++ b/tests/gotest/case002/case002.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "==== start run cases002.go" + +set +e +#set -x + +script_dir="$(dirname $(readlink -f $0))" +#echo "pwd: $script_dir, para0: $0" + +#execName=$0 +#execName=`echo ${execName##*/}` +#goName=`echo ${execName%.*}` + +###### step 3: start build +cd $script_dir +rm -f go.* +go mod init demotest > /dev/null 2>&1 +go mod tidy > /dev/null 2>&1 +go build > /dev/null 2>&1 +sleep 1s +./demotest -h $1 -p $2 diff --git a/tests/gotest/nanosupport/connector/executor.go b/tests/gotest/nanosupport/connector/executor.go new file mode 100644 index 0000000000000000000000000000000000000000..218ea29af3b34a8cfb5ab56585eeb07bc467d209 --- /dev/null +++ b/tests/gotest/nanosupport/connector/executor.go @@ -0,0 +1,208 @@ +package connector + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/taosdata/go-utils/log" + "github.com/taosdata/go-utils/tdengine/config" + "github.com/taosdata/go-utils/tdengine/connector" + tdengineExecutor "github.com/taosdata/go-utils/tdengine/executor" +) + +type Executor struct { + executor *tdengineExecutor.Executor + ctx context.Context +} + +var Logger = log.NewLogger("taos test") + +func NewExecutor(conf *config.TDengineGo, db string, showSql bool) (*Executor, error) { + tdengineConnector, err := connector.NewTDengineConnector("go", conf) + if err != nil { + return nil, err + } + executor := tdengineExecutor.NewExecutor(tdengineConnector, db, showSql, Logger) + return &Executor{ + executor: executor, + ctx: context.Background(), + }, nil +} + +func (e *Executor) Execute(sql string) (int64, error) { + return e.executor.DoExec(e.ctx, sql) +} +func (e *Executor) Query(sql string) (*connector.Data, error) { + fmt.Println("query :", sql) + return e.executor.DoQuery(e.ctx, sql) +} +func (e *Executor) CheckData(row, col int, value interface{}, data *connector.Data) (bool, error) { + if data == nil { + return false, fmt.Errorf("data is nil") + } + if col >= len(data.Head) { + return false, fmt.Errorf("col out of data") + } + if row >= len(data.Data) { + return false, fmt.Errorf("row out of data") + } + dataValue := data.Data[row][col] + + if dataValue == nil && value != nil { + return false, fmt.Errorf("dataValue is nil but value is not nil") + } + if dataValue == nil && value == nil { + return true, nil + } + if reflect.TypeOf(dataValue) != reflect.TypeOf(value) { + return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue)) + } + switch value.(type) { + case time.Time: + t, _ := dataValue.(time.Time) + if value.(time.Time).Nanosecond() != t.Nanosecond() { + return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond()) + } + case string: + if value.(string) != dataValue.(string) { + return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string)) + } + case int8: + if value.(int8) != dataValue.(int8) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8)) + } + case int16: + if value.(int16) != dataValue.(int16) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16)) + } + case int32: + if 
value.(int32) != dataValue.(int32) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32)) + } + case int64: + if value.(int64) != dataValue.(int64) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64)) + } + case float32: + if value.(float32) != dataValue.(float32) { + return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + } + case float64: + if value.(float64) != dataValue.(float64) { + return false, fmt.Errorf("value not match expect %f got %f", value.(float64), dataValue.(float64)) + } + case bool: + if value.(bool) != dataValue.(bool) { + return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool)) + } + default: + return false, fmt.Errorf("unsupported type %v", reflect.TypeOf(value)) + } + return true, nil +} + +func (e *Executor) CheckData2(row, col int, value interface{}, data *connector.Data) { + + match, err := e.CheckData(row, col, value, data) + fmt.Println("expect data is:", value) + fmt.Println("got data is:", data.Data[row][col]) + if err != nil { + fmt.Println(err) + } + if !match { + fmt.Println("data not match") + + } + + /* + fmt.Println(value) + if data == nil { + // return false, fmt.Errorf("data is nil") + // fmt.Println("check failed") + } + if col >= len(data.Head) { + // return false, fmt.Errorf("col out of data") + // fmt.Println("check failed") + } + if row >= len(data.Data) { + // return false, fmt.Errorf("row out of data") + // fmt.Println("check failed") + } + dataValue := data.Data[row][col] + + if dataValue == nil && value != nil { + // return false, fmt.Errorf("dataValue is nil but value is not nil") + // fmt.Println("check failed") + } + if dataValue == nil && value == nil { + // return true, nil + fmt.Println("check pass") + } + if reflect.TypeOf(dataValue) != reflect.TypeOf(value) { + // return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue)) + fmt.Println("check failed") + } + switch value.(type) { + case time.Time: + t, _ := dataValue.(time.Time) + if value.(time.Time).Nanosecond() != t.Nanosecond() { + // return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond()) + // fmt.Println("check failed") + } + case string: + if value.(string) != dataValue.(string) { + // return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string)) + // fmt.Println("check failed") + } + case int8: + if value.(int8) != dataValue.(int8) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8)) + // fmt.Println("check failed") + } + case int16: + if value.(int16) != dataValue.(int16) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16)) + // fmt.Println("check failed") + } + case int32: + if value.(int32) != dataValue.(int32) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32)) + // fmt.Println("check failed") + } + case int64: + if value.(int64) != dataValue.(int64) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64)) + // fmt.Println("check failed") + } + case float32: + if value.(float32) != dataValue.(float32) { + // return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + // fmt.Println("check failed") + } + case float64: + if value.(float64)
!= dataValue.(float64) { + // return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + // fmt.Println("check failed") + } + case bool: + if value.(bool) != dataValue.(bool) { + // return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool)) + // fmt.Println("check failed") + } + default: + // return false, fmt.Errorf("unsupport type %v", reflect.TypeOf(value)) + // fmt.Println("check failed") + } + // return true, nil + // fmt.Println("check pass") + */ +} + +func (e *Executor) CheckRow(count int, data *connector.Data) { + + if len(data.Data) != count { + fmt.Println("check failed !") + } +} diff --git a/tests/gotest/nanosupport/nanoCase.bat b/tests/gotest/nanosupport/nanoCase.bat new file mode 100644 index 0000000000000000000000000000000000000000..86bddd5b02c5399d5b8d70bd08020e96a7d1c0e5 --- /dev/null +++ b/tests/gotest/nanosupport/nanoCase.bat @@ -0,0 +1,9 @@ +@echo off +echo ==== start run nanosupport.go + +del go.* +go mod init nano +go mod tidy +go build +nano.exe -h %1 -p %2 +cd .. diff --git a/tests/gotest/nanosupport/nanoCase.sh b/tests/gotest/nanosupport/nanoCase.sh new file mode 100644 index 0000000000000000000000000000000000000000..bec8929f14c0a56e7c4074efa39d1e1e881fb12e --- /dev/null +++ b/tests/gotest/nanosupport/nanoCase.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "==== start run nanosupport.go " + +set +e +#set -x + +script_dir="$(dirname $(readlink -f $0))" +#echo "pwd: $script_dir, para0: $0" + +#execName=$0 +#execName=`echo ${execName##*/}` +#goName=`echo ${execName%.*}` + +###### step 3: start build +cd $script_dir +rm -f go.* +go mod init nano +go mod tidy +go build +sleep 10s +./nano -h $1 -p $2 diff --git a/tests/gotest/nanosupport/nanosupport.go b/tests/gotest/nanosupport/nanosupport.go new file mode 100644 index 0000000000000000000000000000000000000000..e2f24a73c0a6db3c94b90879c73d0f05e2476307 --- /dev/null +++ b/tests/gotest/nanosupport/nanosupport.go @@ -0,0 +1,269 @@ +package main + +import ( + "fmt" + "log" + "nano/connector" + "time" + + "github.com/taosdata/go-utils/tdengine/config" +) + +func main() { + e, err := connector.NewExecutor(&config.TDengineGo{ + Address: "root:taosdata@/tcp(127.0.0.1:6030)/", + MaxIdle: 20, + MaxOpen: 30, + MaxLifetime: 30, + }, "db", false) + if err != nil { + panic(err) + } + prepareData(e) + data, err := e.Query("select * from tb") + if err != nil { + panic(err) + } + + layout := "2006-01-02 15:04:05.999999999" + t0, _ := time.Parse(layout, "2021-06-10 00:00:00.100000001") + t1, _ := time.Parse(layout, "2021-06-10 00:00:00.150000000") + t2, _ := time.Parse(layout, "2021-06-10 00:00:00.299999999") + t3, _ := time.Parse(layout, "2021-06-10 00:00:00.300000000") + t4, _ := time.Parse(layout, "2021-06-10 00:00:00.300000001") + t5, _ := time.Parse(layout, "2021-06-10 00:00:00.999999999") + + e.CheckData2(0, 0, t0, data) + e.CheckData2(1, 0, t1, data) + e.CheckData2(2, 0, t2, data) + e.CheckData2(3, 0, t3, data) + e.CheckData2(4, 0, t4, data) + e.CheckData2(5, 0, t5, data) + e.CheckData2(3, 1, int32(3), data) + e.CheckData2(4, 1, int32(5), data) + e.CheckData2(5, 1, int32(7), data) + + fmt.Println(" start check nano support!") + + data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000001\" and ts < \"2021-06-10 0:00:00.160000000\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = 
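+ // the same nanosecond boundary windows are checked below both as raw epoch-ns integers and as equivalent timestamp string literals + data, _ = 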
e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000000\" and ts < \"2021-06-10 0:00:00.150000000\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > 1623254400400000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb where ts < \"2021-06-10 00:00:00.400000000\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb where ts < now + 400000000b;") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb where ts >= \"2021-06-10 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb where ts <= 1623254400300000000;") + e.CheckData2(0, 0, int64(4), data) + + data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.000000000\";") + + data, _ = e.Query("select count(*) from tb where ts = 1623254400150000000;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb where ts between \"2021-06-10 0:00:00.299999999\" and \"2021-06-10 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(3), data) + + data, _ = e.Query("select avg(speed) from tb interval(5000000000b);") + e.CheckRow(1, data) + + data, _ = e.Query("select avg(speed) from tb interval(100000000b)") + e.CheckRow(4, data) + + data, _ = e.Query("select avg(speed) from tb interval(1000b);") + e.CheckRow(5, data) + + data, _ = e.Query("select avg(speed) from tb interval(1u);") + e.CheckRow(5, data) + + data, _ = e.Query("select avg(speed) from tb interval(100000000b) sliding (100000000b);") + e.CheckRow(4, data) + + data, _ = e.Query("select last(*) from tb") + tt, _ := time.Parse(layout, "2021-06-10 0:00:00.999999999") + e.CheckData2(0, 0, tt, data) + + data, _ = e.Query("select first(*) from tb") + tt1, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001") + e.CheckData2(0, 0, tt1, data) + + e.Execute("insert into tb values(now + 500000000b, 6);") + data, _ = e.Query("select * from tb;") + e.CheckRow(7, data) + + e.Execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);") + e.Execute("insert into tb2 values(\"2021-06-10 0:00:00.100000001\", 1, \"2021-06-11 0:00:00.100000001\");") + e.Execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);") + e.Execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);") + e.Execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);") + e.Execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);") + e.Execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);") + + data, _ = e.Query("select * from tb2;") + tt2, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001") + tt3, _ := time.Parse(layout, "2021-06-10 0:00:00.150000000") + + e.CheckData2(0, 0, tt2, data) + e.CheckData2(1, 0, tt3, data) + e.CheckData2(2, 1, int32(4), data) + e.CheckData2(3, 1, int32(3), data) + tt4, _ := time.Parse(layout, "2021-06-11 00:00:00.300000001") + e.CheckData2(4, 2, tt4, data) + e.CheckRow(6, data) + + data, _ = e.Query("select count(*) from 
tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > \"2021-06-11 0:00:00.100000000\" and ts2 < \"2021-06-11 0:00:00.100000002\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800500000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb2 where ts2 < \"2021-06-11 0:00:00.400000000\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 < now + 400000000b;") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 >= \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <= 1623340800400000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.000000000\";") + + data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 = 1623340800300000001;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 between \"2021-06-11 0:00:00.299999999\" and \"2021-06-11 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(3), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> 1623513600999999999;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000000\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != 1623513600999999999;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000000\";") + e.CheckData2(0, 0, int64(6), data) + + e.Execute("insert into tb2 values(now + 500000000b, 6, now +2d);") + data, _ = e.Query("select * from tb2;") + e.CheckRow(7, data) + + e.Execute("create table tb3 (ts timestamp, speed int);") + _, err = e.Execute("insert into tb3 values(16232544001500000, 2);") + if err != nil { + fmt.Println("check pass! 
") + } + + e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456\", 2);") + data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456000\";") + e.CheckRow(1, data) + + e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456789000\", 2);") + data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456789\";") + e.CheckRow(1, data) + + // check timezone support + + e.Execute("drop database if exists nsdb;") + e.Execute("create database nsdb precision 'ns';") + e.Execute("use nsdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + + ttt, _ := time.Parse(layout, "2021-06-10 01:00:00.123456789") + e.CheckData2(0, 0, ttt, data) + + e.Execute("create database usdb precision 'us';") + e.Execute("use usdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + ttt2, _ := time.Parse(layout, "2021-06-10 01:00:00.123456") + e.CheckData2(0, 0, ttt2, data) + + e.Execute("drop database if exists msdb;") + e.Execute("create database msdb precision 'ms';") + e.Execute("use msdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + ttt3, _ := time.Parse(layout, "2021-06-10 01:00:00.123") + e.CheckData2(0, 0, ttt3, data) + fmt.Println("all test done!") + +} + +func prepareData(e *connector.Executor) { + sqlList := []string{ + "reset query cache;", + "drop database if exists db;", + "create database db;", + "use db;", + "reset query cache;", + "drop database if exists db;", + "create database db precision 'ns';", + "show databases;", + "use db;", + "create table tb (ts timestamp, speed int);", + "insert into tb values('2021-06-10 0:00:00.100000001', 1);", + "insert into tb values(1623254400150000000, 2);", + "import into tb values(1623254400300000000, 3);", + "import into tb values(1623254400299999999, 4);", + "insert into tb values(1623254400300000001, 5);", + "insert into tb values(1623254400999999999, 7);", + } + for _, sql := range sqlList { + err := executeSql(e, sql) + if err != nil { + log.Fatalf("prepare data error:%v, sql:%s", err, sql) + } + } +} + +func executeSql(e *connector.Executor, sql string) error { + _, err := e.Execute(sql) + if err != nil { + return err + } + return nil +} diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 68b64fd4e0c4f09ff0b8e96d7802b954b774fbc5..05b2d45ce434d0990d7c143863b9ca268a7d6a26 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -21,7 +21,8 @@ fi today=`date +"%Y%m%d"` WORK_DIR=/root/pxiao -PERFORMANCE_TEST_REPORT=$WORK_DIR/TDengine/tests/performance-report-$branch-$type-$today.log +name=`echo $branch | cut -d '/' -f2` +PERFORMANCE_TEST_REPORT=$WORK_DIR/TDinternal/community/tests/performance-report-$name-$type-$today.log # Coloured Echoes # function red_echo { echo -e "\033[31m$@\033[0m"; } # @@ -54,11 +55,12 @@ function stopTaosd { } function buildTDengine { - 
echoInfo "Build TDengine" - cd $WORK_DIR/TDengine + echoInfo "Build TDinternal" + cd $WORK_DIR/TDinternal git remote update > /dev/null git reset --hard HEAD + git fetch git checkout $branch REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` LOCAL_COMMIT=`git rev-parse --short @` @@ -69,13 +71,22 @@ function buildTDengine { echo "repo up-to-date" fi + cd community + git reset --hard HEAD + cd .. + echo "git submodule update --init --recursive" + git submodule update --init --recursive + git pull > /dev/null 2>&1 - if [ $type = "jemalloc" ];then - echo "git submodule update --init --recursive" - git submodule update --init --recursive - fi + + cd community + git remote update > /dev/null + git reset --hard HEAD + git fetch + git checkout $branch + REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` LOCAL_COMMIT=`git rev-parse --short @` - cd debug + cd ../debug rm -rf * if [ $type = "jemalloc" ];then echo "cmake .. -DJEMALLOC_ENABLED=true > /dev/null" @@ -83,6 +94,10 @@ function buildTDengine { else cmake .. > /dev/null fi + #cp $WORK_DIR/taosdemoPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/tools/ + #cp $WORK_DIR/insertFromCSVPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/insert/ + #cp $WORK_DIR/queryPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/query/ + rm -rf $WORK_DIR/TDinternal/community/tests/pytest/query/operator.py make > /dev/null 2>&1 make install > /dev/null 2>&1 echo "Build TDengine on remote server" @@ -91,24 +106,24 @@ function buildTDengine { function runQueryPerfTest { [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT - nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 & + nohup $WORK_DIR/TDinternal/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 & echoInfo "Wait TDengine to start" sleep 60 echoInfo "Run Performance Test" - cd $WORK_DIR/TDengine/tests/pytest + cd $WORK_DIR/TDinternal/community/tests/pytest - python3 query/queryPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + python3 query/queryPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -d perf2 | tee -a $PERFORMANCE_TEST_REPORT python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT - echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 10 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT - echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT + 
echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 10 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT } @@ -121,7 +136,7 @@ function sendReport { sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT` - echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${branch} ${jemalloc} commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${branch} ${type} commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ (cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \ /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!" } diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py index 7cbeeb60df54e8d89fdcc7815a2b2757793dfaec..5c79380a00c96c03c827071c2bbab4f8eacad897 100644 --- a/tests/pytest/client/version.py +++ b/tests/pytest/client/version.py @@ -15,6 +15,7 @@ import sys from util.log import * from util.cases import * from util.sql import * +from math import floor class TDTestCase: @@ -27,23 +28,22 @@ class TDTestCase: sql = "select server_version()" ret = tdSql.query(sql) - version = tdSql.getData(0, 0)[0:3] - expectedVersion_dev = "2.0" - expectedVersion_master = "2.1" - if(version == expectedVersion_dev or version == expectedVersion_master): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) + version = floor(float(tdSql.getData(0, 0)[0:3])) + expectedVersion = 2 + + if(version == expectedVersion): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version)) else: - tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master)) + tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion)) sql = "select client_version()" ret = tdSql.query(sql) - version = tdSql.getData(0, 0)[0:3] - expectedVersion_dev = "2.0" - expectedVersion_master = "2.1" - if(version == expectedVersion_dev or version == expectedVersion_master): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) + version = floor(float(tdSql.getData(0, 0)[0:3])) + expectedVersion = 2 + if(version == expectedVersion): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version)) else: - tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master)) + tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion)) def stop(self): diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 7af38c3b567bdb8f3a723b25aa600a15876b41da..1bb2081d7f936ccd6df0e6c85d3d451d9c0d9e44 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -23,7 +23,7 @@ import string from requests.auth import HTTPBasicAuth func_list=['avg','count','twa','sum','stddev','leastsquares','min', 'max','first','last','top','bottom','percentile','apercentile', -'last_row','diff','spread'] +'last_row','diff','spread','distinct'] condition_list=[ "where _c0 > now -10d ", 'interval(10s)', @@ -33,7 +33,7 @@ condition_list=[ 'fill(null)' ] -where_list = ['_c0>now-10d',' <50','like',' is null'] +where_list = 
['_c0>now-10d',' <50','like',' is null','in'] class ConcurrentInquiry: # def __init__(self,ts=1500000001000,host='127.0.0.1',user='root',password='taosdata',dbname='test', # stb_prefix='st',subtb_prefix='t',n_Therads=10,r_Therads=10,probabilities=0.05,loop=5, @@ -152,6 +152,20 @@ class ConcurrentInquiry: elif 'is null' in c: conlist = ' ' + random.choice(tlist) + random.choice([' is null',' is not null']) l.append(conlist) + elif 'in' in c: + in_list = [] + temp = [] + for i in range(random.randint(0,100)): + temp.append(random.randint(-10000,10000)) + temp = [str(i) for i in temp] + in_list.append(temp) + temp1 = [] + for i in range(random.randint(0,100)): + temp1.append("'" + ''.join(random.sample(string.ascii_letters, random.randint(0,10))) + "'") + in_list.append(temp1) + in_list.append(['NULL','NULL']) + conlist = ' ' + random.choice(tlist) + ' in (' + ','.join(random.choice(in_list)) + ')' + l.append(conlist) else: s_all = string.ascii_letters conlist = ' ' + random.choice(tlist) + " like \'%" + random.choice(s_all) + "%\' " @@ -182,7 +196,14 @@ class ConcurrentInquiry: def con_order(self,tlist,col_list,tag_list): return 'order by '+random.choice(tlist) - + + def con_state_window(self,tlist,col_list,tag_list): + return 'state_window(' + random.choice(tlist + tag_list) + ')' + + def con_session_window(self,tlist,col_list,tag_list): + session_window = 'session_window(' + random.choice(tlist + tag_list) + ',' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')' + return session_window + def gen_subquery_sql(self): subsql ,col_num = self.gen_query_sql(1) if col_num == 0: @@ -221,7 +242,7 @@ class ConcurrentInquiry: else: sql=sql+','.join(sel_col_list) #select col & func sql = sql + ' from ('+ subsql +') ' - con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window] sel_con=random.sample(con_func,random.randint(0,len(con_func))) sel_con_list=[] for i in sel_con: @@ -281,7 +302,7 @@ class ConcurrentInquiry: sql = sql + ' from '+random.choice(self.subtb_list)+' ' else: sql = sql + ' from '+random.choice(self.stb_list)+' ' - con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window] sel_con=random.sample(con_func,random.randint(0,len(con_func))) sel_con_list=[] for i in sel_con: diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 886e2a365e26dca4912ad2b52a5cd01e5385faa6..db846f2bd7a75ce2294df4968c42dc947541b05b 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -183,6 +183,9 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosub python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py + +# +python3 ./test.py -f tsdb/tsdbComp.py + # update python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update-0.py @@ -267,7 +270,7 @@ python3 ./test.py -f query/queryStateWindow.py # python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py -python3 ./test.py -f query/operator_cost.py +# python3 
./test.py -f query/operator_cost.py # python3 ./test.py -f query/long_where_query.py python3 test.py -f query/nestedQuery/queryWithSpread.py @@ -390,7 +393,7 @@ python3 ./test.py -f alter/alterColMultiTimes.py python3 ./test.py -f query/queryWildcardLength.py python3 ./test.py -f query/queryTbnameUpperLower.py python3 ./test.py -f query/query.py - +python3 ./test.py -f query/queryDiffColsOr.py #======================p4-end=============== diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py index 87d001d9e586fdfccb28e6fe9c8a04510d6f3fb1..469e9186f668ec2c1afb03a79648c5a822cacdbe 100644 --- a/tests/pytest/functions/function_interp.py +++ b/tests/pytest/functions/function_interp.py @@ -11,45 +11,43 @@ # -*- coding: utf-8 -*- -import sys -import taos from util.log import * from util.cases import * from util.sql import * -import numpy as np - - class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - self.rowNum = 10 - self.ts = 1537146000000 - def run(self): tdSql.prepare() tdSql.execute("create table ap1 (ts timestamp, pav float)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)") + tdSql.execute("create table ap2 (ts timestamp, pav float) tags (t1 float)") + tdSql.execute("create table ap2_sub1 using ap2 tags (2.90799)") + tdSql.execute("create table ap2_sub2 using ap2 tags (2.90799)") + tdSql.execute("create table ap3 (ts timestamp, pav float) tags (t1 float)") + tdSql.execute("create table ap3_sub1 using ap3 tags (2.90799)") + for tb_name in ["ap1", "ap2_sub1", "ap3_sub1"]: + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.119', 2.90799)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.317', 3.07399)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.517', 0.58117)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.717', 0.16150)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 
02:19:54.918', 1.47885)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:56.569', 1.76472)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.381', 2.13722)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.574', 4.10256)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.776', 3.55345)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.976', 1.46624)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.187', 0.17943)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.372', 2.04101)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.573', 3.20924)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.768', 1.71807)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.964', 4.60900)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.155', 4.33907)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.359', 0.76940)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.553', 0.06458)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.742', 4.59857)") + tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.938', 1.55081)") tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)") tdSql.checkRows(0) @@ -57,6 +55,29 @@ class TDTestCase: tdSql.checkRows(0) tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)") tdSql.checkRows(0) + # check None + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (None)") + tdSql.checkRows(0) + # check NULL + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (NULL)") + tdSql.checkRows(6) + for i in range(5): + tdSql.checkData(i,1,None) + # checkout VALUE + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (VALUE, 1)") + tdSql.checkRows(6) + for i in range(5): + tdSql.checkData(i,1,1.00000) + # check tag group by + tdSql.query("select interp(pav) from ap2 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (VALUE,1) group by t1;") + for i in range(5): + tdSql.checkData(i,1,1.00000) + tdSql.checkData(i,2,2.90799) + # check multi ts lines + tdSql.query("select z1.ts,z1.val1,z2.val2 from (select interp(pav) val1 from ap2 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (value,1)) z1,(select interp(pav) val2 from ap3 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (value,2)) z2 where z1.ts=z2.ts ;") + for i in range(5): + tdSql.checkData(i,1,1.00000) + tdSql.checkData(i,2,2.00000) tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (LINEAR)") tdSql.checkRows(6) tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (NEXT)") diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index 8ec25cef26b3c97bc55f2f4df3fe8cf55a19125c..7462d4cd72f600674fcb82aa1224019787d23fd5 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -12,6 +12,8 @@ # -*- coding: utf-8 -*- 
import sys +import numpy as np + from util.log import * from util.cases import * from util.sql import * @@ -24,8 +26,17 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): + # tdSql.query("show variables") + # tdSql.checkData(54, 1, 864000) + tdSql.execute("show variables") + res = tdSql.cursor.fetchall() + resList = np.array(res) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() tdSql.query("show variables") - tdSql.checkData(55, 1, 864000) + tdSql.checkData(index_value, 1, 864000) + pass + def stop(self): tdSql.close() diff --git a/tests/pytest/insert/insertJSONPayload.py b/tests/pytest/insert/insertJSONPayload.py new file mode 100644 index 0000000000000000000000000000000000000000..30f34446a93237f9b7b610efc9b1b5507ba09f4a --- /dev/null +++ b/tests/pytest/insert/insertJSONPayload.py @@ -0,0 +1,568 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test precision 'us'") + tdSql.execute('use test') + + + ### Default format ### + ### metric value ### + print("============= step1 : test metric value types ================") + payload = ''' + { + "metric": "stb0_0", + "timestamp": 1626006833610123, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_0") + tdSql.checkData(1, 1, "FLOAT") + + payload = ''' + { + "metric": "stb0_1", + "timestamp": 1626006833610123, + "value": true, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_1") + tdSql.checkData(1, 1, "BOOL") + + payload = ''' + { + "metric": "stb0_2", + "timestamp": 1626006833610123, + "value": false, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_2") + tdSql.checkData(1, 1, "BOOL") + + payload = ''' + { + "metric": "stb0_3", + "timestamp": 1626006833610123, + "value": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>", + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_3") + tdSql.checkData(1, 1, "NCHAR") + + ### timestamp 0 ### + payload = ''' + { + "metric": 
"stb0_4", + "timestamp": 0, + "value": 123, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + + ### ID ### + payload = ''' + { + "metric": "stb0_5", + "timestamp": 0, + "value": 123, + "tags": { + "ID": "tb0_5", + "t1": true, + "iD": "tb000", + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>", + "id": "tb555" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select tbname from stb0_5") + tdSql.checkData(0, 0, "tb0_5") + + ### Nested format ### + ### timestamp ### + #seconds + payload = ''' + { + "metric": "stb1_0", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_0") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.000000") + + #milliseconds + payload = ''' + { + "metric": "stb1_1", + "timestamp": { + "value": 1626006833610, + "type": "ms" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_1") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.610000") + + #microseconds + payload = ''' + { + "metric": "stb1_2", + "timestamp": { + "value": 1626006833610123, + "type": "us" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_2") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.610123") + + #nanoseconds + payload = ''' + { + "metric": "stb1_3", + "timestamp": { + "value": 1.6260068336101233e+18, + "type": "ns" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_3") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.610123") + + #now + tdSql.execute('use test') + payload = ''' + { + "metric": "stb1_4", + "timestamp": { + "value": 0, + "type": "ns" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + ### metric value ### + payload = ''' + { + "metric": "stb2_0", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": true, + "type": "bool" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_0") + tdSql.checkData(1, 1, "BOOL") + + payload = ''' + { + "metric": "stb2_1", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 127, + "type": "tinyint" + }, + "tags": { + "t1": true, + "t2": 
false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_1") + tdSql.checkData(1, 1, "TINYINT") + + payload = ''' + { + "metric": "stb2_2", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 32767, + "type": "smallint" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_2") + tdSql.checkData(1, 1, "SMALLINT") + + payload = ''' + { + "metric": "stb2_3", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 2147483647, + "type": "int" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_3") + tdSql.checkData(1, 1, "INT") + + payload = ''' + { + "metric": "stb2_4", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 9.2233720368547758e+18, + "type": "bigint" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_4") + tdSql.checkData(1, 1, "BIGINT") + + payload = ''' + { + "metric": "stb2_5", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 11.12345, + "type": "float" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_5") + tdSql.checkData(1, 1, "FLOAT") + + payload = ''' + { + "metric": "stb2_6", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 22.123456789, + "type": "double" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_6") + tdSql.checkData(1, 1, "DOUBLE") + + payload = ''' + { + "metric": "stb2_7", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>", + "type": "binary" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_7") + tdSql.checkData(1, 1, "BINARY") + + payload = ''' + { + "metric": "stb2_8", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": "你好", + "type": "nchar" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_8") + tdSql.checkData(1, 1, "NCHAR") + + ### tag value ### + + payload = ''' + { + "metric": "stb3_0", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + 
"value": "hello", + "type": "nchar" + }, + "tags": { + "t1": { + "value": true, + "type": "bool" + }, + "t2": { + "value": 127, + "type": "tinyint" + }, + "t3": { + "value": 32767, + "type": "smallint" + }, + "t4": { + "value": 2147483647, + "type": "int" + }, + "t5": { + "value": 9.2233720368547758e+18, + "type": "bigint" + }, + "t6": { + "value": 11.12345, + "type": "float" + }, + "t7": { + "value": 22.123456789, + "type": "double" + }, + "t8": { + "value": "binary_val", + "type": "binary" + }, + "t9": { + "value": "你好", + "type": "nchar" + } + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb3_0") + tdSql.checkData(2, 1, "BOOL") + tdSql.checkData(3, 1, "TINYINT") + tdSql.checkData(4, 1, "SMALLINT") + tdSql.checkData(5, 1, "INT") + tdSql.checkData(6, 1, "BIGINT") + tdSql.checkData(7, 1, "FLOAT") + tdSql.checkData(8, 1, "DOUBLE") + tdSql.checkData(9, 1, "BINARY") + tdSql.checkData(10, 1, "NCHAR") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/insertTelnetLines.py b/tests/pytest/insert/insertTelnetLines.py new file mode 100644 index 0000000000000000000000000000000000000000..8ebb6bd3df4bcd4abfbb8c42cf5024fe066fcce3 --- /dev/null +++ b/tests/pytest/insert/insertTelnetLines.py @@ -0,0 +1,313 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test precision 'us'") + tdSql.execute('use test') + + + ### metric ### + print("============= step1 : test metric ================") + lines0 = [ + "stb0_0 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + "stb0_1 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + "stb0_2 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + ] + + code = self._conn.insert_telnet_lines(lines0) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("show stables") + tdSql.checkRows(3) + + tdSql.query("describe stb0_0") + tdSql.checkRows(4) + + tdSql.query("describe stb0_1") + tdSql.checkRows(4) + + tdSql.query("describe stb0_2") + tdSql.checkRows(4) + + ### timestamp ### + print("============= step2 : test timestamp ================") + lines1 = [ + "stb1 1626006833s 1i8 host=\"host0\"", + "stb1 1626006833639000000ns 2i8 host=\"host0\"", + "stb1 1626006833640000us 3i8 host=\"host0\"", + "stb1 1626006833641123 4i8 host=\"host0\"", + "stb1 1626006833651ms 5i8 host=\"host0\"", + "stb1 0 6i8 host=\"host0\"", + ] + + code = self._conn.insert_telnet_lines(lines1) + print("insert_telnet_lines result {}".format(code)) + + 
tdSql.query("select * from stb1") + tdSql.checkRows(6) + + ### metric value ### + print("============= step3 : test metric value ================") + + #tinyint + lines2_0 = [ + "stb2_0 1626006833651ms -127i8 host=\"host0\"", + "stb2_0 1626006833652ms 127i8 host=\"host0\"" + ] + code = self._conn.insert_telnet_lines(lines2_0) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_0") + tdSql.checkRows(2) + + tdSql.query("describe stb2_0") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "TINYINT") + + #smallint + lines2_1 = [ + "stb2_1 1626006833651ms -32767i16 host=\"host0\"", + "stb2_1 1626006833652ms 32767i16 host=\"host0\"" + ] + code = self._conn.insert_telnet_lines(lines2_1) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_1") + tdSql.checkRows(2) + + tdSql.query("describe stb2_1") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "SMALLINT") + + #int + lines2_2 = [ + "stb2_2 1626006833651ms -2147483647i32 host=\"host0\"", + "stb2_2 1626006833652ms 2147483647i32 host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_2) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_2") + tdSql.checkRows(2) + + tdSql.query("describe stb2_2") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "INT") + + #bigint + lines2_3 = [ + "stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"", + "stb2_3 1626006833652ms 9223372036854775807i64 host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_3) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_3") + tdSql.checkRows(2) + + tdSql.query("describe stb2_3") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "BIGINT") + + #float + lines2_4 = [ + "stb2_4 1626006833610ms 3f32 host=\"host0\"", + "stb2_4 1626006833620ms -3f32 host=\"host0\"", + "stb2_4 1626006833630ms 3.4f32 host=\"host0\"", + "stb2_4 1626006833640ms -3.4f32 host=\"host0\"", + "stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"", + "stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"", + "stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"", + "stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"", + "stb2_4 1626006833690ms 3.15 host=\"host0\"", + "stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"", + "stb2_4 1626006833710ms -3.4E38f32 host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_4) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_4") + tdSql.checkRows(11) + + tdSql.query("describe stb2_4") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "FLOAT") + + #double + lines2_5 = [ + "stb2_5 1626006833610ms 3f64 host=\"host0\"", + "stb2_5 1626006833620ms -3f64 host=\"host0\"", + "stb2_5 1626006833630ms 3.4f64 host=\"host0\"", + "stb2_5 1626006833640ms -3.4f64 host=\"host0\"", + "stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"", + "stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"", + "stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"", + "stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"", + "stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"", + "stb2_5 1626006833700ms -1.7E308f64 host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_5) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_5") + tdSql.checkRows(10) + + tdSql.query("describe stb2_5") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "DOUBLE") + + #bool + lines2_6 = [ + "stb2_6 1626006833610ms t host=\"host0\"", + "stb2_6 1626006833620ms T host=\"host0\"", + "stb2_6 1626006833630ms true 
host=\"host0\"", + "stb2_6 1626006833640ms True host=\"host0\"", + "stb2_6 1626006833650ms TRUE host=\"host0\"", + "stb2_6 1626006833660ms f host=\"host0\"", + "stb2_6 1626006833670ms F host=\"host0\"", + "stb2_6 1626006833680ms false host=\"host0\"", + "stb2_6 1626006833690ms False host=\"host0\"", + "stb2_6 1626006833700ms FALSE host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_6) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_6") + tdSql.checkRows(10) + + tdSql.query("describe stb2_6") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "BOOL") + + #binary + lines2_7 = [ + "stb2_7 1626006833610ms \"binary_val.!@#$%^&*\" host=\"host0\"", + "stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"", + "stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_7) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_7") + tdSql.checkRows(3) + + tdSql.query("describe stb2_7") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "BINARY") + + #nchar + lines2_8 = [ + "stb2_8 1626006833610ms L\"nchar_val数值一\" host=\"host0\"", + "stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\"" + ] + + code = self._conn.insert_telnet_lines(lines2_8) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb2_8") + tdSql.checkRows(2) + + tdSql.query("describe stb2_8") + tdSql.checkRows(3) + tdSql.checkData(1, 1, "NCHAR") + + ### tags ### + print("============= step3 : test tags ================") + #tag value types + lines3_0 = [ + "stb3_0 1626006833610ms 1 t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=3.4E38f32,t6=1.7E308f64,t7=true,t8=\"binary_val_1\",t9=L\"标签值1\"", + "stb3_0 1626006833610ms 2 t1=-127i8,t2=-32767i16,t3=-2147483647i32,t4=-9223372036854775807i64,t5=-3.4E38f32,t6=-1.7E308f64,t7=false,t8=\"binary_val_2\",t9=L\"标签值2\"" + ] + + code = self._conn.insert_telnet_lines(lines3_0) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb3_0") + tdSql.checkRows(2) + + tdSql.query("describe stb3_0") + tdSql.checkRows(11) + + tdSql.checkData(2, 1, "TINYINT") + tdSql.checkData(2, 3, "TAG") + + tdSql.checkData(3, 1, "SMALLINT") + tdSql.checkData(3, 3, "TAG") + + tdSql.checkData(4, 1, "INT") + tdSql.checkData(4, 3, "TAG") + + tdSql.checkData(5, 1, "BIGINT") + tdSql.checkData(5, 3, "TAG") + + tdSql.checkData(6, 1, "FLOAT") + tdSql.checkData(6, 3, "TAG") + + tdSql.checkData(7, 1, "DOUBLE") + tdSql.checkData(7, 3, "TAG") + + tdSql.checkData(8, 1, "BOOL") + tdSql.checkData(8, 3, "TAG") + + tdSql.checkData(9, 1, "BINARY") + tdSql.checkData(9, 3, "TAG") + + tdSql.checkData(10, 1, "NCHAR") + tdSql.checkData(10, 3, "TAG") + + + #tag ID as child table name + lines3_1 = [ + "stb3_1 1626006833610ms 1 id=\"child_table1\",host=\"host1\"", + "stb3_1 1626006833610ms 2 host=\"host2\",iD=\"child_table2\"", + "stb3_1 1626006833610ms 3 ID=\"child_table3\",host=\"host3\"" + ] + + code = self._conn.insert_telnet_lines(lines3_1) + print("insert_telnet_lines result {}".format(code)) + + tdSql.query("select * from stb3_1") + tdSql.checkRows(3) + + tdSql.query("show tables like \"child%\"") + tdSql.checkRows(3) + + tdSql.checkData(0, 0, "child_table1") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/openTsdbTelnetLinesInsert.py 
b/tests/pytest/insert/openTsdbTelnetLinesInsert.py new file mode 100644 index 0000000000000000000000000000000000000000..25518437e102c985b4d84887b1806f9e341c86d6 --- /dev/null +++ b/tests/pytest/insert/openTsdbTelnetLinesInsert.py @@ -0,0 +1,1364 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import traceback +import random +from taos.error import LinesError +import time +from copy import deepcopy +import numpy as np +from util.log import * +from util.cases import * +from util.sql import * +from util.common import tdCom +import threading + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def createDb(self, name="test", db_update_tag=0): + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'us'") + else: + tdSql.execute(f"drop database if exists {name}") + tdSql.execute(f"create database if not exists {name} precision 'us' update 1") + tdSql.execute(f'use {name}') + + def timeTrans(self, time_value): + if time_value.endswith("ns"): + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000000 + elif time_value.endswith("us") or time_value.isdigit() and int(time_value) != 0: + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000 + elif time_value.endswith("ms"): + ts = int(''.join(list(filter(str.isdigit, time_value))))/1000 + elif time_value.endswith("s") and list(time_value)[-1] not in "num": + ts = int(''.join(list(filter(str.isdigit, time_value))))/1 + elif int(time_value) == 0: + ts = time.time() + else: + print("input ts maybe not right format") + ulsec = repr(ts).split('.')[1][:6] + if len(ulsec) < 6 and int(ulsec) != 0: + ulsec = int(ulsec) * (10 ** (6 - len(ulsec))) + elif int(ulsec) == 0: + ulsec *= 6 + # * follow two rows added for tsCheckCase + td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + return td_ts + #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts)) + td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts)) + return td_ts + #return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f")) + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def getTdTypeValue(self, value): + if value.endswith("i8"): + td_type = "TINYINT" + td_tag_value = ''.join(list(value)[:-2]) + elif value.endswith("i16"): + td_type = "SMALLINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.endswith("i32"): + td_type = "INT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.endswith("i64"): + td_type = "BIGINT" + td_tag_value = ''.join(list(value)[:-3]) + elif value.endswith("u64"): + td_type = "BIGINT UNSIGNED" + td_tag_value = ''.join(list(value)[:-3]) + elif value.endswith("f32"): + td_type = "FLOAT" + td_tag_value = ''.join(list(value)[:-3]) + td_tag_value = '{}'.format(np.float32(td_tag_value)) + elif value.endswith("f64"): + td_type = "DOUBLE" + td_tag_value = ''.join(list(value)[:-3]) + 
elif value.startswith('L"'): + td_type = "NCHAR" + td_tag_value = ''.join(list(value)[2:-1]) + elif value.startswith('"') and value.endswith('"'): + td_type = "BINARY" + td_tag_value = ''.join(list(value)[1:-1]) + elif value.lower() == "t" or value == "true" or value == "True" or value == "TRUE": + td_type = "BOOL" + td_tag_value = "True" + elif value.lower() == "f" or value == "false" or value == "False" or value == "FALSE": + td_type = "BOOL" + td_tag_value = "False" + else: + td_type = "FLOAT" + td_tag_value = value + return td_type, td_tag_value + + def typeTrans(self, type_list): + type_num_list = [] + for tp in type_list: + if tp.upper() == "TIMESTAMP": + type_num_list.append(9) + elif tp.upper() == "BOOL": + type_num_list.append(1) + elif tp.upper() == "TINYINT": + type_num_list.append(2) + elif tp.upper() == "SMALLINT": + type_num_list.append(3) + elif tp.upper() == "INT": + type_num_list.append(4) + elif tp.upper() == "BIGINT": + type_num_list.append(5) + elif tp.upper() == "FLOAT": + type_num_list.append(6) + elif tp.upper() == "DOUBLE": + type_num_list.append(7) + elif tp.upper() == "BINARY": + type_num_list.append(8) + elif tp.upper() == "NCHAR": + type_num_list.append(10) + elif tp.upper() == "BIGINT UNSIGNED": + type_num_list.append(14) + return type_num_list + + def inputHandle(self, input_sql): + input_sql_split_list = input_sql.split(" ") + stb_name = input_sql_split_list[0] + + #'stb2_5 1626006833610ms 3f64 host="host0"', + stb_tag_list = input_sql_split_list[3].split(',') + stb_col_value = input_sql_split_list[2] + ts_value = self.timeTrans(input_sql_split_list[1]) + + tag_name_list = [] + tag_value_list = [] + td_tag_value_list = [] + td_tag_type_list = [] + + col_name_list = [] + col_value_list = [] + td_col_value_list = [] + td_col_type_list = [] + + for elm in stb_tag_list: + if "id=" in elm.lower(): + tb_name = elm.split('=')[1] + else: + tag_name_list.append(elm.split("=")[0]) + tag_value_list.append(elm.split("=")[1]) + tb_name = "" + td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1])[1]) + td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1])[0]) + + col_name_list.append('value') + col_value_list.append(stb_col_value) + + td_col_value_list.append(self.getTdTypeValue(stb_col_value)[1]) + td_col_type_list.append(self.getTdTypeValue(stb_col_value)[0]) + + final_field_list = [] + final_field_list.extend(col_name_list) + final_field_list.extend(tag_name_list) + + final_type_list = [] + final_type_list.append("TIMESTAMP") + final_type_list.extend(td_col_type_list) + final_type_list.extend(td_tag_type_list) + final_type_list = self.typeTrans(final_type_list) + + final_value_list = [] + final_value_list.append(ts_value) + final_value_list.extend(td_col_value_list) + final_value_list.extend(td_tag_value_list) + return final_value_list, final_field_list, final_type_list, stb_name, tb_name + + def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32", + t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", + t8="L\"ncharTagValue\"", ts="1626006833639000000ns", + id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_double_tag=None, + t_add_tag=None, t_mul_tag=None, t_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None): + if stb_name == "": + stb_name = tdCom.getLongName(len=6, mode="letters") + if tb_name == "": + tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}' + if t0 == "": + 
t0 = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + if value == "": + value = random.choice(["f", "F", "false", "False", "t", "T", "true", "True", "TRUE", "FALSE"]) + if id_upper_tag is not None: + id = "ID" + else: + id = "id" + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t9={t8}' + if id_change_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0},{id}=\"{tb_name}\",t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8}' + if id_double_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\",t0={t0},t1={t1},{id}=\"{tb_name}_2\",t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8}' + if t_add_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8}' + if t_mul_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6}' + if id_noexist_tag is not None: + sql_seq = f'{stb_name} {ts} {value} t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6}' + if t_multi_tag is not None: + sql_seq = f'{stb_name} {ts} {value},{value} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6}' + if c_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8}' + if t_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\"' + if chinese_tag is not None: + sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0},t1=L"涛思数据"' + if multi_field_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\",t0={t0} t1={t1}' + return sql_seq, stb_name + + def genMulTagColStr(self, genType, count=1): + """ + genType must be tag/col + """ + tag_str = "" + col_str = "" + if genType == "tag": + for i in range(0, count): + if i < (count-1): + tag_str += f't{i}=f,' + else: + tag_str += f't{i}=f' + return tag_str + if genType == "col": + col_str = "t" + return col_str + + def genLongSql(self, tag_count): + stb_name = tdCom.getLongName(7, mode="letters") + tb_name = f'{stb_name}_1' + tag_str = self.genMulTagColStr("tag", tag_count) + col_str = self.genMulTagColStr("col") + ts = "1626006833640000000ns" + long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + f'id=\"{tb_name}\"' + ',' + tag_str + return long_sql, stb_name + + def getNoIdTbName(self, stb_name): + query_sql = f"select tbname from {stb_name}" + tb_name = self.resHandle(query_sql, True)[0][0] + return tb_name + + def resHandle(self, query_sql, query_tag): + tdSql.execute('reset query cache') + row_info = tdSql.query(query_sql, query_tag) + col_info = tdSql.getColNameList(query_sql, query_tag) + res_row_list = [] + sub_list = [] + for row_mem in row_info: + for i in row_mem: + sub_list.append(str(i)) + res_row_list.append(sub_list) + res_field_list_without_ts = col_info[0][1:] + res_type_list = col_info[1] + return res_row_list, res_field_list_without_ts, res_type_list + + def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None): + expect_list = 
self.inputHandle(input_sql) + self._conn.insert_telnet_lines([input_sql]) + query_sql = f"{query_sql} {stb_name} {condition}" + res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True) + if ts == 0: + res_ts = self.dateToTs(res_row_list[0][0]) + current_time = time.time() + if current_time - res_ts < 60: + tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:]) + else: + print("timeout") + tdSql.checkEqual(res_row_list[0], expect_list[0]) + else: + if none_check_tag is not None: + none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"] + none_index_list.reverse() + for j in none_index_list: + res_row_list[0].pop(j) + expect_list[0].pop(j) + tdSql.checkEqual(res_row_list[0], expect_list[0]) + tdSql.checkEqual(res_field_list_without_ts, expect_list[1]) + for i in range(len(res_type_list)): + tdSql.checkEqual(res_type_list[i], expect_list[2][i]) + # tdSql.checkEqual(res_type_list, expect_list[2]) + + def initCheckCase(self): + """ + normal tags and cols, one for every elm + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + + def boolTypeCheckCase(self): + """ + check all normal type + """ + tdCom.cleanTb() + full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"] + for t_type in full_type_list: + input_sql, stb_name = self.genFullTypeSql(t0=t_type) + self.resCmp(input_sql, stb_name) + + def symbolsCheckCase(self): + """ + check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? + """ + ''' + please test : + binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"' + ''' + tdCom.cleanTb() + binary_symbols = '"aaa"' + # binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"' + nchar_symbols = f'L{binary_symbols}' + input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols) + + # input_sql2, stb_name2 = self.genFullTypeSql(value=nchar_symbols, t7=binary_symbols, t8=nchar_symbols) + self.resCmp(input_sql1, stb_name1) + # self.resCmp(input_sql2, stb_name2) + + def tsCheckCase(self): + """ + test ts list --> ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"] + # ! 
when the microsecond part of the timestamp is all zeros, the database shows it in query results, but the result fetched through the python connector omits the trailing ".000000" -- please confirm this case; with the current change to the time-handling code the test passes
+        """
+        tdCom.cleanTb()
+        ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
+        for ts in ts_list:
+            input_sql, stb_name = self.genFullTypeSql(ts=ts)
+            self.resCmp(input_sql, stb_name, ts=ts)
+
+    def idSeqCheckCase(self):
+        """
+        check the position of id among the tags
+        eg: t0=**,id=**,t1=**
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
+        self.resCmp(input_sql, stb_name)
+
+    def idUpperCheckCase(self):
+        """
+        check the id key
+        eg: both id and ID are accepted
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
+        self.resCmp(input_sql, stb_name)
+        input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
+        self.resCmp(input_sql, stb_name)
+
+    def noIdCheckCase(self):
+        """
+        id does not exist
+        """
+        tdCom.cleanTb()
+        input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
+        self.resCmp(input_sql, stb_name)
+        query_sql = f"select tbname from {stb_name}"
+        res_row_list = self.resHandle(query_sql, True)[0]
+        if len(res_row_list[0][0]) > 0:
+            tdSql.checkColNameList(res_row_list, res_row_list)
+        else:
+            tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+    def maxColTagCheckCase(self):
+        """
+        max tag count is 128
+        """
+        for input_sql in [self.genLongSql(128)[0]]:
+            tdCom.cleanTb()
+            self._conn.insert_telnet_lines([input_sql])
+        for input_sql in [self.genLongSql(129)[0]]:
+            tdCom.cleanTb()
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+            except LinesError:
+                pass
+
+    def idIllegalNameCheckCase(self):
+        """
+        test illegal id names
+        mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
+        """
+        tdCom.cleanTb()
+        rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
+        for i in rstr:
+            input_sql = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"")[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+            except LinesError:
+                pass
+
+    def idStartWithNumCheckCase(self):
+        """
+        id starts with a number
+        """
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+        except LinesError:
+            pass
+
+    def nowTsCheckCase(self):
+        """
+        check that "now" is not supported as a timestamp
+        """
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(ts="now")[0]
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+        except LinesError:
+            pass
+
+    def dateFormatTsCheckCase(self):
+        """
+        check that date-format timestamps are not supported
+        """
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+        except LinesError:
+            pass
+
+    def illegalTsCheckCase(self):
+        """
+        check malformed ts like 16260068336390us19
+        """
+        tdCom.cleanTb()
+        input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+        except LinesError:
+            pass
+
+    def tagValueLengthCheckCase(self):
+        """
+        check the value limits of every tag type
+        """
+        tdCom.cleanTb()
+        # i8
+        for t1 in ["-127i8", "127i8"]:
+            input_sql, stb_name = self.genFullTypeSql(t1=t1)
+            self.resCmp(input_sql, stb_name)
+        for t1 in ["-128i8", "128i8"]:
+            input_sql = self.genFullTypeSql(t1=t1)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+            except LinesError:
+                pass
+
+        # i16
+        for t2 in ["-32767i16", "32767i16"]:
+            input_sql, stb_name = self.genFullTypeSql(t2=t2)
+            self.resCmp(input_sql, stb_name)
+        for t2 in ["-32768i16", "32768i16"]:
+            input_sql = self.genFullTypeSql(t2=t2)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
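+                # note: the type minimum (-32768 for SMALLINT) is reserved by
+                # TDengine as the NULL sentinel, so both out-of-range values in
+                # this loop should be rejected (editorial assumption based on
+                # the 2.x type ranges)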
+            except LinesError:
+                pass
+
+        # i32
+        for t3 in ["-2147483647i32", "2147483647i32"]:
+            input_sql, stb_name = self.genFullTypeSql(t3=t3)
+            self.resCmp(input_sql, stb_name)
+        for t3 in ["-2147483648i32", "2147483648i32"]:
+            input_sql = self.genFullTypeSql(t3=t3)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+            except LinesError:
+                pass
+
+        # i64
+        for t4 in ["-9223372036854775807i64", "9223372036854775807i64"]:
+            input_sql, stb_name = self.genFullTypeSql(t4=t4)
+            self.resCmp(input_sql, stb_name)
+        for t4 in ["-9223372036854775808i64", "9223372036854775808i64"]:
+            input_sql = self.genFullTypeSql(t4=t4)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+            except LinesError:
+                pass
+
+        # f32
+        for t5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+            input_sql, stb_name = self.genFullTypeSql(t5=t5)
+            self.resCmp(input_sql, stb_name)
+        # * limit set to 4028234664*(10**38)
+        for t5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+            input_sql = self.genFullTypeSql(t5=t5)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # f64 (both bounds; the original list repeated the negative bound twice)
+        for t6 in [f'{-1.79769*(10**308)}f64', f'{1.79769*(10**308)}f64']:
+            input_sql, stb_name = self.genFullTypeSql(t6=t6)
+            self.resCmp(input_sql, stb_name)
+        # * limit set to 1.797693134862316*(10**308)
+        for t6 in [f'{-1.797693134862316*(10**308)}f64', f'{1.797693134862316*(10**308)}f64']:
+            input_sql = self.genFullTypeSql(t6=t6)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # binary
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833639000000ns t t0=t,t1="{tdCom.getLongName(16374, "letters")}"'
+        self._conn.insert_telnet_lines([input_sql])
+
+        input_sql = f'{stb_name} 1626006833639000000ns t t0=t,t1="{tdCom.getLongName(16375, "letters")}"'
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+            raise Exception("should not reach here")
+        except LinesError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+        # nchar
+        # * legal nchar could not be larger than 16374/4
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833639000000ns t t0=t,t1=L"{tdCom.getLongName(4093, "letters")}"'
+        self._conn.insert_telnet_lines([input_sql])
+
+        input_sql = f'{stb_name} 1626006833639000000ns t t0=t,t1=L"{tdCom.getLongName(4094, "letters")}"'
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+            raise Exception("should not reach here")
+        except LinesError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def colValueLengthCheckCase(self):
+        """
+        check the value limits of every column type
+        """
+        tdCom.cleanTb()
+        # i8
+        for value in ["-127i8", "127i8"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-128i8", "128i8"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+        # i16
+        tdCom.cleanTb()
+        for value in ["-32767i16"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-32768i16", "32768i16"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # i32
+        tdCom.cleanTb()
+        for value in ["-2147483647i32"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-2147483648i32", "2147483648i32"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # i64
+        tdCom.cleanTb()
+        for value in ["-9223372036854775807i64"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        tdCom.cleanTb()
+        for value in ["-9223372036854775808i64", "9223372036854775808i64"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # f32
+        tdCom.cleanTb()
+        for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        # * limit set to 4028234664*(10**38)
+        tdCom.cleanTb()
+        for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # f64 (both bounds; the original list repeated the negative bound twice)
+        tdCom.cleanTb()
+        for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
+            input_sql, stb_name = self.genFullTypeSql(value=value)
+            self.resCmp(input_sql, stb_name)
+        # * limit set to 1.797693134862316*(10**308)
+        tdCom.cleanTb()
+        for value in [f'{-1.797693134862316*(10**308)}f64', f'{1.797693134862316*(10**308)}f64']:
+            input_sql = self.genFullTypeSql(value=value)[0]
+            try:
+                self._conn.insert_telnet_lines([input_sql])
+                raise Exception("should not reach here")
+            except LinesError as err:
+                tdSql.checkNotEqual(err.errno, 0)
+
+        # binary
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833639000000ns "{tdCom.getLongName(16374, "letters")}" t0=t'
+        self._conn.insert_telnet_lines([input_sql])
+
+        tdCom.cleanTb()
+        input_sql = f'{stb_name} 1626006833639000000ns "{tdCom.getLongName(16375, "letters")}" t0=t'
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+            raise Exception("should not reach here")
+        except LinesError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+        # nchar
+        # * legal nchar could not be larger than 16374/4
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(7, "letters")
+        input_sql = f'{stb_name} 1626006833639000000ns L"{tdCom.getLongName(4093, "letters")}" t0=t'
+        self._conn.insert_telnet_lines([input_sql])
+
+        tdCom.cleanTb()
+        input_sql = f'{stb_name} 1626006833639000000ns L"{tdCom.getLongName(4094, "letters")}" t0=t'
+        try:
+            self._conn.insert_telnet_lines([input_sql])
+            raise Exception("should not reach here")
+        except LinesError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def tagColIllegalValueCheckCase(self):
+        """
+        test illegal tag and column values
+        """
+        tdCom.cleanTb()
+        # bool
+        for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+            input_sql1 = 
self.genFullTypeSql(t0=i)[0] + try: + self._conn.insert_telnet_lines([input_sql1]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + input_sql2 = self.genFullTypeSql(value=i)[0] + try: + self._conn.insert_telnet_lines([input_sql2]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + # i8 i16 i32 i64 f32 f64 + for input_sql in [ + self.genFullTypeSql(t1="1s2i8")[0], + self.genFullTypeSql(t2="1s2i16")[0], + self.genFullTypeSql(t3="1s2i32")[0], + self.genFullTypeSql(t4="1s2i64")[0], + self.genFullTypeSql(t5="11.1s45f32")[0], + self.genFullTypeSql(t6="11.1s45f64")[0], + ]: + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + # check binary and nchar blank + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abc aaa" t0=t' + input_sql3 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0="abc aaa"' + input_sql4 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=L"abc aaa"' + for input_sql in [input_sql1, input_sql2, input_sql3, input_sql4]: + try: + self._conn.insert_telnet_lines([input_sql]) + except LinesError as err: + pass + + # check accepted binary and nchar symbols + # # * ~!@#$¥%^&*()-+={}|[]、「」:; + for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc{symbol}aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=t,t1="abc{symbol}aaa"' + self._conn.insert_telnet_lines([input_sql1]) + self._conn.insert_telnet_lines([input_sql2]) + + + def duplicateIdTagColInsertCheckCase(self): + """ + check duplicate Id Tag Col + """ + tdCom.cleanTb() + input_sql_id = self.genFullTypeSql(id_double_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql_id]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + input_sql = self.genFullTypeSql()[0] + input_sql_tag = input_sql.replace("t5", "t6") + try: + self._conn.insert_telnet_lines([input_sql_tag]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + ##### stb exist ##### + def noIdStbExistCheckCase(self): + """ + case no id when stb exist + """ + print("noIdStbExistCheckCase") + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") + self.resCmp(input_sql, stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f") + self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"') + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + # TODO cover other case + + def duplicateInsertExistCheckCase(self): + """ + check duplicate insert when stb exist + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + self._conn.insert_telnet_lines([input_sql]) + self.resCmp(input_sql, stb_name) + + def tagColBinaryNcharLengthCheckCase(self): + """ + check length increase + """ + print("tagColBinaryNcharLengthCheckCase") + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql() + self.resCmp(input_sql, stb_name) + tb_name = tdCom.getLongName(5, "letters") + input_sql, stb_name = 
self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"") + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') + + def tagColAddDupIDCheckCase(self): + """ + check tag count add, stb and tb duplicate + * tag: alter table ... + * col: when update==0 and ts is same, unchange + * so this case tag&&value will be added, + * col is added without value when update==0 + * col is added with value when update==1 + """ + print("tagColAddDupIDCheckCase") + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + for db_update_tag in [0, 1]: + if db_update_tag == 1 : + self.createDb("test_update", db_update_tag=db_update_tag) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") + self.resCmp(input_sql, stb_name) + self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", value="f", t_add_tag=True) + if db_update_tag == 1 : + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') + else: + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + self.createDb() + + def tagColAddCheckCase(self): + """ + check tag count add + """ + print("tagColAddCheckCase") + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") + self.resCmp(input_sql, stb_name) + tb_name_1 = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"') + res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0] + tdSql.checkEqual(res_row_list[0], ['None', 'None']) + self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + + def tagMd5Check(self): + """ + condition: stb not change + insert two table, keep tag unchange, change col + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name1 = self.getNoIdTbName(stb_name) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True) + self.resCmp(input_sql, stb_name) + tb_name2 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(1) + tdSql.checkEqual(tb_name1, tb_name2) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True) + self._conn.insert_telnet_lines([input_sql]) + tb_name3 = self.getNoIdTbName(stb_name) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + tdSql.checkNotEqual(tb_name1, tb_name3) + + # * tag binary max is 16384, col+ts binary max 49151 + def tagColBinaryMaxLengthCheckCase(self): + """ + every binary and nchar must be length+2 + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + + input_sql = f'{stb_name} 1626006833639000000ns f id="{tb_name}",t0=t' + self._conn.insert_telnet_lines([input_sql]) + + # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}"' + self._conn.insert_telnet_lines([input_sql]) + + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = 
f'{stb_name} 1626006833639000000ns f t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}"' + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError: + pass + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + # * tag nchar max is 16374/4, col+ts nchar max 49151 + def tagColNcharMaxLengthCheckCase(self): + """ + check nchar length limit + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(7, "letters") + tb_name = f'{stb_name}_1' + input_sql = f'{stb_name} 1626006833639000000ns f id="{tb_name}",t0=t' + self._conn.insert_telnet_lines([input_sql]) + + # * legal nchar could not be larger than 16374/4 + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}"' + self._conn.insert_telnet_lines([input_sql]) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}"' + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(2) + + def batchInsertCheckCase(self): + """ + test batch insert + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + + lines = ["st123456 1626006833639000000ns 1i64 t1=3i64,t2=4f64,t3=\"t3\"", + "st123456 1626006833640000000ns 2i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64", + f'{stb_name} 1626056811823316532ns 3i64 t2=5f64,t3=L\"ste\"', + "stf567890 1626006933640000000ns 4i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64", + "st123456 1626006833642000000ns 5i64 t1=4i64,t2=5f64,t3=\"t4\"", + f'{stb_name} 1626056811843316532ns 6i64 t2=5f64,t3=L\"ste2\"', + f'{stb_name} 1626056812843316532ns 7i64 t2=5f64,t3=L\"ste2\"', + "st123456 1626006933640000000ns 8i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64", + "st123456 1626006933641000000ns 9i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64" + ] + self._conn.insert_telnet_lines(lines) + tdSql.query('show stables') + tdSql.checkRows(3) + tdSql.query('show tables') + tdSql.checkRows(6) + tdSql.query('select * from st123456') + tdSql.checkRows(5) + + def multiInsertCheckCase(self, count): + """ + test multi insert + """ + tdCom.cleanTb() + sql_list = [] + stb_name = tdCom.getLongName(8, "letters") + tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') + for i in range(count): + input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] + sql_list.append(input_sql) + self._conn.insert_telnet_lines(sql_list) + tdSql.query('show tables') + tdSql.checkRows(1000) + + def batchErrorInsertCheckCase(self): + """ + test batch error insert + """ + tdCom.cleanTb() + stb_name = tdCom.getLongName(8, "letters") + lines = ["st123456 1626006833639000000ns 3i64 t1=3i64,t2=4f64,t3=\"t3\"", + f"{stb_name} 1626056811823316532ns tRue t2=5f64,t3=L\"ste\""] + try: + self._conn.insert_telnet_lines(lines) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def multiColsInsertCheckCase(self): + """ + test multi cols insert + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_multi_tag=True)[0] + try: + 
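# t_multi_tag yields two comma-joined values, e.g. (shape only, hypothetical):
+            #   stb 1626006833639000000ns t,t id="tb_1",t0=t,...
+            # a telnet line carries exactly one value, so this insert must fail
+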
self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankColInsertCheckCase(self): + """ + test blank col insert + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_blank_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankTagInsertCheckCase(self): + """ + test blank tag insert + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_blank_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) + self.resCmp(input_sql, stb_name) + + def multiFieldCheckCase(self): + ''' + multi_field + ''' + tdCom.cleanTb() + input_sql = self.genFullTypeSql(multi_field_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def errorTypeCheckCase(self): + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127I8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767I16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647I32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807I64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345F32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789F64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 1626006833639000000NS "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"'] + for input_sql in input_sql_list: + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def genSqlList(self, count=5, stb_name="", tb_name=""): + """ + stb --> supertable + tb --> table + ts --> timestamp, same default + col --> column, same default + tag --> tag, same default + d --> different + s --> same + a --> add + m --> minus + """ + d_stb_d_tb_list = list() + s_stb_s_tb_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() + s_stb_d_tb_list = list() + s_stb_d_tb_m_tag_list = list() + s_stb_d_tb_a_tag_list = list() + s_stb_s_tb_d_ts_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() + s_stb_d_tb_d_ts_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() + for i in range(count): + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f")) + 
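# each genFullTypeSql(...) call returns a (line, stb_name) tuple;
+            # genMultiThreadSeq() below threads only tuple[0], the raw line
+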
s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True)) + s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \ + s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \ + s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list + + + def genMultiThreadSeq(self, sql_list): + tlist = list() + for insert_sql in sql_list: + t = threading.Thread(target=self._conn.insert_telnet_lines,args=([insert_sql[0]],)) + tlist.append(t) + return tlist + + def multiThreadRun(self, tlist): + for t in tlist: + t.start() + for t in tlist: + t.join() + + def stbInsertMultiThreadCheckCase(self): + """ + thread input different stb + """ + tdCom.cleanTb() + input_sql = self.genSqlList()[0] + self.multiThreadRun(self.genMultiThreadSeq(input_sql)) + tdSql.query(f"show tables;") + tdSql.checkRows(5) + + def sStbStbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, result keep first data + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_list = 
self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) + + def sStbStbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, add columes and tags, result keep first data + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) + + def sStbStbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different data, minus columes and tags, result keep first data + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + expected_tb_name = self.getNoIdTbName(stb_name)[0] + tdSql.checkEqual(tb_name, expected_tb_name) + tdSql.query(f"select * from {stb_name};") + tdSql.checkRows(1) + + def sStbDtbDdataInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add col, mul tag + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833639000000ns "omfdhyom" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "vqowydbc" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "plgkckpv" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "cujyqvlj" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "twjxisat" t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def sStbDtbDdataAtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, different data, add tag, mul col + 
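expected: the first insert plus 5 id_noexist lines -> 6 child tables,
+        matching the checkRows(6) assertion below
+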
""" + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbStbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "rljjrrul" id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="bmcanhbs",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "basanglx" id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="enqkyvmb",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "clsajzpp" id="{tb_name}",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="eivaegjk",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "jitwseso" id="{tb_name}",t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="yhlwkddq",t8=L"ncharTagValue"', 'dwpthv')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add col, mul tag + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + tdSql.query(f"select * from {stb_name} where t8 is not NULL") + tdSql.checkRows(6) + + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): + """ + thread input same stb tb, different ts, add tag, mul col + """ + tdCom.cleanTb() + tb_name = tdCom.getLongName(7, "letters") + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="hpxzrdiw",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "yqeztggb" id="{tb_name}",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="gdtblmrc",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "gbkinqdk" id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="iqniuvco",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "ldxxejbd" 
id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vxkipags",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "tlvzwjes" id="{tb_name}",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="enwrlrtj",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(1) + tdSql.query(f"select * from {stb_name}") + tdSql.checkRows(6) + for t in ["t10", "t11"]: + tdSql.query(f"select * from {stb_name} where {t} is not NULL;") + tdSql.checkRows(0) + + def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(6) + + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): + """ + thread input same stb, different tb, data, ts, add col, mul tag + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") + self.resCmp(input_sql, stb_name) + s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "zbvwckcd" t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "vymcjfwc" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "laumkwfn" t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "nyultzxr" t0=false,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) + tdSql.query(f"show tables;") + tdSql.checkRows(3) + + def test(self): + # input_sql1 = "stb2_5 1626006833610ms 3f64 host=\"host0\",host2=L\"host2\"" + # input_sql2 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 1626006933640000000ns" + try: + input_sql = f'test_nchar 0 L"涛思数据" t0=f,t1=L"涛思数据",t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64' + self._conn.insert_telnet_lines([input_sql]) + # input_sql, stb_name = self.genFullTypeSql() + # self.resCmp(input_sql, stb_name) + except LinesError as err: + print(err.errno) + # self._conn.insert_telnet_lines([input_sql2]) + # input_sql3 = f'abcd,id="cc¥Ec",t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="ndsfdrum",t8=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="igwoehkm",c8=L"ncharColValue",c9=7u64 0' + # print(input_sql3) + # input_sql4 = 
'hmemeb,id="kilrcrldgf",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="fysodjql",t8=L"ncharTagValue" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="waszbfvc",c8=L"ncharColValue",c9=7u64 0' + # code = self._conn.insert_telnet_lines([input_sql3]) + # print(code) + # self._conn.insert_telnet_lines([input_sql4]) + + def runAll(self): + self.initCheckCase() + self.boolTypeCheckCase() + # ! leave a bug + #self.symbolsCheckCase() + self.tsCheckCase() + self.idSeqCheckCase() + self.idUpperCheckCase() + self.noIdCheckCase() + self.maxColTagCheckCase() + + self.idIllegalNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + self.tagColBinaryMaxLengthCheckCase() + self.tagColNcharMaxLengthCheckCase() + + self.batchInsertCheckCase() + self.multiInsertCheckCase(1000) + self.batchErrorInsertCheckCase() + self.multiColsInsertCheckCase() + self.blankColInsertCheckCase() + self.blankTagInsertCheckCase() + self.chineseCheckCase() + self.multiFieldCheckCase() + self.errorTypeCheckCase() + # MultiThreads + # self.stbInsertMultiThreadCheckCase() + # self.sStbStbDdataInsertMultiThreadCheckCase() + # self.sStbStbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataInsertMultiThreadCheckCase() + # self.sStbDtbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsInsertMultiThreadCheckCase() + # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() + + def run(self): + print("running {}".format(__file__)) + self.createDb() + try: + # self.symbolsCheckCase() + self.runAll() + # self.test() + except Exception as err: + print(''.join(traceback.format_exception(None, err, err.__traceback__))) + raise err + # self.tagColIllegalValueCheckCase() + # self.test() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/manualTest/TD-5114/continueCreateDn.py b/tests/pytest/manualTest/TD-5114/continueCreateDn.py index 4b724f0587a6a2bbe3f477e8a47e283c0924a29e..9494ee5f3685d3ddaeb1848a58878d63fa7a54b6 100644 --- a/tests/pytest/manualTest/TD-5114/continueCreateDn.py +++ b/tests/pytest/manualTest/TD-5114/continueCreateDn.py @@ -42,7 +42,7 @@ class TwoClients: tdSql.execute("drop database if exists db3") - # insert data with taosc + # insert data with c connector for i in range(10): os.system("taosdemo -f manualTest/TD-5114/insertDataDb3Replica2.json -y ") # # check data correct diff --git a/tests/pytest/query/queryDiffColsOr.py b/tests/pytest/query/queryDiffColsOr.py index feeab84a7ec577c9b48f19416400bcc7b348d543..e9e791da9f34c881d5c846b9bcc112866e5d992b 100644 --- a/tests/pytest/query/queryDiffColsOr.py +++ b/tests/pytest/query/queryDiffColsOr.py @@ -10,13 +10,10 @@ 
###################################################################
 # -*- coding: utf-8 -*-
-from copy import deepcopy
 from util.log import tdLog
 from util.cases import tdCases
 from util.sql import tdSql
 from util.common import tdCom
-
-
 class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
@@ -409,6 +406,62 @@ class TDTestCase:
         tdSql.checkRows(10)
         tdSql.checkEqual(int(res[9][0]), 10)
 
+    def queryMultiTbWithTag(self, tb_name):
+        # tb_name's stable was created with tags (1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)
+
+        tdSql.execute(
+            f'CREATE TABLE {tb_name}_sub2 using {tb_name} tags (2, 2, 2, 4, 2.2, 2.2, "binary2", "nchar2", true, 12)')
+        tdSql.execute(
+            f'CREATE TABLE {tb_name}_sub3 using {tb_name} tags (3, 3, 3, 3, 3.3, 3.3, "binary3", "nchar3", true, 13)')
+        tdSql.execute(
+            f'insert into {tb_name}_sub2 values ("2021-01-25 12:00:00", 2, 2, 2, 4, 2.2, 2.2, "binary2", "nchar2", true, 12)')
+        tdSql.execute(
+            f'insert into {tb_name}_sub3 values ("2021-01-27 12:00:00", 3, 3, 3, 3, 3.3, 3.3, "binary3", "nchar3", true, 13)')
+        ## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or condition_tag_C or condition_tag_D or like and in interval
+        query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
+        res = tdSql.query(query_sql, True)
+        tdSql.checkRows(3)
+        tdSql.checkEqual(int(res[0][1]), 3)
+        tdSql.checkEqual(int(res[0][2]), 1)
+        tdSql.checkEqual(int(res[0][3]), 10)
+        tdSql.checkEqual(int(res[1][1]), 3)
+        tdSql.checkEqual(int(res[1][2]), 3)
+        tdSql.checkEqual(int(res[1][3]), 3)
+        tdSql.checkEqual(int(res[2][1]), 3)
+        tdSql.checkEqual(int(res[2][2]), 2)
+        tdSql.checkEqual(int(res[2][3]), 6)
+
+
+        # ! to confirm
+        ## select count avg sum from (condition_A or condition_B or condition_tag_C or condition_tag_D and like and in) where condition_A or condition_B or like and in interval
+        # query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where t1 = 3 and t1 = 2 or c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true) interval(8d)'
+        # res = tdSql.query(query_sql, True)
+        # tdSql.checkRows(3)
+        # tdSql.checkEqual(int(res[0][1]), 3)
+        # tdSql.checkEqual(int(res[0][2]), 1)
+        # tdSql.checkEqual(int(res[0][3]), 10)
+        # tdSql.checkEqual(int(res[1][1]), 3)
+        # tdSql.checkEqual(int(res[1][2]), 3)
+        # tdSql.checkEqual(int(res[1][3]), 3)
+        # tdSql.checkEqual(int(res[2][1]), 3)
+        # tdSql.checkEqual(int(res[2][2]), 2)
+        # tdSql.checkEqual(int(res[2][3]), 6)
+
+        ## select count avg sum from (condition_A and condition_B and like and in and ts and condition_tag_A and condition_tag_B and between) where condition_C or condition_D or condition_tag_C or condition_tag_D or like and in interval
+        query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >= 1 and c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00" and t1 < 2 and t1 > 0 and c6 between 0 and 7) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
+        res = tdSql.query(query_sql, True)
+        tdSql.checkRows(2)
+        tdSql.checkEqual(int(res[0][1]), 2)
+        tdSql.checkEqual(int(res[0][2]), 1)
+        tdSql.checkEqual(int(res[0][3]), 2)
+        tdSql.checkEqual(int(res[1][1]), 1)
+        tdSql.checkEqual(int(res[1][2]), 1)
+        tdSql.checkEqual(int(res[1][3]), 1)
+
+        # ! 
to confirm + #select * from (select * from pyclqtwi where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00") where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) ; + #DB error: invalid operation: invalid expression (0.008747s) + def checkTbColTypeOperator(self): ''' Ordinary table full column type and operator @@ -492,33 +545,13 @@ class TDTestCase: ''' tb_name = self.initStb() self.queryMultiTb(tb_name) - - - # tb_name1 = tdCom.getLongName(8, "letters") - # tb_name2 = tdCom.getLongName(8, "letters") - # tb_name3 = tdCom.getLongName(8, "letters") - # tdSql.execute( - # f"CREATE TABLE {tb_name1} (ts timestamp, c1 tinyint, c2 smallint, c3 int)") - # tdSql.execute( - # f"CREATE TABLE {tb_name2} (ts timestamp, c1 tinyint, c2 smallint, c3 int)") - # tdSql.execute( - # f"CREATE TABLE {tb_name3} (ts timestamp, c1 tinyint, c2 smallint, c3 int)") - # insert_sql_list = [f'insert into {tb_name1} values ("2021-01-01 12:00:00", 1, 5, 1)', - # f'insert into {tb_name1} values ("2021-01-03 12:00:00", 2, 4, 1)', - # f'insert into {tb_name1} values ("2021-01-05 12:00:00", 3, 2, 1)', - # f'insert into {tb_name2} values ("2021-01-01 12:00:00", 4, 2, 1)', - # f'insert into {tb_name2} values ("2021-01-02 12:00:00", 5, 1, 1)', - # f'insert into {tb_name2} values ("2021-01-04 12:00:00", 1, 2, 1)', - # f'insert into {tb_name3} values ("2021-01-02 12:00:00", 4, 2, 1)', - # f'insert into {tb_name3} values ("2021-01-06 12:00:00", 5, 1, 1)', - # f'insert into {tb_name3} values ("2021-01-07 12:00:00", 1, 2, 1)', - # ] - # for sql in insert_sql_list: - # tdSql.execute(sql) - # tdSql.query( - # f'select * from {tb_name1} t1, {tb_name2}, {tb_name3} t3 t2 where (t1.ts=t2.ts or t2.ts=t3.ts)') - # tdSql.checkRows(4) - + + def checkMultiTbWithTag(self): + ''' + test Multi tb with tag + ''' + tb_name = self.initStb() + self.queryMultiTbWithTag(tb_name) def run(self): tdSql.prepare() @@ -534,7 +567,7 @@ class TDTestCase: self.checkStbPreCal() self.checkMultiTb() self.checkMultiStb() - + self.checkMultiTbWithTag() def stop(self): tdSql.close() diff --git a/tests/pytest/query/queryJoin.py b/tests/pytest/query/queryJoin.py index cd50a7bf452966306e6811e90802d9d160bfa68b..6d028049e516b4b0f399fcb055793a16ec093eec 100644 --- a/tests/pytest/query/queryJoin.py +++ b/tests/pytest/query/queryJoin.py @@ -13,6 +13,8 @@ import sys import taos +import string +import random from util.log import * from util.cases import * from util.sql import * @@ -23,6 +25,11 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + def run(self): tdSql.prepare() @@ -186,6 +193,20 @@ class TDTestCase: tdSql.query("select t1.ts from t0,t1 where t0.ts = t1.ts") tdSql.checkData(0,0,'2018-10-03 14:38:05.000000') + #TD-6425 join result more than 1MB + tdSql.execute("create database test_join") + tdSql.execute("use test_join") + + ts = 1538548685000 + tdSql.execute("create table stb(ts timestamp, c1 nchar(200)) tags(id int, loc binary(20))") + for i in range(2): + tdSql.execute("create table tb%d using stb tags(1, 'city%d')" % (i, i)) + for j in range(1000): + tdSql.execute("insert into tb%d values(%d, '%s')" % (i, ts + j, self.get_random_string(200))) + + tdSql.query("select tb0.c1, tb1.c1 from tb0, tb1 where tb0.ts = tb1.ts") + tdSql.checkRows(1000) + def stop(self): tdSql.close() tdLog.success("%s 
successfully executed" % __file__) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py index ca8832170b7706621f5ef9d3225fe2cf16141c34..a2059ec924ad1e2239c2709bc99dd58fbafa1337 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py @@ -44,7 +44,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 1b56830189623d344168918f239887c3359b2645..197f8a208e85ca4ce57c06518a433ec3a3acbac3 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json +++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -41,7 +41,7 @@ "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1000, + "insert_rows": 1001, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 51b064a08e5cd55401f9cf803a8683653f722679..82c57a656dfea12f80fe4eb2b530742c5bfb0916 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -120,7 +120,7 @@ class taosdemoPerformace: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdemo" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index 5662881031a01d19398cce223892eebbd8133c97..3cdcdcef5afcb14c04204d2489571bdfed937080 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -36,7 +36,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdemo" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py index b70525ae4d87465a59ad524067d8b1e4a61d526a..70df535f59cbb97469b7a73e4e230d9a8671bfc7 100644 --- a/tests/pytest/tools/taosdemoTestTblAlt.py +++ b/tests/pytest/tools/taosdemoTestTblAlt.py @@ -26,7 +26,7 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.numberOfTables = 10 + self.numberOfTables = 8 self.numberOfRecords = 1000000 def getBuildPath(self): @@ -86,7 +86,7 @@ class TDTestCase: while True: print("query started") try: - tdSql.query("select * from test.t9") + tdSql.query("select * from test.t7") except Exception as e: tdLog.info("select * test failed") time.sleep(2) @@ -100,8 +100,8 @@ class TDTestCase: print("alter table test.meters add column c10 int") tdSql.execute("alter table test.meters add column c10 int") - print("insert into test.t9 values (now, 1, 2, 3, 4, 0)") - 
tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0)") + print("insert into test.t7 values (now, 1, 2, 3, 4, 0)") + tdSql.execute("insert into test.t7 values (now, 1, 2, 3, 4, 0)") def run(self): tdSql.prepare() diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py index bed0564139e20fb6c562a7258af0cbd5b542069b..839988375b652b0cfad09d8a6de7697de19609ea 100644 --- a/tests/pytest/tools/taosdumpTest2.py +++ b/tests/pytest/tools/taosdumpTest2.py @@ -37,7 +37,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tsdb/tsdbCompClusterReplica2.py b/tests/pytest/tsdb/tsdbCompClusterReplica2.py index 2e016deea0b78d6cf9f76a917ca49fc2c5744a6e..cfda271497cde59e8dbe60150ddf935ba63fd9be 100644 --- a/tests/pytest/tsdb/tsdbCompClusterReplica2.py +++ b/tests/pytest/tsdb/tsdbCompClusterReplica2.py @@ -24,7 +24,7 @@ from random import choice class TwoClients: def initConnection(self): - self.host = "chenhaoran02" + self.host = "chenhaoran01" self.user = "root" self.password = "taosdata" self.config = "/etc/taos/" @@ -116,8 +116,10 @@ class TwoClients: sleep(3) tdSql.execute(" drop dnode 'chenhaoran02:6030'; ") sleep(20) - os.system("rm -rf /var/lib/taos/*") + # remove data file; + os.system("rm -rf /home/chr/data/data0/*") print("clear dnode chenhaoran02'data files") + sleep(5) os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") print("start taosd") sleep(10) diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index 9f46b078475806292239e49fcbec80f5957e4eb4..188ce1405541cbbb230ceb186c44cfd4230925fc 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -222,3 +222,4 @@ run general/stream/metrics_replica1_vnoden.sim run general/db/show_create_db.sim run general/db/show_create_table.sim run general/parser/like.sim +run general/parser/regex.sim diff --git a/tests/script/general/parser/regex.sim b/tests/script/general/parser/regex.sim new file mode 100644 index 0000000000000000000000000000000000000000..eed36018d4c04ec5752e64105d025347982bfcb0 --- /dev/null +++ b/tests/script/general/parser/regex.sim @@ -0,0 +1,122 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect + +$db = testdb +sql drop database if exists $db +sql create database $db +sql use $db + +print ======================== regular expression match test +$st_name = st +$ct1_name = ct1 +$ct2_name = ct2 + +sql create table $st_name (ts timestamp, c1b binary(20)) tags(t1b binary(20)); +sql create table $ct1_name using $st_name tags('taosdata1') +sql create table $ct2_name using $st_name tags('taosdata2') +sql create table not_match using $st_name tags('NOTMATCH') + +sql select tbname from $st_name where tbname match '.*' +if $rows != 3 then + return -1 +endi + + +sql select tbname from $st_name where tbname match '^ct[[:digit:]]' +if $rows != 2 then + return -1 +endi + +sql select tbname from $st_name where tbname nmatch '^ct[[:digit:]]' +if $rows != 1 then + return -1 +endi + +sql select tbname from $st_name where tbname match '.*' +if $rows != 3 then + return -1 +endi + +sql select tbname from 
+sql select tbname from $st_name where tbname nmatch '.*'
+if $rows != 0 then
+  return -1
+endi
+
+sql select tbname from $st_name where t1b match '[[:lower:]]+'
+if $rows != 2 then
+  return -1
+endi
+
+sql select tbname from $st_name where t1b nmatch '[[:lower:]]+'
+if $rows != 1 then
+  return -1
+endi
+
+sql insert into $ct1_name values(now, 'this is engine')
+sql insert into $ct2_name values(now, 'this is app egnine')
+
+sql select c1b from $st_name where c1b match 'engine'
+if $data00 != @this is engine@ then
+  return -1
+endi
+
+if $rows != 1 then
+  return -1
+endi
+
+sql select c1b from $st_name where c1b nmatch 'engine'
+if $data00 != @this is app egnine@ then
+  return -1
+endi
+
+if $rows != 1 then
+  return -1
+endi
+
+sql_error select c1b from $st_name where c1b match e;
+sql_error select c1b from $st_name where c1b nmatch e;
+
+sql create table wrong_type(ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 bool, c7 nchar(20)) tags(t0 tinyint, t1 smallint, t2 int, t3 bigint, t4 float, t5 double, t6 bool, t7 nchar(10))
+sql insert into wrong_type_1 using wrong_type tags(1, 2, 3, 4, 5, 6, true, 'notsupport') values(now, 1, 2, 3, 4, 5, 6, false, 'notsupport')
+sql_error select * from wrong_type where ts match '.*'
+sql_error select * from wrong_type where ts nmatch '.*'
+sql_error select * from wrong_type where c0 match '.*'
+sql_error select * from wrong_type where c0 nmatch '.*'
+sql_error select * from wrong_type where c1 match '.*'
+sql_error select * from wrong_type where c1 nmatch '.*'
+sql_error select * from wrong_type where c2 match '.*'
+sql_error select * from wrong_type where c2 nmatch '.*'
+sql_error select * from wrong_type where c3 match '.*'
+sql_error select * from wrong_type where c3 nmatch '.*'
+sql_error select * from wrong_type where c4 match '.*'
+sql_error select * from wrong_type where c4 nmatch '.*'
+sql_error select * from wrong_type where c5 match '.*'
+sql_error select * from wrong_type where c5 nmatch '.*'
+sql_error select * from wrong_type where c6 match '.*'
+sql_error select * from wrong_type where c6 nmatch '.*'
+sql_error select * from wrong_type where c7 match '.*'
+sql_error select * from wrong_type where c7 nmatch '.*'
+sql_error select * from wrong_type where t1 match '.*'
+sql_error select * from wrong_type where t1 nmatch '.*'
+sql_error select * from wrong_type where t2 match '.*'
+sql_error select * from wrong_type where t2 nmatch '.*'
+sql_error select * from wrong_type where t3 match '.*'
+sql_error select * from wrong_type where t3 nmatch '.*'
+sql_error select * from wrong_type where t4 match '.*'
+sql_error select * from wrong_type where t4 nmatch '.*'
+sql_error select * from wrong_type where t5 match '.*'
+sql_error select * from wrong_type where t5 nmatch '.*'
+sql_error select * from wrong_type where t6 match '.*'
+sql_error select * from wrong_type where t6 nmatch '.*'
+sql_error select * from wrong_type where t7 match '.*'
+sql_error select * from wrong_type where t7 nmatch '.*'
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+
diff --git a/tests/script/http/httpTestSqlUtc.c b/tests/script/http/httpTestSqlUtc.c
new file mode 100644
index 0000000000000000000000000000000000000000..643c884a1a64d6eaaeb8984cd80d985408edf0e6
--- /dev/null
+++ b/tests/script/http/httpTestSqlUtc.c
@@ -0,0 +1,128 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <pthread.h>
+
+#define MAXLINE 1024
+
+typedef struct {
+    pthread_t pid;
+    int threadId;
+    int rows;
+    int tables;
+} ThreadObj;
+
+/* Send one SQL statement as a raw HTTP POST to the REST endpoint and print the reply. */
+void post(char *ip,int port,char *page,char *msg) {
+    int sockfd,n;
+    char recvline[MAXLINE];
+    struct sockaddr_in servaddr;
+    char content[4096];
+    char content_page[50];
+    sprintf(content_page,"POST /%s HTTP/1.1\r\n",page);
+    char content_host[50];
+    sprintf(content_host,"HOST: %s:%d\r\n",ip,port);
+    char content_type[] = "Content-Type: text/plain\r\n";
+    char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+    char content_len[50];
+    sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg));
+    sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg);
+    if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) {
+        printf("socket error\n");
+    }
+    bzero(&servaddr,sizeof(servaddr));
+    servaddr.sin_family = AF_INET;
+    servaddr.sin_port = htons(port);
+    if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) {
+        printf("inet_pton error\n");
+    }
+    if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) {
+        printf("connect error\n");
+    }
+    write(sockfd,content,strlen(content));
+    printf("%s\n", content);
+    while((n = read(sockfd,recvline,MAXLINE)) > 0) {
+        recvline[n] = 0;
+        if(fputs(recvline,stdout) == EOF) {
+            printf("fputs error\n");
+        }
+    }
+    if(n < 0) {
+        printf("read error\n");
+    }
+}
+
+void singleThread() {
+    char ip[] = "127.0.0.1";
+    int port = 6041;
+    char page[] = "rest/sqlutc";
+    char page1[] = "rest/sqlutc/db1";
+    char page2[] = "rest/sqlutc/db2";
+    char nonexit[] = "rest/sqlutc/xxdb";
+
+    post(ip,port,page,"drop database if exists db1");
+    post(ip,port,page,"create database if not exists db1");
+    post(ip,port,page,"drop database if exists db2");
+    post(ip,port,page,"create database if not exists db2");
+    post(ip,port,page1,"create table t11 (ts timestamp, c1 int)");
+    post(ip,port,page2,"create table t21 (ts timestamp, c1 int)");
+    post(ip,port,page1,"insert into t11 values (now, 1)");
+    post(ip,port,page2,"insert into t21 values (now, 2)");
+    post(ip,port,nonexit,"create database if not exists db3");
+}
+
+/* Worker routine: each thread creates its own database plus a batch of tables and rows. */
+void execute(void *params) {
+    char ip[] = "127.0.0.1";
+    int port = 6041;
+    char page[] = "rest/sqlutc";
+    char *unique = calloc(1, 1024);
+    char *sql = calloc(1, 1024);
+    ThreadObj *pThread = (ThreadObj *)params;
+    printf("Thread %d started\n", pThread->threadId);
+    sprintf(unique, "rest/sqlutc/db%d",pThread->threadId);
+    sprintf(sql, "drop database if exists db%d", pThread->threadId);
+    post(ip,port,page, sql);
+    sprintf(sql, "create database if not exists db%d", pThread->threadId);
+    post(ip,port,page, sql);
+    for (int i = 0; i < pThread->tables; i++) {
+        sprintf(sql, "create table t%d (ts timestamp, c1 int)", i);
+        post(ip,port,unique, sql);
+    }
+    for (int i = 0; i < pThread->rows; i++) {
+        sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId);
+        post(ip,port,unique, sql);
+    }
+    free(unique);
+    free(sql);
+    return;
+}
+
+void multiThread() {
+    int numOfThreads = 100;
+    int numOfTables = 100;
+    int numOfRows = 1;
+    ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj));
+    for (int i = 0; i < numOfThreads; i++) {
+        ThreadObj *pthread = threads + i;
+        pthread_attr_t thattr;
+        pthread->threadId = i + 1;
+        pthread->rows = numOfRows;
+        pthread->tables = numOfTables;
+        pthread_attr_init(&thattr);
+        pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+        pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread);
+    }
+    for (int i = 0; i < numOfThreads; i++) {
+        pthread_join(threads[i].pid, NULL);
+    }
+    free(threads);
+}
+
+int main() {
+    singleThread();
+    multiThread();
+    exit(0);
+}
\ No newline at end of file
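For reference, the requests issued by httpTestSqlUtc.c can be reproduced by hand. A minimal sketch with curl, assuming a local REST service on port 6041 and the default root/taosdata credentials (the hard-coded Basic token cm9vdDp0YW9zZGF0YQ== above is simply root:taosdata base64-encoded):

    curl -u root:taosdata -d 'create database if not exists db1' http://127.0.0.1:6041/rest/sqlutc
    curl -u root:taosdata -d 'insert into t11 values (now, 1)' http://127.0.0.1:6041/rest/sqlutc/db1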
diff --git a/tests/script/http/httpTestSqlt.c b/tests/script/http/httpTestSqlt.c
new file mode 100644
index 0000000000000000000000000000000000000000..2eaaee0f992d802d57b4fc4d684da4622ea3b763
--- /dev/null
+++ b/tests/script/http/httpTestSqlt.c
@@ -0,0 +1,128 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <pthread.h>
+
+#define MAXLINE 1024
+
+typedef struct {
+    pthread_t pid;
+    int threadId;
+    int rows;
+    int tables;
+} ThreadObj;
+
+/* Send one SQL statement as a raw HTTP POST to the REST endpoint and print the reply. */
+void post(char *ip,int port,char *page,char *msg) {
+    int sockfd,n;
+    char recvline[MAXLINE];
+    struct sockaddr_in servaddr;
+    char content[4096];
+    char content_page[50];
+    sprintf(content_page,"POST /%s HTTP/1.1\r\n",page);
+    char content_host[50];
+    sprintf(content_host,"HOST: %s:%d\r\n",ip,port);
+    char content_type[] = "Content-Type: text/plain\r\n";
+    char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+    char content_len[50];
+    sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg));
+    sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg);
+    if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) {
+        printf("socket error\n");
+    }
+    bzero(&servaddr,sizeof(servaddr));
+    servaddr.sin_family = AF_INET;
+    servaddr.sin_port = htons(port);
+    if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) {
+        printf("inet_pton error\n");
+    }
+    if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) {
+        printf("connect error\n");
+    }
+    write(sockfd,content,strlen(content));
+    printf("%s\n", content);
+    while((n = read(sockfd,recvline,MAXLINE)) > 0) {
+        recvline[n] = 0;
+        if(fputs(recvline,stdout) == EOF) {
+            printf("fputs error\n");
+        }
+    }
+    if(n < 0) {
+        printf("read error\n");
+    }
+}
+
+void singleThread() {
+    char ip[] = "127.0.0.1";
+    int port = 6041;
+    char page[] = "rest/sqlt";
+    char page1[] = "rest/sqlt/db1";
+    char page2[] = "rest/sqlt/db2";
+    char nonexit[] = "rest/sqlt/xxdb";
+
+    post(ip,port,page,"drop database if exists db1");
+    post(ip,port,page,"create database if not exists db1");
+    post(ip,port,page,"drop database if exists db2");
+    post(ip,port,page,"create database if not exists db2");
+    post(ip,port,page1,"create table t11 (ts timestamp, c1 int)");
+    post(ip,port,page2,"create table t21 (ts timestamp, c1 int)");
+    post(ip,port,page1,"insert into t11 values (now, 1)");
+    post(ip,port,page2,"insert into t21 values (now, 2)");
+    post(ip,port,nonexit,"create database if not exists db3");
+}
+
+/* Worker routine: each thread creates its own database plus a batch of tables and rows. */
+void execute(void *params) {
+    char ip[] = "127.0.0.1";
+    int port = 6041;
+    char page[] = "rest/sqlt";
+    char *unique = calloc(1, 1024);
+    char *sql = calloc(1, 1024);
+    ThreadObj *pThread = (ThreadObj *)params;
+    printf("Thread %d started\n", pThread->threadId);
+    sprintf(unique, "rest/sqlt/db%d",pThread->threadId);
+    sprintf(sql, "drop database if exists db%d", pThread->threadId);
+    post(ip,port,page, sql);
+    sprintf(sql, "create database if not exists db%d", pThread->threadId);
+    post(ip,port,page, sql);
+    for (int i = 0; i < pThread->tables; i++) {
+        sprintf(sql, "create table t%d (ts timestamp, c1 int)", i);
+        post(ip,port,unique, sql);
+    }
+    for (int i = 0; i < pThread->rows; i++) {
+        sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId);
+        post(ip,port,unique, sql);
+    }
+    free(unique);
+    free(sql);
+    return;
+}
+
+void multiThread() {
+    int numOfThreads = 100;
+    int numOfTables = 100;
+    int numOfRows = 1;
+    ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj));
+    for (int i = 0; i < numOfThreads; i++) {
+        ThreadObj *pthread = threads + i;
+        pthread_attr_t thattr;
+        pthread->threadId = i + 1;
+        pthread->rows = numOfRows;
+        pthread->tables = numOfTables;
+        pthread_attr_init(&thattr);
+        pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+        pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread);
+    }
+    for (int i = 0; i < numOfThreads; i++) {
+        pthread_join(threads[i].pid, NULL);
+    }
+    free(threads);
+}
+
+int main() {
+    singleThread();
+    multiThread();
+    exit(0);
+}
\ No newline at end of file
diff --git a/tests/script/http/makefile b/tests/script/http/makefile
index d1be683edad8a0b8cebfdd37a258c1578b6845cd..50886cf6b1c5f445263f05434839537ca8a5794e 100644
--- a/tests/script/http/makefile
+++ b/tests/script/http/makefile
@@ -1,2 +1,9 @@
 all:
-	gcc -g httpTest.c -o httpTest -lpthread
\ No newline at end of file
+	gcc -g httpTest.c -o httpTest -lpthread
+	gcc -g httpTestSqlt.c -o httpTestSqlt -lpthread
+	gcc -g httpTestSqlUtc.c -o httpTestSqlUtc -lpthread
+
+clean:
+	rm -f httpTest
+	rm -f httpTestSqlt
+	rm -f httpTestSqlUtc
\ No newline at end of file
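With the extended makefile, all three HTTP test clients build in one pass. A minimal usage sketch, assuming gcc is available and a taosd instance with the RESTful service enabled is listening on 127.0.0.1:6041 (the address hard-coded in the tests):

    cd tests/script/http
    make                 # builds httpTest, httpTestSqlt and httpTestSqlUtc
    ./httpTestSqlt       # exercises the /rest/sqlt endpoint
    ./httpTestSqlUtc     # exercises the /rest/sqlutc endpoint
    make clean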