diff --git a/.gitmodules b/.gitmodules index a2266c46afd180b52d3aa19003380078894f6a4b..3d721fa8954023f92f8dcc70b09a1424d0104bbe 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,12 +1,12 @@ [submodule "src/connector/go"] path = src/connector/go - url = git@github.com:taosdata/driver-go.git + url = https://github.com/taosdata/driver-go.git [submodule "src/connector/grafanaplugin"] path = src/connector/grafanaplugin - url = git@github.com:taosdata/grafanaplugin.git + url = https://github.com/taosdata/grafanaplugin.git [submodule "src/connector/hivemq-tdengine-extension"] path = src/connector/hivemq-tdengine-extension - url = git@github.com:taosdata/hivemq-tdengine-extension.git + url = https://github.com/taosdata/hivemq-tdengine-extension.git [submodule "tests/examples/rust"] path = tests/examples/rust url = https://github.com/songtianyi/tdengine-rust-bindings.git diff --git a/Jenkinsfile b/Jenkinsfile index 91855a92fb811a7380ea9dca75745d21386f8496..f076a046686fd62a07695cfe3911e1baacf5c5d5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -235,11 +235,18 @@ pipeline { npm install td2.0-connector > /dev/null 2>&1 node nodejsChecker.js host=localhost node test1970.js + cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport + npm install td2.0-connector > /dev/null 2>&1 + node nanosecondTest.js + ''' sh ''' cd ${WKC}/tests/examples/C#/taosdemo mcs -out:taosdemo *.cs > /dev/null 2>&1 echo '' |./taosdemo -c /etc/taos + cd ${WKC}/tests/connectorTest/C#Test/nanosupport + mcs -out:nano *.cs > /dev/null 2>&1 + echo '' |./nano ''' sh ''' cd ${WKC}/tests/gotest @@ -264,12 +271,12 @@ pipeline { ''' } timeout(time: 60, unit: 'MINUTES'){ - // sh ''' - // cd ${WKC}/tests/pytest - // rm -rf /var/lib/taos/* - // rm -rf /var/log/taos/* - // ./handle_crash_gen_val_log.sh - // ''' + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_crash_gen_val_log.sh + ''' sh ''' cd ${WKC}/tests/pytest rm -rf /var/lib/taos/* diff --git a/README-CN.md b/README-CN.md index a9bc814e8d6f6bef0ad94e29588f62e2e4c0e7f1..d7192c939780a272acdebc94baf474aeaf0d7a38 100644 --- a/README-CN.md +++ b/README-CN.md @@ -107,6 +107,12 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话 git submodule update --init --recursive ``` +如果使用 https 协议下载比较慢,可以通过修改 ~/.gitconfig 文件添加以下两行设置使用 ssh 协议下载。需要首先上传 ssh 密钥到 GitHub,详细方法请参考 GitHub 官方文档。 +``` +[url "git@github.com:"] + insteadOf = https://github.com/ +``` + ## 构建 TDengine ### Linux 系统 diff --git a/README.md b/README.md index 2dea05f09d268b0d78de15ab98f3584df055c353..ab9e0348c8547c43bdbcb4df44a88c53429971e3 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,12 @@ so you should run this command in the TDengine directory to install them: git submodule update --init --recursive ``` +You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. +``` +[url "git@github.com:"] + insteadOf = https://github.com/ +``` + ## Build TDengine ### On Linux platform diff --git a/cmake/install.inc b/cmake/install.inc index e9ad240a793b9736edbe5769c6af12276e13a1a6..7ea2ba8da0af79c15378cda956a330b357804c5a 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.35-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index 18bdc15d30430516c3ae6c847fc448477003dd66..df5a82517183f967aaaeb6767804cefa795301a1 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -40,17 +40,19 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [超级表管理](/taos-sql#super-table):添加、删除、查看、修改超级表 * [标签管理](/taos-sql#tags):增加、删除、修改标签 * [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入 -* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等 +* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、嵌套查询、UINON、JOIN、查询结果手动分页等 * [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 * [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理 * [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件 +* [UDF](/taos-sql/udf):用户定义函数的创建和管理方法 * [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码 ## [高效写入数据](/insert) -* [SQL写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录 -* [Prometheus写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入 -* [Telegraf写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入 +* [SQL 写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录 +* [Schemaless 写入](/insert#schemaless):免于预先建表,将数据直接写入时自动维护元数据结构 +* [Prometheus 写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入 +* [Telegraf 写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入 * [EMQ X Broker](/insert#emq):配置EMQ X,不用任何代码,就可将MQTT数据直接写入 * [HiveMQ Broker](/insert#hivemq):配置HiveMQ,不用任何代码,就可将MQTT数据直接写入 diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md index edfa7af7e9a24be9a6d6ab1de3591deb48e22b3a..050046645c24e7db58ef2f39683433c3a4b53169 100644 --- a/documentation20/cn/01.evaluation/docs.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -2,28 +2,27 @@ ## TDengine 简介 -TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。 +TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,TDengine 在时序空间大数据处理上,有着自己独到的优势。 -TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,它具有如下鲜明的特点: +TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网和工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,TDengine 具有如下鲜明的特点: * __10 倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。 -* __硬件或云服务成本降至 1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的 1/10。 +* __硬件或云服务成本降至 1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储占用不到通用数据库的 1/10。 * __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。 * __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。 -* __与第三方工具无缝连接__:不用一行代码,即可与 Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R 等集成。后续将支持 OPC, Hadoop, Spark 等,BI 工具也将无缝连接。 -* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准 SQL,支持 RESTful,支持 Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。 +* __高可用性和水平扩展__:通过分布式架构和一致性算法,通过多复制和集群特性,TDengine确保了高可用性和水平扩展性以支持关键任务应用程序。 +* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准 SQL,支持 RESTful,支持 
Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。 +* __核心开源__:除了一些辅助功能外,TDengine的核心是开源的。企业再也不会被数据库绑定了。这使生态更加强大,产品更加稳定,开发者社区更加活跃。 采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。 ![TDengine技术生态图](page://images/eco_system.png)
图 1. TDengine技术生态图
- ## TDengine 总体适用场景 作为一个 IoT 大数据平台,TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。 - ### 数据源特点和需求 从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。 @@ -64,4 +63,3 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发 |要求系统可靠运行| | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。| |要求运维学习成本可控| | | √ |同上。| |要求市场有大量人才储备| √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。| - diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md index 8adafc73c21bc915a4564ccf530441bf33a16bda..3e9877b4465eac2ca05d99c88a620a0c6bf89689 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -250,7 +250,7 @@ vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和 创建DB时,系统并不会马上分配资源。但当创建一张表时,系统将看是否有已经分配的vnode, 且该vnode是否有空余的表空间,如果有,立即在该有空位的vnode创建表。如果没有,系统将从集群中,根据当前的负载情况,在一个dnode上创建一新的vnode, 然后创建表。如果DB有多个副本,系统不是只创建一个vnode,而是一个vgroup(虚拟数据节点组)。系统对vnode的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。 -每张表的meda data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。 +每张表的meta data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。 ### 数据分区 diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index f055b0c25ba4811336084d6a2a58d6752b9db1e5..556d51759cb126f3b49b032b6efeb7e9924f864c 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -2,7 +2,7 @@ TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 -## SQL写入 +## SQL 写入 应用通过C/C++、JDBC、GO、C#或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: ```mysql @@ -27,11 +27,74 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。 -## Prometheus直接写入 +## Schemaless 写入 + +在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供 Schemaless 写入方式,可以免于预先创建超级表/数据子表,而是随着数据写入,自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless) 章节。这里对 Schemaless 的数据表达格式进行描述。 + +### Schemaless 数据行协议 + +Schemaless 采用一个字符串来表达最终存储的一个数据行(可以向 Schemaless 写入 API 中一次传入多个字符串来实现多个数据行的批量写入),其格式约定如下: +```json +measurement,tag_set field_set timestamp +``` + +其中, +* measurement 将作为数据表名。它与 tag_set 之间使用一个英文逗号来分隔。 +* tag_set 将作为标签数据,其格式形如 `=,=`,也即可以使用英文逗号来分隔多个标签数据。它与 field_set 之间使用一个半角空格来分隔。 +* field_set 将作为普通列数据,其格式形如 `=,=`,同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。 +* timestamp 即本行数据对应的主键时间戳。 + +在 Schemaless 的数据行协议中,tag_set、field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说: +* 如果两边有英文双引号,表示 BIANRY(32) 类型。例如 `"abc"`。 +* 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(32) 类型。例如 `L"报错信息"`。 +* 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号) +* 数值类型将通过后缀来区分数据类型: + - 没有后缀,为 FLOAT 类型; + - 后缀为 f32,为 FLOAT 类型; + - 后缀为 f64,为 DOUBLE 类型; + - 后缀为 i8,表示为 TINYINT (INT8) 类型; + - 后缀为 i16,表示为 SMALLINT (INT16) 类型; + - 后缀为 i32,表示为 INT (INT32) 类型; + - 后缀为 i64,表示为 BIGINT (INT64) 类型; +* t, T, true, True, TRUE, f, 
F, false, False 将直接作为 BOOL 型来处理。 + +timestamp 位置的时间戳通过后缀来声明时间精度,具体如下: +* 不带任何后缀的长整数会被当作微秒来处理; +* 当后缀为 s 时,表示秒时间戳; +* 当后缀为 ms 时,表示毫秒时间戳; +* 当后缀为 us 时,表示微秒时间戳; +* 当后缀为 ns 时,表示纳秒时间戳; +* 当时间戳为 0 时,表示采用客户端的当前时间(因此,同一批提交的数据中,时间戳 0 会被解释为同一个时间点,于是就有可能导致时间戳重复)。 + +例如,如下 Schemaless 数据行表示:向名为 st 的超级表下的 t1 标签为 3(BIGINT 类型)、t2 标签为 4(DOUBLE 类型)、t3 标签为 "t3"(BINARY 类型)的数据子表,写入 c1 列为 3(BIGINT 类型)、c2 列为 false(BOOL 类型)、c3 列为 "passit"(NCHAR 类型)、c4 列为 4(DOUBLE 类型)、主键时间戳为 1626006833639000000(纳秒精度)的一行数据。 +```json +st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns +``` + +需要注意的是,如果描述数据类型后缀时使用了错误的大小写,或者为数据指定的数据类型有误,均可能引发报错提示而导致数据写入失败。 + +### Schemaless 的处理逻辑 + +Schemaless 按照如下原则来处理行数据: +1. 当 tag_set 中有 ID 字段时,该字段的值将作为数据子表的表名。 +2. 没有 ID 字段时,将使用 `measurement + tag_value1 + tag_value2 + ...` 的 md5 值来作为子表名。 +3. 如果指定的超级表名不存在,则 Schemaless 会创建这个超级表。 +4. 如果指定的数据子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。 +5. 如果数据行中指定的标签列或普通列不存在,则 Schemaless 会在超级表中增加对应的标签列或普通列(只增不减)。 +6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 +7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,那么 Schemaless 会增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 +8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 +9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 + +**注意:**Schemaless 所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。 + +关于 Schemaless 的字符串编码处理、时区设置等,均会沿用 TAOSC 客户端的设置。 + +## Prometheus 直接写入 [Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需对Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用Bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 -### 从源代码编译blm_prometheus +### 从源代码编译 blm_prometheus 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 @@ -46,11 +109,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。 -### 安装Prometheus +### 安装 Prometheus 通过Prometheus的官网下载安装。具体请见:[下载地址](https://prometheus.io/download/)。 -### 配置Prometheus +### 配置 Prometheus 参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置: @@ -60,7 +123,8 @@ go build 启动Prometheus后,可以通过taos客户端查询确认数据是否成功写入。 -### 启动blm_prometheus程序 +### 启动 blm_prometheus 程序 + blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。 ```bash --tdengine-name @@ -94,7 +158,8 @@ remote_write: - url: "http://10.1.2.3:8088/receive" ``` -### 查询prometheus写入数据 +### 查询 prometheus 写入数据 + prometheus产生的数据格式如下: ```json { @@ -105,10 +170,10 @@ prometheus产生的数据格式如下: instance="192.168.99.116:8443", job="kubernetes-apiservers", le="125000", - resource="persistentvolumes", s - cope="cluster", + resource="persistentvolumes", + scope="cluster", verb="LIST", - version=“v1" + version="v1" } } ``` @@ -118,11 +183,11 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf直接写入 +## Telegraf 直接写入 [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 -### 从源代码编译blm_telegraf 
+### 从源代码编译 blm_telegraf 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: @@ -139,11 +204,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。 -### 安装Telegraf +### 安装 Telegraf 目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。 -### 配置Telegraf +### 配置 Telegraf 修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。 @@ -160,7 +225,8 @@ go build 关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 -### 启动blm_telegraf程序 +### 启动 blm_telegraf 程序 + blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。 ```bash @@ -196,7 +262,7 @@ blm_telegraf对telegraf提供服务的端口号。 url = "http://10.1.2.3:8089/telegraf" ``` -### 查询telegraf写入数据 +### 查询 telegraf 写入数据 telegraf产生的数据格式如下: ```json diff --git a/documentation20/cn/06.queries/docs.md b/documentation20/cn/06.queries/docs.md index 294a9721e1dd4b9ea2e60308a48372dd83395010..32b74d1b23416814b39addb68303587ecc0ba3f8 100644 --- a/documentation20/cn/06.queries/docs.md +++ b/documentation20/cn/06.queries/docs.md @@ -3,7 +3,7 @@ ## 主要查询功能 -TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能: +TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, C#, Python, Node.js 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能: - 单列、多列数据查询 - 标签和数值的多种过滤条件:>, <, =, <>, like 等 diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index def8d4a905eaa6ab63256673aad04bd159a5478d..b4537adad6f014712911d568a948b81f866b45f4 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -68,18 +68,18 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES( TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: -| TDengine DataType | Java DataType | -| ----------------- | ------------------ | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | -| SMALLINT | java.lang.Short | -| TINYINT | java.lang.Byte | -| BOOL | java.lang.Boolean | -| BINARY | byte array | -| NCHAR | java.lang.String | +| TDengine DataType | JDBCType (driver 版本 < 2.0.24) | JDBCType (driver 版本 >= 2.0.24) | +| ----------------- | ------------------ | ------------------ | +| TIMESTAMP | java.lang.Long | java.sql.Timestamp | +| INT | java.lang.Integer | java.lang.Integer | +| BIGINT | java.lang.Long | java.lang.Long | +| FLOAT | java.lang.Float | java.lang.Float | +| DOUBLE | java.lang.Double | java.lang.Double | +| SMALLINT | java.lang.Short | java.lang.Short | +| TINYINT | java.lang.Byte | java.lang.Byte | +| BOOL | java.lang.Boolean | java.lang.Boolean | +| BINARY | java.lang.String | byte array | +| NCHAR | java.lang.String | java.lang.String | ## 安装Java Connector diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index f132ff979df39cd36cd554a6e83ffbd950ef9bb8..3167404f8067610f0bf5f74fe41320decdcbcdf0 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -312,7 +312,7 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 ### 参数绑定 API -除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。 
+除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。文档中有时也会把此功能称为“原生接口写入”。 从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下: 1. 调用 `taos_stmt_init` 创建参数绑定对象; @@ -403,6 +403,25 @@ typedef struct TAOS_MULTI_BIND { (2.1.3.0 版本新增) 用于在其他 stmt API 返回错误(返回错误码或空指针)时获取错误信息。 + +### Schemaless 方式写入接口 + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](https://www.taosdata.com/cn/documentation/insert#schemaless) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `int taos_insert_lines(TAOS* taos, char* lines[], int numLines)` + + (2.2.0.0 版本新增) + 以 Schemaless 格式写入多行数据。其中: + * taos:调用 taos_connect 返回的数据库连接。 + * lines:由 char 字符串指针组成的数组,指向本次想要写入数据库的多行数据。 + * numLines:lines 数据的总行数。 + + 返回值为 0 表示写入成功,非零值表示出错。具体错误代码请参见 [taoserror.h](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) 文件。 + + 说明: + 1. 此接口是一个同步阻塞式接口,使用时机与 `taos_query()` 一致。 + 2. 在调用此接口之前,必须先调用 `taos_select_db()` 来确定目前是在向哪个 DB 来写入。 + ### 连续查询接口 TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下: @@ -757,7 +776,7 @@ curl -u username:password -d '' :/rest/sql/[db_name] - data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。 - rows: 表明总共多少行数据。 -column_meta 中的列类型说明: +column_meta 中的列类型说明: * 1:BOOL * 2:TINYINT * 3:SMALLINT @@ -1147,7 +1166,7 @@ var affectRows = cursor.execute('insert into test.weather values(now, 22.3, 34); execute方法的返回值为该语句影响的行数,上面的sql向test库的weather表中,插入了一条数据,则返回值affectRows为1。 -TDengine目前还不支持update和delete语句。 +TDengine 目前还不支持 delete 语句。但从 2.0.8.0 版本开始,可以通过 `CREATE DATABASE` 时指定的 UPDATE 参数来启用对数据行的 update。 #### 查询 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 35eff03423e1400ff3339bb493e1816e9d899056..d26cd3c845527084612d1a876076838f5d0f9f1a 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -216,7 +216,7 @@ taosd -C | 98 | maxBinaryDisplayWidth | | **C** | | Taos shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏 | 5 - | 30 | 实际上限按以下规则计算:如果字段值的长度大于 maxBinaryDisplayWidth,则显示上限为 **字段名长度** 和 **maxBinaryDisplayWidth** 的较大者。否则,上限为 **字段名长度** 和 **字段值长度** 的较大者。可在 shell 中通过命令 set max_binary_display_width nn动态修改此选项 | | 99 | queryBufferSize | | **S** | MB | 为所有并发查询占用保留的内存大小。 | | | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。(2.0.15 以前的版本中,此参数的单位是字节) | | 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 | -| 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 | +| 101 | update | | **S** | | 允许更新已存在的数据行 | 0:不允许更新;1:允许整行更新;2:允许部分列更新。(2.1.7.0 版本开始此参数支持设为 2,在此之前取值只能是 [0, 1]) | 0 | 2.0.8.0 版本之前,不支持此参数。 | | 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。(2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1]) | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 | | 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | | | 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 | @@ -239,7 +239,7 @@ taosd -C | 10 | fsync | 毫秒 | 当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。 | | 3000 | | 11 | replica | | (可通过 alter database 修改)副本个数 | 1-3 | 1 | | 12 | precision | | 时间戳精度标识(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 
文件中不支持此参数。)(从 2.1.5.0 版本开始,新增对纳秒时间精度的支持) | ms 表示毫秒,us 表示微秒,ns 表示纳秒 | ms | -| 13 | update | | 是否允许更新 | 0:不允许;1:允许 | 0 | +| 13 | update | | 是否允许数据更新(从 2.1.7.0 版本开始此参数支持 0~2 的取值范围,在此之前取值只能是 [0, 1];而 2.0.8.0 之前的版本在 SQL 指令中不支持此参数。) | 0:不允许;1:允许更新整行;2:允许部分列更新。 | 0 | | 14 | cacheLast | | (可通过 alter database 修改)是否在内存中缓存子表的最近数据(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能 | 0 | 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: @@ -568,6 +568,35 @@ COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会 需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。 + +## 浮点数有损压缩 + +在车联网等物联网智能应用场景中,经常会采集和存储海量的浮点数类型数据,如果能更高效地对此类数据进行压缩,那么不但能够节省数据存储的硬件资源,也能够因降低磁盘 I/O 数据量而提升系统性能表现。 + +从 2.1.6.0 版本开始,TDengine 提供一种名为 TSZ 的新型数据压缩算法,无论设置为有损压缩还是无损压缩,都能够显著提升浮点数类型数据的压缩率表现。目前该功能以可选模块的方式进行发布,可以通过添加特定的编译参数来启用该功能(也即常规安装包中暂未包含该功能)。 + +**需要注意的是,该功能一旦启用,效果是全局的,也即会对系统中所有的 FLOAT、DOUBLE 类型的数据生效。同时,在启用了浮点数有损压缩功能后写入的数据,也无法被未启用该功能的版本载入,并有可能因此而导致数据库服务报错退出。** + +### 创建支持 TSZ 压缩算法的 TDengine 版本 + +TSZ 模块保存在单独的代码仓库 https://github.com/taosdata/TSZ 中。可以通过以下步骤创建包含此模块的 TDengine 版本: +1. TDengine 中的插件目前只支持通过 SSH 的方式拉取和编译,所以需要自己先配置好通过 SSH 拉取 GitHub 代码的环境。 +2. `git clone git@github.com:taosdata/TDengine -b your_branchname --recurse-submodules` 通过 `--recurse-submodules` 使依赖模块的源代码可以被一并下载。 +3. `mkdir debug && cd debug` 进入单独的编译目录。 +4. `cmake .. -DTSZ_ENABLED=true` 其中参数 `-DTSZ_ENABLED=true` 表示在编译过程中加入对 TSZ 插件功能的支持。如果成功激活对 TSZ 模块的编译,那么 CMAKE 过程中也会显示 `build with TSZ enabled` 字样。 +5. 编译成功后,包含 TSZ 浮点压缩功能的插件便已经编译进了 TDengine 中了,可以通过调整 taos.cfg 中的配置参数来使用此功能了。 + +### 通过配置文件来启用 TSZ 压缩算法 + +如果要启用 TSZ 压缩算法,除了在 TDengine 的编译过程需要声明启用 TSZ 模块之外,还需要在 taos.cfg 配置文件中对以下参数进行设置: +* lossyColumns:配置要进行有损压缩的浮点数数据类型。参数值类型为字符串,含义为:空 - 关闭有损压缩;float - 只对 FLOAT 类型进行有损压缩;double - 只对 DOUBLE 类型进行有损压缩;float|double:对 FLOAT 和 DOUBLE 类型都进行有损压缩。默认值是“空”,也即关闭有损压缩。 +* fPrecision:设置 float 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 FLOAT,最小值为 0.0,最大值为 100,000.0。缺省值为 0.00000001(1E-8)。 +* dPrecision:设置 double 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 DOUBLE,最小值为 0.0,最大值为 100,000.0。缺省值为 0.0000000000000001(1E-16)。 +* maxRange:表示数据的最大浮动范围。一般无需调整,在数据具有特定特征时可以配合 range 参数来实现极高的数据压缩率。默认值为 500。 +* range:表示数据大体浮动范围。一般无需调整,在数据具有特定特征时可以配合 maxRange 参数来实现极高的数据压缩率。默认值为 100。 + +**注意:**对 cfg 配置文件中参数值的任何调整,都需要重新启动 taosd 才能生效。并且以上选项为全局配置选项,配置后对所有数据库中所有表的 FLOAT 及 DOUBLE 类型的字段生效。 + ## 文件目录结构 安装TDengine后,默认会在操作系统中生成下列目录或文件: diff --git a/documentation20/cn/12.taos-sql/02.udf/docs.md b/documentation20/cn/12.taos-sql/02.udf/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..cced65db802d9a589602fe9371b0468605cb4819 --- /dev/null +++ b/documentation20/cn/12.taos-sql/02.udf/docs.md @@ -0,0 +1,147 @@ +# UDF(用户定义函数) + +在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF 功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。 + +从 2.2.0.0 版本开始,TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。 + +## 用 C/C++ 语言来定义 UDF + +TDengine 提供 3 个 UDF 的源代码示例,分别为: +* [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) +* [abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) +* [sum_double.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sum_double.c) + +### 无需中间变量的标量函数 + +[add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现。其功能为:对传入的一个数据列(可能因 WHERE 
子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。 + +这一具体的处理逻辑在函数 `void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` 中定义。这类用于实现 UDF 的基础计算逻辑的函数,我们称为 udfNormalFunc,也就是对行数据块的标量计算函数。需要注意的是,udfNormalFunc 的参数项是固定的,用于按照约束完成与引擎之间的数据交换。 + +- udfNormalFunc 中各参数的具体含义是: + * data:存有输入的数据。 + * itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](https://www.taosdata.com/cn/documentation/connector#column_meta)。例如 4 用于表示 INT 型。 + * iBytes:输入数据中每个值会占用的字节数。 + * numOfRows:输入数据的总行数。 + * ts:主键时间戳在输入中的列数据。 + * dataOutput:输出数据的缓冲区。 + * interBuf:系统使用的中间临时缓冲区,通常用户逻辑无需对 interBuf 进行处理。 + * tsOutput:主键时间戳在输出时的列数据。 + * numOfOutput:输出数据的个数。 + * oType:输出数据的类型。取值含义与 itype 参数一致。 + * oBytes:输出数据中每个值会占用的字节数。 + * buf:计算过程的中间变量缓冲区。 + +其中 buf 参数需要用到一个自定义结构体 SUdfInit。在这个例子中,因为 add_one 的计算过程无需用到中间变量缓存,所以可以把 SUdfInit 定义成一个空结构体。 + +### 无需中间变量的聚合函数 + +[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。 + +其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`),再将每个数据块的计算结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成每个子表的聚合结果。如果查询指令涉及超级表,那么最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把子表的计算结果聚合为超级表的计算结果。 + +值得注意的是,udfNormalFunc、udfMergeFunc、udfFinalizeFunc 之间,函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名。udfMergeFunc 的函数名后缀 `_merge`、udfFinalizeFunc 的函数名后缀 `_finalize`,是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。 + +- udfMergeFunc 用于对计算中间结果进行聚合。本例中 udfMergeFunc 对应的实现函数为 `void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`,其中各参数的具体含义是: + * data:udfNormalFunc 的输出组合在一起的数据,也就成为了 udfMergeFunc 的输入。 + * numOfRows:data 中数据的行数。 + * dataOutput:输出数据的缓冲区。 + * numOfOutput:输出数据的个数。 + * buf:计算过程的中间变量缓冲区。 + +- udfFinalizeFunc 用于对计算结果进行最终聚合。本例中 udfFinalizeFunc 对应的实现函数为 `void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`,其中各参数的具体含义是: + * dataOutput:输出数据的缓冲区。对 udfFinalizeFunc 来说,其输入数据也来自于这里。 + * interBuf:系统使用的中间临时缓冲区,与 udfNormalFunc 中的同名参数含义一致。 + * numOfOutput:输出数据的个数。 + * buf:计算过程的中间变量缓冲区。 + +同样因为 abs_max 的计算过程无需用到中间变量缓存,所以同样是可以把 SUdfInit 定义成一个空结构体。 + +### 使用中间变量的聚合函数 + +[sum_double.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sum_double.c) 也是一个聚合函数,功能是对一组数据输出求和结果的倍数。 + +出于功能演示的目的,在这个用户定义函数的实现方法中,用到了中间变量缓冲区 buf。因此,在这个源代码文件中,SUdfInit 就不再是一个空的结构体,而是定义了缓冲区的具体存储内容。 + +也正是因为用到了中间变量缓冲区,因此就需要对这一缓冲区进行初始化和资源释放。具体来说,也即对应 udfInitFunc(本例中,其实际的函数名是 `sum_double_init`)和 udfDestroyFunc(本例中,其实际的函数名是 `sum_double_destroy`)。其函数名命名规则同样是采取以 udfNormalFunc 的实际函数名为前缀,以 `_init` 和 `_destroy` 为后缀。系统会在初始化和资源释放时调用对应名称的函数。 + +- udfInitFunc 用于初始化中间变量缓冲区中的变量和内容。本例中 udfInitFunc 对应的实现函数为 `int sum_double_init(SUdfInit* buf)`,其中各参数的具体含义是: + * buf:计算过程的中间变量缓冲区。 + +- udfDestroyFunc 用于释放中间变量缓冲区中的变量和内容。本例中 udfDestroyFunc 对应的实现函数为 `void sum_double_destroy(SUdfInit* buf)`,其中各参数的具体含义是: + * buf:计算过程的中间变量缓冲区。 + +注意,UDF 的实现过程中需要小心处理对中间变量缓冲区的使用,如果使用不当则有可能导致内存泄露或对资源的过度占用,甚至导致系统服务进程崩溃等。 + +### UDF 实现方式的规则总结 + +根据所要实现的 UDF 类型不同,用户所要实现的功能函数内容也会有所区别: +* 无需中间变量的标量函数:结构体 SUdfInit 可以为空,需实现 udfNormalFunc。 +* 无需中间变量的聚合函数:结构体 SUdfInit 可以为空,需实现 udfNormalFunc、udfMergeFunc、udfFinalizeFunc。 +* 使用中间变量的标量函数:结构体 SUdfInit 需要具体定义,并需实现 udfNormalFunc、udfInitFunc、udfDestroyFunc。 +* 使用中间变量的聚合函数:结构体 SUdfInit 需要具体定义,并需实现 udfNormalFunc、udfInitFunc、udfDestroyFunc、udfMergeFunc、udfFinalizeFunc。 + +## 编译 UDF + +用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为 .so 链接库,之后才能载入 
TDengine 系统。 + +例如,按照上一章节描述的规则准备好了用户定义函数的源代码 add_one.c,那么可以执行如下指令编译得到动态链接库文件: +```bash +gcc -g -O0 -fPIC -shared add_one.c -o add_one.so +``` + +这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。 + +## 在系统中管理和使用 UDF + +### 创建 UDF + +用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 + +在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。 + +- 创建标量函数:`CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize B;` + * ids(X):标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + * ids(Y):包含 UDF 函数实现的动态链接库的库文件路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + * typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; + * B:系统使用的中间临时缓冲区大小,单位是字节,最小 0,最大 512,通常可以设置为 128。 + + 例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF: + ```sql + CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT bufsize 128; + ``` + +- 创建聚合函数:`CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize B;` + * ids(X):聚合函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + * ids(Y):包含 UDF 函数实现的动态链接库的库文件路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + * typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; + * B:系统使用的中间临时缓冲区大小,单位是字节,最小 0,最大 512,通常可以设置为 128。 + + 例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF: + ```sql + CREATE FUNCTION abs_max AS "/home/taos/udf_example/abs_max.so" OUTPUTTYPE BIGINT bufsize 128; + ``` + +### 管理 UDF + +- 删除指定名称的用户定义函数:`DROP FUNCTION ids(X);` + * ids(X):此参数的含义与 CREATE 指令中的 ids(X) 参数一致,也即要删除的函数的名字,例如 `DROP FUNCTION add_one;`。 +- 显示系统中当前可用的所有 UDF:`SHOW FUNCTIONS;` + +### 调用 UDF + +在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: +```sql +SELECT X(c) FROM table/stable; +``` + +表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 + +## UDF 的一些使用限制 + +在当前版本下,使用 UDF 存在如下这些限制: +1. 在创建和调用 UDF 时,服务端和客户端都只支持 Linux 操作系统; +2. UDF 不能与系统内建的 SQL 函数混合使用; +3. UDF 只支持以单个数据列作为输入; +4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中; +5. 无法通过 RESTful 接口来创建 UDF; +6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。 diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index b96a9c3d285e76384ac8dad64473764bcc76942b..dabbb3d2af598c84f6c55f921d524cb9ddccb83b 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -70,7 +70,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据; - 2) UPDATE 标志数据库支持更新相同时间戳数据; + 2) UPDATE 标志数据库支持更新相同时间戳数据;(从 2.1.7.0 版本开始此参数支持设为 2,表示允许部分列更新,也即更新数据行时未被设置的列会保留原值。)(从 2.0.8.0 版本开始支持此参数。注意此参数不能通过 `ALTER DATABASE` 指令进行修改。) 3) 数据库名最大长度为33; @@ -573,16 +573,24 @@ Query OK, 2 row(s) in set (0.003112s) 注意:普通表的通配符 * 中并不包含 _标签列_。 -##### 获取标签列的去重取值 +#### 获取标签列或普通列的去重取值 -从 2.0.15 版本开始,支持在超级表查询标签列时,指定 DISTINCT 关键字,这样将返回指定标签列的所有不重复取值。 -```mysql -SELECT DISTINCT tag_name FROM stb_name; +从 2.0.15.0 版本开始,支持在超级表查询标签列时,指定 DISTINCT 关键字,这样将返回指定标签列的所有不重复取值。注意,在 2.1.6.0 版本之前,DISTINCT 只支持处理单个标签列,而从 2.1.6.0 版本开始,DISTINCT 可以对多个标签列进行处理,输出这些标签列取值不重复的组合。 +```sql +SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name; ``` -注意:目前 DISTINCT 关键字只支持对超级表的标签列进行去重,而不能用于普通列。 +从 2.1.7.0 版本开始,DISTINCT 也支持对数据子表或普通表进行处理,也即支持获取单个普通列的不重复取值,或多个普通列取值的不重复组合。 +```sql +SELECT DISTINCT col_name [, col_name ...] FROM tb_name; +``` +需要注意的是,DISTINCT 目前不支持对超级表中的普通列进行处理。如果需要进行此类操作,那么需要把超级表放在子查询中,再对子查询的计算结果执行 DISTINCT。 +说明: +1. 
cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。 +2. 由于浮点数天然的精度机制原因,在特定情况下,对 FLOAT 和 DOUBLE 列使用 DISTINCT 并不能保证输出值的完全唯一性。 +3. 在当前版本下,DISTINCT 不能在嵌套查询的子查询中使用,也不能与聚合函数、GROUP BY、或 JOIN 在同一条语句中混用。 #### 结果集列名 @@ -730,6 +738,34 @@ Query OK, 1 row(s) in set (0.001091s) 5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 + +### JOIN 子句 + +从 2.2.0.0 版本开始,TDengine 对内连接(INNER JOIN)中的自然连接(Natural join)操作实现了完整的支持。也即支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间”进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。 + +在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如: +```sql +SELECT * +FROM temp_tb_1 t1, pressure_tb_1 t2 +WHERE t1.ts = t2.ts +``` + +在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如: +```sql +SELECT * +FROM temp_stable t1, temp_stable t2 +WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; +``` + +类似地,也可以对多个子查询的查询结果进行 JOIN 操作。 + +注意,JOIN 操作存在如下限制要求: +1. 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。 +2. 在包含 JOIN 操作的查询语句中不支持 FILL。 +3. 暂不支持参与 JOIN 操作的表之间聚合后的四则运算。 +4. 不支持只对其中一部分表做 GROUP BY。 +5. JOIN 查询的不同表的过滤条件之间不能为 OR。 + ### 嵌套查询 @@ -757,7 +793,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...; * 外层查询不支持 GROUP BY。 -### UNION ALL 操作符 +### UNION ALL 子句 ```mysql SELECT ... @@ -1258,9 +1294,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 说明:(从 2.0.15.0 版本开始新增此函数)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。 + 说明:(从 2.0.15.0 版本开始新增此函数) + + 1)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。 - INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。 + 2)INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。 + + 3)单个 INTERP 函数查询只能够针对一个时间点进行查询,如果需要返回等时间间隔的断面数据,可以通过 INTERP 配合 EVERY 的方式来进行查询处理(而不是使用 INTERVAL),其含义是每隔固定长度的时间进行插值。 示例: ```sql @@ -1284,6 +1324,18 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 1 row(s) in set (0.003056s) ``` + 如下所示代码表示在时间区间 `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']` 中每隔 5 毫秒 进行一次断面计算。 + + ```sql + taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a); + ts | interp(current) | + ================================================= + 2017-07-14 18:40:00.000 | 10.04179 | + 2017-07-14 18:40:00.010 | 10.16123 | + Query OK, 2 row(s) in set (0.003487s) + + ``` + ### 计算函数 - **DIFF** @@ -1409,8 +1461,6 @@ SELECT function_list FROM tb_name SELECT function_list FROM stb_name [WHERE where_condition] - [SESSION(ts_col, tol_val)] - [STATE_WINDOW(col)] [INTERVAL(interval [, offset]) [SLIDING sliding]] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] [GROUP BY tags] @@ -1421,8 +1471,8 @@ SELECT function_list FROM stb_name 1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。 * 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 1 微秒(1u),当然如果所查询的 DATABASE 的时间精度设置为毫秒级,那么允许的最短时间间隔为 1 毫秒(1a)。 * **注意:**用到 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。 - 2. 
状态窗口:使用整数或布尔值来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。 - 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。 + 2. 状态窗口:使用整数或布尔值来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。(状态窗口暂不支持对超级表使用) + 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用) - WHERE 语句可以指定查询的起止时间和其他过滤条件。 - FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种: 1. 不进行填充:NONE(默认填充模式)。 @@ -1470,12 +1520,6 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进行 GROUP BY,前提是:仅限一列且该列的唯一值小于 10 万个。 -**JOIN 操作的限制** - -TAOS SQL 支持表之间按主键时间戳来 join 两张表的列,暂不支持两个表之间聚合后的四则运算。 - -JOIN 查询的不同表的过滤条件之间不能为 OR。 - **IS NOT NULL 与不为空的表达式适用范围** IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index 3d6f03b30353524d55a4a49ea69625a519fe3ebe..14599079b7c5bf99d736b34504cf59f1112900b0 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -96,6 +96,8 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支 另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows (所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。这种覆盖关系如何选择,取决于对数据的后续使用和统计中,希望以先还是后生成的数据为准。 +此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。 + ## 10. 我怎么创建超过1024列的表? 使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。) diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md index ecbde8c5776e3bd3735aed2bd64906f8bef1afc1..5b2d0dd974203db1dafe8758e673a2f0970c3f17 100644 --- a/documentation20/en/01.evaluation/docs.md +++ b/documentation20/en/01.evaluation/docs.md @@ -6,17 +6,16 @@ TDengine is an innovative Big Data processing product launched by TAOS Data in t One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics: -- **Performance improvement over 10 times**: An innovative data storage structure is defined, with each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general database. +- **Performance improvement over 10 times**: An innovative data storage structure is defined, with every single core that can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general database. 
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases. - **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance. -- **Highly Available and Horizontal Scalable **: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support the mission-critical applications. +- **Highly Available and Horizontal Scalable**: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support mission-critical applications. - **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost. -- **Core is Open Sourced:** Except some auxiliary features, the core of TDengine is open sourced. Enterprise won't be locked by the database anymore. Ecosystem is more strong, product is more stable, and developer communities are more active. +- **Core is Open Sourced:** Except for some auxiliary features, the core of TDengine is open-sourced. Enterprise won't be locked by the database anymore. The ecosystem is more strong, products are more stable, and developer communities are more active. With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, since it makes full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources. ![TDengine Technology Ecosystem](page://images/eco_system.png) -
Figure 1. TDengine Technology Ecosystem
## Overall Scenarios of TDengine @@ -62,4 +61,4 @@ From the perspective of data sources, designers can analyze the applicability of | ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ | | Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. | | Require controllable operation learning cost | | | √ | As above. | -| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. | \ No newline at end of file +| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counseling services. | diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index bc686aebbf0b7696b1355fe45be6d09fcca63411..b9e21b1d4c775876c77b2c9ec999639f30bd0c00 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -13,7 +13,6 @@ In typical industry IoT, Internet of Vehicles and Operation Monitoring scenarios Collected Metrics Tags - Device ID Time Stamp @@ -108,16 +107,16 @@ Each data record contains the device ID, timestamp, collected metrics (current, As the data points are a series of data points over time, the data points generated by IoT, Internet of Vehicles, and Operation Monitoring have some strong common characteristics: -1. metrics are always structured data; -2. there are rarely delete/update operations on collected data; -3. unlike traditional databases, transaction processing is not required; -4. the ratio of writing over reading is much higher than typical Internet applications; -5. data volume is stable and can be predicted according to the number of devices and sampling rate; -6. the user pays attention to the trend of data, not a specific value at a specific time; -7. there is always a data retention policy; -8. the data query is always executed in a given time range and a subset of space; -9. in addition to storage and query operations, various statistical and real-time computing are also required; -10. data volume is huge, a system may generate over 10 billion data points in a day. +1. Metrics are always structured data; +2. There are rarely delete/update operations on collected data; +3. Unlike traditional databases, transaction processing is not required; +4. The ratio of writing over reading is much higher than typical Internet applications; +5. Data volume is stable and can be predicted according to the number of devices and sampling rate; +6. The user pays attention to the trend of data, not a specific value at a specific time; +7. There is always a data retention policy; +8. The data query is always executed in a given time range and a subset of space; +9. In addition to storage and query operations, various statistical and real-time computing are also required; +10. Data volume is huge, a system may generate over 10 billion data points in a day. 
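The sample smart-meter data set above maps naturally onto TDengine's model of one table per data collection point plus one super table per device type. Below is a minimal sketch of that mapping using the C connector; the database name, super table schema, and tag values are illustrative assumptions drawn from the sample table (device IDs such as d1001, current/voltage/phase metrics, location/groupId tags), not a schema mandated by TDengine or by this patch set.

```c
#include <stdio.h>
#include <taos.h>

// Illustrative sketch: one super table for all smart meters and one child table
// per device. Names and schema are assumptions based on the sample data set above.
int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }

  const char *sqls[] = {
      "CREATE DATABASE IF NOT EXISTS power",
      "CREATE STABLE IF NOT EXISTS power.meters "
      "(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
      "TAGS (location BINARY(64), groupId INT)",
      "CREATE TABLE IF NOT EXISTS power.d1001 USING power.meters "
      "TAGS ('Beijing.Chaoyang', 2)",
  };

  for (int i = 0; i < (int)(sizeof(sqls) / sizeof(sqls[0])); i++) {
    TAOS_RES *res = taos_query(conn, sqls[i]);
    if (taos_errno(res) != 0) {
      fprintf(stderr, "\"%s\" failed: %s\n", sqls[i], taos_errstr(res));
    }
    taos_free_result(res);
  }

  taos_close(conn);
  return 0;
}
```

Each device then gets its own child table derived from the same super table, which is what lets the engine exploit the per-device, append-mostly, time-ordered characteristics listed above.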
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency. @@ -156,11 +155,9 @@ The design of TDengine is based on the assumption that one single node or softwa Logical structure diagram of TDengine distributed architecture as following: ![TDengine architecture diagram](page://images/architecture/structure.png) -
Picture 1: TDengine architecture diagram
+Figure 1: TDengine architecture diagram
-A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDEngine application driver (taosc) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through taosc's API. The following is a brief introduction to each logical unit. +A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDEngine application driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. **Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)". @@ -172,7 +169,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. -**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. 
At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster. +**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. ### Node Communication @@ -184,35 +181,39 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted. -**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: 1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; 2: Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3: Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. +**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: + +1. 
Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; +2. Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; +3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. **The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. **Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step. -**Redirection**: No matter about dnode or taosc, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or taosc, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, taosc or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies taosc through messaging interaction among nodes. +**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. 
Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it is not a mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.

### A Typical Data Writing Process

-To explain the relationship between vnode, mnode, taosc and application and their respective roles, the following is an analysis of a typical data writing process.
+To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.

-![ typical process of TDengine](page://images/architecture/message.png) -
Picture 2 typical process of TDengine
+![typical process of TDengine](page://images/architecture/message.png) +
Figure 2: Typical process of TDengine
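From the application's point of view, this whole flow is triggered by one ordinary SQL insert through a connector; everything from meta-data lookup to vnode routing happens inside TAOSC. As a hedged illustration of step 1 of the process listed below (the host, credentials, database, and table schema are assumptions for the sketch, not part of this change), a minimal C client might look like this:

```c
// Minimal sketch of the application side (step 1): the C client library (TAOSC)
// resolves the table's meta-data and routes the insert to the master vnode
// behind taos_query(). Host, credentials, and the table layout are assumptions.
#include <stdio.h>
#include <taos.h>

int main(void) {
  taos_init();
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }
  // Steps 2-5 (meta-data fetch from the mnode, insert to the master vnode) are
  // handled transparently by TAOSC inside this single call.
  TAOS_RES *res = taos_query(conn, "INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31)");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```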
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. -2. taosc checks if meta data existing for the table in the cache. If so, go straight to Step 4. If not, taosc sends a get meta-data request to mnode. -3. Mnode returns the meta-data of the table to taosc. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If taosc does not receive a response from the mnode for a long time, and there are multiple mnodes, taosc will send a request to the next mnode. -4. Taosc initiates an insert request to master vnode. -5. After vnode inserts the data, it gives a reply to taosc, indicating that the insertion is successful. If taosc doesn't get a response from vnode for a long time, taosc will treat this node as offline. In this case, if there are multiple replicas of the inserted database, taosc will issue an insert request to the next vnode in vgroup. -6. Taosc notifies APP that writing is successful. +2. TAOSC checks whether meta-data for the table exists in the cache. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to the mnode. +3. Mnode returns the meta-data of the table to TAOSC. The meta-data contains the schema of the table and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode to which the table belongs; if the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. +4. TAOSC initiates an insert request to the master vnode. +5. After the vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from the vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in the vgroup. +6. TAOSC notifies the APP that writing is successful. -For Step 2 and 3, when taosc starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will inform the mnode EP list in a reply message, so that taosc will re-issue a request to obtain meta-data to the EP of another new mnode. +For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will return the mnode EP list in a reply message, so that TAOSC re-issues the meta-data request to the EP of a new mnode. -For Step 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. +For Steps 4 and 5, without cached information, TAOSC cannot recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it.
If this vnode is not the master, it will reply with the actual master as the new target to which TAOSC shall send the request. Once the reply of successful insertion is obtained, TAOSC will cache the information of the master node. -The above is the process of inserting data, and the processes of querying and computing are the same. Taosc encapsulates and hides all these complicated processes, and it is transparent to applications. +The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and they are transparent to applications. -Through taosc caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), taosc will interact with mnode regularly to automatically update the cache. +Through the TAOSC caching mechanism, the mnode needs to be accessed only when a table is accessed for the first time, so the mnode will not become a system bottleneck. However, because schema and vgroup may change (such as during load balancing), TAOSC will interact with the mnode regularly to automatically update the cache. ## Storage Model and Data Partitioning/Sharding @@ -259,13 +260,14 @@ The load balancing process does not require any manual intervention, and it is t ## Data Writing and Replication Process -If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies taosc to redirect. +If a database has N replicas, its virtual node group has N virtual nodes, but only one is the Master and all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notify TAOSC to redirect. ### Master vnode Writing Process Master Vnode uses a writing process as follows: -Figure 3: TDengine Master writing process +![TDengine Master Writing Process](page://images/architecture/write_master.png) +
Figure 3: TDengine Master writing process
1. Master vnode receives the application data insertion request, verifies it, and moves to the next step; 2. If the system configuration parameter `walLevel` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will flush the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; @@ -278,12 +280,12 @@ Figure 3: TDengine Master writing process For a slave vnode, the write process is as follows: -![TDengine Slave Writing Process](page://images/architecture/write_master.png) -
Picture 3 TDengine Slave Writing Process
+![TDengine Slave Writing Process](page://images/architecture/write_slave.png) +
Figure 4: TDengine Slave Writing Process
-1. Slave vnode receives a data insertion request forwarded by Master vnode. +1. Slave vnode receives a data insertion request forwarded by Master vnode; 2. If the system configuration parameter `walLevel` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will flush the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; -3. Write into memory and add the record to “skip list”; +3. Write into memory and add the record to “skip list”. Compared with the Master vnode, the slave vnode has no forwarding or reply confirmation step, which means two steps fewer. But writing into memory and the WAL is exactly the same. @@ -297,7 +299,7 @@ However, the asynchronous replication has a tiny time window where data can be l 1. Master vnode has finished its 5-step operations, confirmed the success of writing to the APP, and then went down; 2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2; -3. Slave vnode will become the new master, thus losing one record +3. Slave vnode will become the new master, thus losing one record. In theory, with asynchronous replication, there is no guarantee against data loss. However, this window is extremely small: data is lost only if the master and slave fail at the same time, right after the successful write has been confirmed to the application. @@ -352,8 +354,6 @@ When data is written to disk, it is decided whether to compress the data accordi By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, data older than one week is stored on a local hard disk, and data older than four weeks is stored on a network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data across different storage media is done automatically by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. - - dataDir format is as follows: ``` dataDir data_path [tier_level] @@ -361,8 +361,6 @@ dataDir data_path [tier_level] Where data_path is the folder path of the mount point and tier_level is the media storage-tier. The higher the media storage-tier, the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called the special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
- - Suppose a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows: ``` @@ -376,7 +374,6 @@ dataDir /mnt/disk6/taos 2 A mounted disk can also be a non-local network disk, as long as the system can access it. - Note: Tiered Storage is only supported in Enterprise Edition ## Data Query @@ -416,14 +413,14 @@ For the data collected by device D1001, the number of records per hour is counte TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. A STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, and they can be added, deleted, and modified at any time. Applications can aggregate or run statistics over all or a subset of the tables under a STable by specifying tag filters, thus greatly simplifying application development. The process is shown in the following figure: ![Diagram of multi-table aggregation query](page://images/architecture/multi_tables.png) -
Picture 4 Diagram of multi-table aggregation query
+
Figure 5: Diagram of multi-table aggregation query
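From the application's side, the whole multi-table aggregation described in the steps below is expressed as a single SQL statement over a STable with a tag filter, issued through any connector. As a hedged sketch only (the STable `meters`, the tags `location` and `groupid`, and the column names are illustrative assumptions, not part of this change):

```c
// Hypothetical STable aggregation: TAOSC obtains the vnode list for the STable
// from the management node, pushes the tag filter and aggregation down to each
// vnode, and merges the partial results (the steps listed below).
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }
  TAOS_RES *res = taos_query(conn,
      "SELECT AVG(current), MAX(voltage) FROM meters "
      "WHERE location = 'Beijing' GROUP BY groupid");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row;
    int nfields = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    char line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, nfields);   // format one merged result row
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```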
1. Application sends a query condition to the system; -2. taosc sends the STable name to Meta Node(management node); -3. Management node sends the vnode list owned by the STable back to taosc; -4. taosc sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; -5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to taosc; -6. taosc finally aggregates the results returned by multiple data nodes and send them back to application. +2. TAOSC sends the STable name to the Meta Node (management node); +3. The management node sends the vnode list owned by the STable back to TAOSC; +4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; +5. Each vnode first finds, in memory, the set of tables within its own node that meet the tag filters, then scans the stored time-series data, completes the corresponding aggregation calculations, and returns the result to TAOSC; +6. TAOSC finally aggregates the results returned by multiple data nodes and sends them back to the application. Since TDengine stores tag data and time-series data separately in the vnode, by filtering tag data in memory, the set of tables that need to participate in the aggregation operation is found first, which greatly reduces the volume of data scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. diff --git a/documentation20/en/06.queries/docs.md index 7688a941f0fb5b685f592833322906e4c4760b79..d906443153bb7e83cee69da4588554893ce154a3 100644 --- a/documentation20/en/06.queries/docs.md +++ b/documentation20/en/06.queries/docs.md @@ -2,7 +2,7 @@ ## Main Query Features -TDengine uses SQL as the query language. Applications can send SQL statements through C/C++, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions: +TDengine uses SQL as the query language. Applications can send SQL statements through the C/C++, Java, Go, C#, Python, and Node.js connectors, and users can manually execute ad-hoc SQL queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions: - Single-column and multi-column data query - Multiple filters for tags and numeric values: >, <, =, < >, like, etc @@ -96,4 +96,4 @@ Query OK, 5 row(s) in set (0.001521s) In IoT scenarios, it is difficult to synchronize the time stamps of the data collected at each point, but many analysis algorithms (such as FFT) need the collected data to be strictly aligned at equal time intervals. In many systems, users have to write their own programs to handle this, but the down sampling operation of TDengine can be used to solve the problem easily. If there is no collected data in an interval, TDengine also provides an interpolation calculation function.
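As a hedged illustration of the down sampling and interpolation described above (the table `d1001`, the column `current`, and the one-hour window are assumptions for the sketch, not part of this change), the alignment can be expressed with INTERVAL and FILL in a single query from the C client:

```c
// Hypothetical down sampling sketch: average a column into 10-second windows
// over the last hour and fill empty windows by linear interpolation.
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }
  TAOS_RES *res = taos_query(conn,
      "SELECT AVG(current) FROM d1001 "
      "WHERE ts > NOW - 1h INTERVAL(10s) FILL(LINEAR)");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  // Rows can be iterated with taos_fetch_row() as in any other query.
  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```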
-For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation). \ No newline at end of file +For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation). diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index bd5df5a4952d5b56badde4eb3d6c88051cfa7b51..16adf906bea85d538ac408e1c40b18160aceed78 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -69,18 +69,18 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES( The TDengine supports the following data types and Java data types: -| TDengine DataType | Java DataType | -| ----------------- | ------------------ | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | -| SMALLINT | java.lang.Short | -| TINYINT | java.lang.Byte | -| BOOL | java.lang.Boolean | -| BINARY | byte[] | -| NCHAR | java.lang.String | +| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) | +| ----------------- | ------------------ | ------------------ | +| TIMESTAMP | java.lang.Long | java.sql.Timestamp | +| INT | java.lang.Integer | java.lang.Integer | +| BIGINT | java.lang.Long | java.lang.Long | +| FLOAT | java.lang.Float | java.lang.Float | +| DOUBLE | java.lang.Double | java.lang.Double | +| SMALLINT | java.lang.Short | java.lang.Short | +| TINYINT | java.lang.Byte | java.lang.Byte | +| BOOL | java.lang.Boolean | java.lang.Boolean | +| BINARY | java.lang.String | byte array | +| NCHAR | java.lang.String | java.lang.String | ## Install Java connector diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index a0126ceb6455249bf24e60783221cef7142890af..fd9d129e50fa4450aed2fbebe80eddb978ef1263 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -132,7 +132,7 @@ taos> **Windows (x64/x86) environment:** -Under cmd, enter the c:\ tdengine directory and directly execute taos.exe, and you should be able to connect to tdengine service normally and jump to taos shell interface. For example: +Under cmd, enter the c:\TDengine directory and directly execute taos.exe, and you should be able to connect to tdengine service normally and jump to taos shell interface. For example: ```mysql C:\TDengine>taos @@ -413,11 +413,11 @@ See [video tutorials](https://www.taosdata.com/blog/2020/11/11/1963.html) for th Users can find the connector package for python2 and python3 in the source code src/connector/python (or tar.gz/connector/python) folder. Users can install it through `pip` command: -`pip install src/connector/python/linux/python2/` +`pip install src/connector/python/` or - `pip3 install src/connector/python/linux/python3/` + `pip3 install src/connector/python/` #### Windows diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 630fbd1cdbeab7d9500b88ab979d708b14441f0a..7aaeb6c32b25cef8f0d1bf2f67ef94c3a2a007ee 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -1,8 +1,8 @@ # TAOS SQL -TDengine provides a SQL-style language, TAOS SQL, to insert or query data. 
To read through this document, you should have some basic understanding about SQL. +TDengine provides a SQL-style language, TAOS SQL, to insert or query data. This document introduces TAOS SQL and provides other common tips. To read through this document, readers should have a basic understanding of SQL. -TAOS SQL is the main way for users to write and query data to TDengine. TAOS SQL is similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion function for time-series data, the relevant function of data deletion is non-existent in TAO SQL. +TAOS SQL is the main tool for users to write and query data into/from TDengine. TAOS SQL provides a syntax style similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion functionality for time-series data, the relevant data deletion functions are unsupported in TAOS SQL. Let’s take a look at the conventions used for syntax descriptions. @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epoch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamps. For example, now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u (microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries the data of the whole week that started two weeks ago. To specify the interval of down sampling, you can also use n (calendar month) and y (calendar year) as time units. -Default time precision of TDengine is millisecond, you can change it to microseocnd by setting parameter enableMicrosecond. +TDengine's timestamp is set to millisecond accuracy by default. Microsecond/nanosecond accuracy can be set using CREATE DATABASE with the PRECISION parameter. (Nanosecond resolution is supported from version 2.1.5.0 onwards.) In TDengine, the following 10 data types can be used in the data model of an ordinary table. @@ -1244,4 +1244,4 @@ TAOS SQL supports join columns of two tables by Primary Key timestamp between th **Availability of is not null** -Is not null supports all types of columns. Non-null expression is < > "" and only applies to columns of non-numeric types. \ No newline at end of file +Is not null supports all types of columns. Non-null expression is < > "" and only applies to columns of non-numeric types.
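A hedged example of the PRECISION clause mentioned in the timestamp notes above (the database name `demo_ns` is made up; nanosecond precision presumes a server of version 2.1.5.0 or later):

```c
// Sketch: create a database whose timestamps use nanosecond precision via the
// PRECISION clause of CREATE DATABASE. Connection details are assumptions.
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }
  TAOS_RES *res = taos_query(conn,
      "CREATE DATABASE IF NOT EXISTS demo_ns PRECISION 'ns'");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "create database failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```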
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index e116d72d2649940f9d272b8d3d01e34576a4049d..9c6a6e62f5b5fda1cfbaf1b5fff9593a5e349271 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -102,6 +102,12 @@ elif echo $osinfo | grep -qwi "centos" ; then elif echo $osinfo | grep -qwi "fedora" ; then # echo "This is fedora system" os_type=2 +elif echo $osinfo | grep -qwi "Linx" ; then +# echo "This is Linx system" + os_type=1 + service_mod=0 + initd_mod=0 + service_config_dir="/etc/systemd/system" else echo " osinfo: ${osinfo}" echo " This is an officially unverified linux system," diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index aa09013e538253b8740a0aaf70d04358320a6dd8..3df7013b197baaf4d78bb0f0ae5d507d6be92715 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -128,8 +128,12 @@ function install_lib() { ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi - - ${csudo} ldconfig + + if [ "$osType" != "Darwin" ]; then + ${csudo} ldconfig + else + ${csudo} update_dyld_shared_cache + fi } function install_header() { diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 0d06e5d39c0ed1916e0c2af7ccce5918e31ac42f..cf53977103c3a9760286e70447d826f7026d7e53 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -10,18 +10,19 @@ AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) # set the static lib name ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt ${VAR_TSZ}) + TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt cJson ${VAR_TSZ}) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) # generate dynamic library (*.so) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt) + TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt cJson) IF (TD_LINUX_64) - TARGET_LINK_LIBRARIES(taos lua) + TARGET_LINK_LIBRARIES(taos lua cJson) ENDIF () SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) @@ -36,16 +37,17 @@ IF (TD_LINUX) ELSEIF (TD_DARWIN) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) # set the static lib name ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) #set version of .dylib @@ -59,30 +61,32 @@ ELSEIF (TD_DARWIN) ELSEIF (TD_WINDOWS) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/client/src/taos.rc.in" 
"${TD_COMMUNITY_DIR}/src/client/src/taos.rc") ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static trpc tutil query) + TARGET_LINK_LIBRARIES(taos_static trpc tutil query cJson) # generate dynamic library (*.dll) ADD_LIBRARY(taos SHARED ${SRC} ${TD_COMMUNITY_DIR}/src/client/src/taos.rc) IF (NOT TD_GODLL) SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def) ENDIF () - TARGET_LINK_LIBRARIES(taos trpc tutil query lua) + TARGET_LINK_LIBRARIES(taos trpc tutil query lua cJson) ELSEIF (TD_DARWIN) SET(CMAKE_MACOSX_RPATH 1) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) ADD_LIBRARY(taos_static STATIC ${SRC}) - TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static") # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) - TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua) + TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua cJson) SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 3b4c30098969e655e2d35da1665bb43c6d7e8fb4..126eb1424bd2ab270371c0aacab9dbff0a402358 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -92,7 +92,7 @@ typedef struct SMergeTsCtx { }SMergeTsCtx; typedef struct SVgroupTableInfo { - SVgroupInfo vgInfo; + SVgroupMsg vgInfo; SArray *itemList; // SArray } SVgroupTableInfo; @@ -174,7 +174,9 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo); bool tscIsInsertData(char* sqlstr); -int tscAllocPayload(SSqlCmd* pCmd, int size); +// the memory is not reset in case of fast allocate payload function +int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size); +int32_t tscAllocPayload(SSqlCmd* pCmd, int size); TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes); @@ -288,7 +290,11 @@ void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo); SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); void* tscVgroupInfoClear(SVgroupsInfo *pInfo); + +#if 0 void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src); +#endif + /** * The create object function must be successful expect for the out of memory issue. 
* diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index b8eb0a5286a7b72b3ddd1d34b103e5b6239a496c..dd4ff7eb57f20cfc8d31328630fbb14b7acf7017 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -234,7 +234,6 @@ typedef struct STableDataBlocks { typedef struct { STableMeta *pTableMeta; SArray *vgroupIdList; -// SVgroupsInfo *pVgroupsInfo; } STableMetaVgroupInfo; typedef struct SInsertStatementParam { @@ -286,20 +285,14 @@ typedef struct { int32_t resColumnId; } SSqlCmd; -typedef struct SResRec { - int numOfRows; - int numOfTotal; -} SResRec; - typedef struct { int32_t numOfRows; // num of results in current retrieval - int64_t numOfRowsGroup; // num of results of current group int64_t numOfTotal; // num of total results int64_t numOfClauseTotal; // num of total result in current subclause char * pRsp; int32_t rspType; int32_t rspLen; - uint64_t qId; + uint64_t qId; // query id of SQInfo int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int32_t row; @@ -307,8 +300,6 @@ typedef struct { int16_t precision; bool completed; int32_t code; - int32_t numOfGroups; - SResRec * pGroupRec; char * data; TAOS_ROW tsrow; TAOS_ROW urow; @@ -316,8 +307,7 @@ typedef struct { char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex* pColumnIndex; - TAOS_FIELD* final; - SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions + TAOS_FIELD* final; struct SGlobalMerger *pMerger; } SSqlRes; @@ -377,7 +367,6 @@ typedef struct SSqlObj { tsem_t rspSem; SSqlCmd cmd; SSqlRes res; - bool isBind; SSubqueryState subState; struct SSqlObj **pSubs; @@ -578,7 +567,7 @@ static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSc SKVRow kvRow = memRowKvBody(dest); memRowSetType(dest, SMEM_ROW_KV); - memRowSetKvVersion(kvRow, dataRowVersion(dataRow)); + memRowSetKvVersion(dest, dataRowVersion(dataRow)); kvRowSetNCols(kvRow, nBoundCols); kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 7181c658ddcdfde3efe7df3c0784c20f18bd4c03..61ae5082f31cd9129a3cec1eaa1e0552ada7993b 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -41,6 +41,14 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions (JNIEnv *, jclass, jint, jstring); +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: setConfigImp + * Signature: (Ljava/lang/String;)Lcom/taosdata/jdbc/TSDBException; + */ +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setConfigImp + (JNIEnv *, jclass, jstring); + /* * Class: com_taosdata_jdbc_TSDBJNIConnector * Method: getTsCharset diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 506c8d64b9f4213713656ecd08612a103e0b1b2d..925b7d75db9f88c9905270aa365c60990e9f45a3 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -200,6 +200,64 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *e jniDebug("jni initialized successfully, config directory: %s", configDir); } +JNIEXPORT jobject createTSDBException(JNIEnv *env, int code, char *msg) { + // find class + jclass exception_clazz = (*env)->FindClass(env, 
"com/taosdata/jdbc/TSDBException"); + // find methods + jmethodID init_method = (*env)->GetMethodID(env, exception_clazz, "", "()V"); + jmethodID setCode_method = (*env)->GetMethodID(env, exception_clazz, "setCode", "(I)V"); + jmethodID setMessage_method = (*env)->GetMethodID(env, exception_clazz, "setMessage", "(Ljava/lang/String;)V"); + // new exception + jobject exception_obj = (*env)->NewObject(env, exception_clazz, init_method); + // set code + (*env)->CallVoidMethod(env, exception_obj, setCode_method, code); + // set message + jstring message = (*env)->NewStringUTF(env, msg); + (*env)->CallVoidMethod(env, exception_obj, setMessage_method, message); + + return exception_obj; +} + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: setConfigImp + * Signature: (Ljava/lang/String;)Lcom/taosdata/jdbc/TSDBException; + */ +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setConfigImp(JNIEnv *env, jclass jobj, + jstring config) { + /* + if (config == NULL) { + jniDebug("config value is null"); + return -1; + } + + const char *cfg = (*env)->GetStringUTFChars(env, config, NULL); + if (!cfg) { + return -1; + } + return 0; + */ + + if (config == NULL) { + char *msg = "config value is null"; + jniDebug("config value is null"); + return createTSDBException(env, -1, msg); + } + + const char *cfg = (*env)->GetStringUTFChars(env, config, NULL); + if (!cfg) { + char *msg = "config value is null"; + jniDebug("config value is null"); + return createTSDBException(env, -1, msg); + } + + setConfRet result = taos_set_config(cfg); + int code = result.retCode; + char * msg = result.retMsg; + + return createTSDBException(env, code, msg); +} + JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv *env, jobject jobj, jint optionIndex, jstring optionValue) { if (optionValue == NULL) { diff --git a/src/client/src/taos.def b/src/client/src/taos.def index 7d3b8e80c20226c4a509c95ab5728f41852110f5..f1ff17a491e795120494b00f59a800aa6bbc889a 100644 --- a/src/client/src/taos.def +++ b/src/client/src/taos.def @@ -2,6 +2,7 @@ EXPORTS taos_init taos_cleanup taos_options +taos_set_config taos_connect taos_connect_auth taos_close diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 6b12cd0da04c0f791201182c793d647fc54c00b1..4a621d47c0dcae4c2765d53b0d5b650e22d64a58 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -60,17 +60,25 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); pCmd->resColumnId = TSDB_RES_COL_ID; + taosAcquireRef(tscObjRef, pSql->self); + int32_t code = tsParseSql(pSql, true); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return; + + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } if (code != TSDB_CODE_SUCCESS) { pSql->res.code = code; tscAsyncResultOnError(pSql); + taosReleaseRef(tscObjRef, pSql->self); return; } SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); executeQuery(pSql, pQueryInfo); + taosReleaseRef(tscObjRef, pSql->self); } // TODO return the correct error code to client in tscQueueAsyncError diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index c9ad7361efa3ade3e3e221a0128a5ad1f3e22ccb..e26e439492cec9c83b624c2bbb2bbc3a95de97b0 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -2128,11 +2128,12 @@ int32_t tscParseLines(char* lines[], int numLines, SArray* points, 
SArray* faile int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { int32_t code = 0; - SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo)); + SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo)); info->id = genLinesSmlId(); if (numLines <= 0 || numLines > 65536) { tscError("SML:0x%"PRIx64" taos_insert_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines); + tfree(info); code = TSDB_CODE_TSC_APP_ERROR; return code; } @@ -2140,7 +2141,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { for (int i = 0; i < numLines; ++i) { if (lines[i] == NULL) { tscError("SML:0x%"PRIx64" taos_insert_lines line %d is NULL", info->id, i); - free(info); + tfree(info); code = TSDB_CODE_TSC_APP_ERROR; return code; } @@ -2149,7 +2150,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) { SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT)); if (lpPoints == NULL) { tscError("SML:0x%"PRIx64" taos_insert_lines failed to allocate memory", info->id); - free(info); + tfree(info); return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -2177,7 +2178,7 @@ cleanup: taosArrayDestroy(lpPoints); - free(info); + tfree(info); return code; } diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c index 397b5d3e97c2ed7fc1a42bd0e773bcff8f659c65..8e0322cab07ba462b7320cef02011b27b18785d5 100644 --- a/src/client/src/tscParseOpenTSDB.c +++ b/src/client/src/tscParseOpenTSDB.c @@ -3,6 +3,7 @@ #include #include +#include "cJSON.h" #include "hash.h" #include "taos.h" @@ -12,9 +13,12 @@ #include "tscParseLine.h" -#define MAX_TELNET_FILEDS_NUM 2 -#define OTS_TIMESTAMP_COLUMN_NAME "ts" -#define OTS_METRIC_VALUE_COLUMN_NAME "value" +#define OTD_MAX_FIELDS_NUM 2 +#define OTD_JSON_SUB_FIELDS_NUM 2 +#define OTD_JSON_FIELDS_NUM 4 + +#define OTD_TIMESTAMP_COLUMN_NAME "ts" +#define OTD_METRIC_VALUE_COLUMN_NAME "value" /* telnet style API parser */ static uint64_t HandleId = 0; @@ -77,12 +81,12 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char const char *start, *cur; int32_t ret = TSDB_CODE_SUCCESS; int len = 0; - char key[] = OTS_TIMESTAMP_COLUMN_NAME; + char key[] = OTD_TIMESTAMP_COLUMN_NAME; char *value = NULL; start = cur = *index; //allocate fields for timestamp and value - *pTS = tcalloc(MAX_TELNET_FILEDS_NUM, sizeof(TAOS_SML_KV)); + *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV)); while(*cur != '\0') { if (*cur == ' ') { @@ -123,7 +127,7 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch const char *start, *cur; int32_t ret = TSDB_CODE_SUCCESS; int len = 0; - char key[] = OTS_METRIC_VALUE_COLUMN_NAME; + char key[] = OTD_METRIC_VALUE_COLUMN_NAME; char *value = NULL; start = cur = *index; @@ -174,6 +178,9 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj tscError("OTD:0x%"PRIx64" Tag key cannot exceeds 65 characters", info->id); return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; } + if (*cur == ' ') { + return TSDB_CODE_TSC_LINE_SYNTAX_ERROR; + } if (*cur == '=') { break; } @@ -207,8 +214,8 @@ static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index, start = cur = *index; while (1) { - // ',' or '\0' identifies a value - if (*cur == ',' || *cur == '\0') { + // whitespace or '\0' identifies a value + if (*cur == ' ' || *cur == '\0') { // '\0' indicates end of value *is_last_kv = (*cur == '\0') ? true : false; break; @@ -405,7 +412,7 @@ cleanup: tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines finish inserting %d lines. 
code: %d", info->id, numLines, code); points = TARRAY_GET_START(lpPoints); numPoints = taosArrayGetSize(lpPoints); - for (int i=0; ivaluestring); + if (stableLen > TSDB_TABLE_NAME_LEN) { + tscError("OTD:0x%"PRIx64" Metric cannot exceeds 193 characters in JSON", info->id); + return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; + } + + pSml->stableName = tcalloc(stableLen + 1, sizeof(char)); + if (pSml->stableName == NULL){ + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + if (isdigit(metric->valuestring[0])) { + tscError("OTD:0x%"PRIx64" Metric cannnot start with digit in JSON", info->id); + tfree(pSml->stableName); + return TSDB_CODE_TSC_INVALID_JSON; + } + + tstrncpy(pSml->stableName, metric->valuestring, stableLen + 1); + + return TSDB_CODE_SUCCESS; + +} + +int32_t parseTimestampFromJSONObj(cJSON *root, int64_t *tsVal, SSmlLinesInfo* info) { + int32_t size = cJSON_GetArraySize(root); + if (size != OTD_JSON_SUB_FIELDS_NUM) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *value = cJSON_GetObjectItem(root, "value"); + if (!cJSON_IsNumber(value)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *type = cJSON_GetObjectItem(root, "type"); + if (!cJSON_IsString(type)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + *tsVal = value->valueint; + //if timestamp value is 0 use current system time + if (*tsVal == 0) { + *tsVal = taosGetTimestampNs(); + return TSDB_CODE_SUCCESS; + } + + size_t typeLen = strlen(type->valuestring); + if (typeLen == 1 && type->valuestring[0] == 's') { + //seconds + *tsVal = (int64_t)(*tsVal * 1e9); + } else if (typeLen == 2 && type->valuestring[1] == 's') { + switch (type->valuestring[0]) { + case 'm': + //milliseconds + *tsVal = convertTimePrecision(*tsVal, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_NANO); + break; + case 'u': + //microseconds + *tsVal = convertTimePrecision(*tsVal, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO); + break; + case 'n': + //nanoseconds + *tsVal = *tsVal * 1; + break; + default: + return TSDB_CODE_TSC_INVALID_JSON; + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseTimestampFromJSON(cJSON *root, TAOS_SML_KV **pTS, int *num_kvs, SSmlLinesInfo* info) { + //Timestamp must be the first KV to parse + assert(*num_kvs == 0); + int64_t tsVal; + char key[] = OTD_TIMESTAMP_COLUMN_NAME; + + cJSON *timestamp = cJSON_GetObjectItem(root, "timestamp"); + if (cJSON_IsNumber(timestamp)) { + //timestamp value 0 indicates current system time + if (timestamp->valueint == 0) { + tsVal = taosGetTimestampNs(); + } else { + tsVal = convertTimePrecision(timestamp->valueint, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO); + } + } else if (cJSON_IsObject(timestamp)) { + int32_t ret = parseTimestampFromJSONObj(timestamp, &tsVal, info); + if (ret != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" Failed to parse timestamp from JSON Obj", info->id); + return ret; + } + } else { + return TSDB_CODE_TSC_INVALID_JSON; + } + + //allocate fields for timestamp and value + *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV)); + + + (*pTS)->key = tcalloc(sizeof(key), 1); + memcpy((*pTS)->key, key, sizeof(key)); + + (*pTS)->type = TSDB_DATA_TYPE_TIMESTAMP; + (*pTS)->length = (int16_t)tDataTypes[(*pTS)->type].bytes; + (*pTS)->value = tcalloc((*pTS)->length, 1); + memcpy((*pTS)->value, &tsVal, (*pTS)->length); + + *num_kvs += 1; + return TSDB_CODE_SUCCESS; + +} + +int32_t convertJSONBool(TAOS_SML_KV *pVal, char* typeStr, int64_t valueInt, SSmlLinesInfo* info) { + if (strcasecmp(typeStr, "bool") != 0) { + tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON 
Bool", info->id, typeStr); + return TSDB_CODE_TSC_INVALID_JSON_TYPE; + } + pVal->type = TSDB_DATA_TYPE_BOOL; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(bool *)(pVal->value) = valueInt ? true : false; + + return TSDB_CODE_SUCCESS; +} + +int32_t convertJSONNumber(TAOS_SML_KV *pVal, char* typeStr, cJSON *value, SSmlLinesInfo* info) { + //tinyint + if (strcasecmp(typeStr, "i8") == 0 || + strcasecmp(typeStr, "tinyint") == 0) { + if (!IS_VALID_TINYINT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(tinyint)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_TINYINT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int8_t *)(pVal->value) = (int8_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //smallint + if (strcasecmp(typeStr, "i16") == 0 || + strcasecmp(typeStr, "smallint") == 0) { + if (!IS_VALID_SMALLINT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(smallint)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_SMALLINT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int16_t *)(pVal->value) = (int16_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //int + if (strcasecmp(typeStr, "i32") == 0 || + strcasecmp(typeStr, "int") == 0) { + if (!IS_VALID_INT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(int)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_INT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int32_t *)(pVal->value) = (int32_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //bigint + if (strcasecmp(typeStr, "i64") == 0 || + strcasecmp(typeStr, "bigint") == 0) { + if (!IS_VALID_BIGINT(value->valueint)) { + tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(bigint)", info->id, value->valueint); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_BIGINT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(int64_t *)(pVal->value) = (int64_t)(value->valueint); + return TSDB_CODE_SUCCESS; + } + //float + if (strcasecmp(typeStr, "f32") == 0 || + strcasecmp(typeStr, "float") == 0) { + if (!IS_VALID_FLOAT(value->valuedouble)) { + tscError("OTD:0x%"PRIx64" JSON value(%f) cannot fit in type(float)", info->id, value->valuedouble); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_FLOAT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(float *)(pVal->value) = (float)(value->valuedouble); + return TSDB_CODE_SUCCESS; + } + //double + if (strcasecmp(typeStr, "f64") == 0 || + strcasecmp(typeStr, "double") == 0) { + if (!IS_VALID_DOUBLE(value->valuedouble)) { + tscError("OTD:0x%"PRIx64" JSON value(%f) cannot fit in type(double)", info->id, value->valuedouble); + return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; + } + pVal->type = TSDB_DATA_TYPE_DOUBLE; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(double *)(pVal->value) = (double)(value->valuedouble); + return TSDB_CODE_SUCCESS; + } + + //if reach here means type is unsupported + tscError("OTD:0x%"PRIx64" invalid 
type(%s) for JSON Number", info->id, typeStr); + return TSDB_CODE_TSC_INVALID_JSON_TYPE; +} + +int32_t convertJSONString(TAOS_SML_KV *pVal, char* typeStr, cJSON *value, SSmlLinesInfo* info) { + if (strcasecmp(typeStr, "binary") == 0) { + pVal->type = TSDB_DATA_TYPE_BINARY; + } else if (strcasecmp(typeStr, "nchar") == 0) { + pVal->type = TSDB_DATA_TYPE_NCHAR; + } else { + tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON String", info->id, typeStr); + return TSDB_CODE_TSC_INVALID_JSON_TYPE; + } + pVal->length = (int16_t)strlen(value->valuestring); + pVal->value = tcalloc(pVal->length + 1, 1); + memcpy(pVal->value, value->valuestring, pVal->length); + return TSDB_CODE_SUCCESS; +} + +int32_t parseValueFromJSONObj(cJSON *root, TAOS_SML_KV *pVal, SSmlLinesInfo* info) { + int32_t ret = TSDB_CODE_SUCCESS; + int32_t size = cJSON_GetArraySize(root); + + if (size != OTD_JSON_SUB_FIELDS_NUM) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *value = cJSON_GetObjectItem(root, "value"); + if (value == NULL) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *type = cJSON_GetObjectItem(root, "type"); + if (!cJSON_IsString(type)) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + switch (value->type) { + case cJSON_True: + case cJSON_False: { + ret = convertJSONBool(pVal, type->valuestring, value->valueint, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + break; + } + case cJSON_Number: { + ret = convertJSONNumber(pVal, type->valuestring, value, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + break; + } + case cJSON_String: { + ret = convertJSONString(pVal, type->valuestring, value, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + break; + } + default: + return TSDB_CODE_TSC_INVALID_JSON_TYPE; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseValueFromJSON(cJSON *root, TAOS_SML_KV *pVal, SSmlLinesInfo* info) { + int type = root->type; + + switch (type) { + case cJSON_True: + case cJSON_False: { + pVal->type = TSDB_DATA_TYPE_BOOL; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(bool *)(pVal->value) = root->valueint ? 
true : false; + break; + } + case cJSON_Number: { + //convert default JSON Number type to float + pVal->type = TSDB_DATA_TYPE_FLOAT; + pVal->length = (int16_t)tDataTypes[pVal->type].bytes; + pVal->value = tcalloc(pVal->length, 1); + *(float *)(pVal->value) = (float)(root->valuedouble); + break; + } + case cJSON_String: { + //convert default JSON String type to nchar + pVal->type = TSDB_DATA_TYPE_NCHAR; + //pVal->length = wcslen((wchar_t *)root->valuestring) * TSDB_NCHAR_SIZE; + pVal->length = (int16_t)strlen(root->valuestring); + pVal->value = tcalloc(pVal->length + 1, 1); + memcpy(pVal->value, root->valuestring, pVal->length); + break; + } + case cJSON_Object: { + int32_t ret = parseValueFromJSONObj(root, pVal, info); + if (ret != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" Failed to parse timestamp from JSON Obj", info->id); + return ret; + } + break; + } + default: + return TSDB_CODE_TSC_INVALID_JSON; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t parseMetricValueFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, SSmlLinesInfo* info) { + //skip timestamp + TAOS_SML_KV *pVal = *pKVs + 1; + char key[] = OTD_METRIC_VALUE_COLUMN_NAME; + + cJSON *metricVal = cJSON_GetObjectItem(root, "value"); + if (metricVal == NULL) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + int32_t ret = parseValueFromJSON(metricVal, pVal, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + pVal->key = tcalloc(sizeof(key), 1); + memcpy(pVal->key, key, sizeof(key)); + + *num_kvs += 1; + return TSDB_CODE_SUCCESS; + +} + +int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, char **childTableName, SSmlLinesInfo* info) { + int32_t ret = TSDB_CODE_SUCCESS; + + cJSON *tags = cJSON_GetObjectItem(root, "tags"); + if (tags == NULL || tags->type != cJSON_Object) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + //only pick up the first ID value as child table name + cJSON *id = cJSON_GetObjectItem(tags, "ID"); + if (id != NULL) { + size_t idLen = strlen(id->valuestring); + ret = isValidChildTableName(id->valuestring, (int16_t)idLen); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + *childTableName = tcalloc(idLen + 1, sizeof(char)); + memcpy(*childTableName, id->valuestring, idLen); + //remove all ID fields from tags list no case sensitive + while (id != NULL) { + cJSON_DeleteItemFromObject(tags, "ID"); + id = cJSON_GetObjectItem(tags, "ID"); + } + } + + int32_t tagNum = cJSON_GetArraySize(tags); + //at least one tag pair required + if (tagNum <= 0) { + return TSDB_CODE_TSC_INVALID_JSON; + } + + //allocate memory for tags + *pKVs = tcalloc(tagNum, sizeof(TAOS_SML_KV)); + TAOS_SML_KV *pkv = *pKVs; + + for (int32_t i = 0; i < tagNum; ++i) { + cJSON *tag = cJSON_GetArrayItem(tags, i); + if (tag == NULL) { + return TSDB_CODE_TSC_INVALID_JSON; + } + //key + size_t keyLen = strlen(tag->string); + pkv->key = tcalloc(keyLen + 1, sizeof(char)); + strncpy(pkv->key, tag->string, keyLen); + //value + ret = parseValueFromJSON(tag, pkv, info); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + *num_kvs += 1; + pkv++; + } + + return ret; + +} + +int32_t tscParseJSONPayload(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlLinesInfo* info) { + int32_t ret = TSDB_CODE_SUCCESS; + + if (!cJSON_IsObject(root)) { + tscError("OTD:0x%"PRIx64" data point needs to be JSON object", info->id); + return TSDB_CODE_TSC_INVALID_JSON; + } + + int32_t size = cJSON_GetArraySize(root); + //outmost json fields has to be exactly 4 + if (size != OTD_JSON_FIELDS_NUM) { + tscError("OTD:0x%"PRIx64" Invalid number of JSON fields 
in data point %d", info->id, size); + return TSDB_CODE_TSC_INVALID_JSON; + } + + //Parse metric + ret = parseMetricFromJSON(root, pSml, info); + if (ret != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" Unable to parse metric from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse metric from JSON payload finished", info->id); + + //Parse timestamp + ret = parseTimestampFromJSON(root, &pSml->fields, &pSml->fieldNum, info); + if (ret) { + tscError("OTD:0x%"PRIx64" Unable to parse timestamp from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse timestamp from JSON payload finished", info->id); + + //Parse metric value + ret = parseMetricValueFromJSON(root, &pSml->fields, &pSml->fieldNum, info); + if (ret) { + tscError("OTD:0x%"PRIx64" Unable to parse metric value from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse metric value from JSON payload finished", info->id); + + //Parse tags + ret = parseTagsFromJSON(root, &pSml->tags, &pSml->tagNum, &pSml->childTableName, info); + if (ret) { + tscError("OTD:0x%"PRIx64" Unable to parse tags from JSON payload", info->id); + return ret; + } + tscDebug("OTD:0x%"PRIx64" Parse tags from JSON payload finished", info->id); + + return TSDB_CODE_SUCCESS; +} + +int32_t tscParseMultiJSONPayload(char* payload, SArray* points, SSmlLinesInfo* info) { + int32_t payloadNum, ret; + ret = TSDB_CODE_SUCCESS; + + if (payload == NULL) { + tscError("OTD:0x%"PRIx64" empty JSON Payload", info->id); + return TSDB_CODE_TSC_INVALID_JSON; + } + + cJSON *root = cJSON_Parse(payload); + //multiple data points must be sent in JSON array + if (cJSON_IsObject(root)) { + payloadNum = 1; + } else if (cJSON_IsArray(root)) { + payloadNum = cJSON_GetArraySize(root); + } else { + tscError("OTD:0x%"PRIx64" Invalid JSON Payload", info->id); + ret = TSDB_CODE_TSC_INVALID_JSON; + goto PARSE_JSON_OVER; + } + + for (int32_t i = 0; i < payloadNum; ++i) { + TAOS_SML_DATA_POINT point = {0}; + cJSON *dataPoint = (payloadNum == 1) ? 
root : cJSON_GetArrayItem(root, i); + + ret = tscParseJSONPayload(dataPoint, &point, info); + if (ret != TSDB_CODE_SUCCESS) { + tscError("OTD:0x%"PRIx64" JSON data point parse failed", info->id); + destroySmlDataPoint(&point); + goto PARSE_JSON_OVER; + } else { + tscDebug("OTD:0x%"PRIx64" JSON data point parse success", info->id); + } + taosArrayPush(points, &point); + } + +PARSE_JSON_OVER: + cJSON_Delete(root); + return ret; +} + +int taos_insert_json_payload(TAOS* taos, char* payload) { + int32_t code = 0; + + SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo)); + info->id = genUID(); + + if (payload == NULL) { + tscError("OTD:0x%"PRIx64" taos_insert_json_payload payload is NULL", info->id); + tfree(info); + code = TSDB_CODE_TSC_APP_ERROR; + return code; + } + + SArray* lpPoints = taosArrayInit(1, sizeof(TAOS_SML_DATA_POINT)); + if (lpPoints == NULL) { + tscError("OTD:0x%"PRIx64" taos_insert_json_payload failed to allocate memory", info->id); + tfree(info); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines begin inserting %d points", info->id, 1); + code = tscParseMultiJSONPayload(payload, lpPoints, info); + size_t numPoints = taosArrayGetSize(lpPoints); + + if (code != 0) { + goto cleanup; + } + + TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints); + code = tscSmlInsert(taos, points, (int)numPoints, info); + if (code != 0) { + tscError("OTD:0x%"PRIx64" taos_insert_json_payload error: %s", info->id, tstrerror((code))); + } + +cleanup: + tscDebug("OTD:0x%"PRIx64" taos_insert_json_payload finish inserting 1 Point. code: %d", info->id, code); + points = TARRAY_GET_START(lpPoints); + numPoints = taosArrayGetSize(lpPoints); + for (int i = 0; i < numPoints; ++i) { + destroySmlDataPoint(points+i); + } + + taosArrayDestroy(lpPoints); + + tfree(info); + return code; +} diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index e5ac18fb509fcc86b363f519e91a0f793aa696c9..186fc6286efba135ead881d3ee84ec5653efd52a 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1493,7 +1493,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) { pSql->signature = pSql; pSql->pTscObj = pObj; pSql->maxRetry = TSDB_MAX_REPLICA; - pSql->isBind = true; pStmt->pSql = pSql; pStmt->last = STMT_INIT; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 88f50c6957250cbdc67b5759e56e0b1d0e78d2c2..208fa10abc8ad38bb3c68064662d7149aa88071f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -77,7 +77,6 @@ static int32_t getDelimiterIndex(SStrToken* pTableName); static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd); static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd); -static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len); static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, int32_t nameLength); static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, @@ -282,6 +281,8 @@ static uint8_t convertRelationalOperator(SStrToken *pToken) { return TSDB_RELATION_LIKE; case TK_MATCH: return TSDB_RELATION_MATCH; + case TK_NMATCH: + return TSDB_RELATION_NMATCH; case TK_QUESTION: return TSDB_RELATION_QUESTION; case TK_ARROW: @@ -338,7 +339,7 @@ static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); 
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); - if (convertTimestampStrToInt64(pVar, tinfo.precision) < -1) { + if (convertTimestampStrToInt64(pVar, tinfo.precision) < 0) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); } return TSDB_CODE_SUCCESS; @@ -1714,57 +1715,6 @@ static int32_t getDelimiterIndex(SStrToken* pTableName) { return -1; } -int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) { - int32_t totalLen = 0; - - if (account != NULL) { - int32_t len = (int32_t)strlen(account); - strcpy(fullName, account); - fullName[len] = TS_PATH_DELIMITER[0]; - totalLen += (len + 1); - } - - /* db name is not specified, the tableName dose not include db name */ - if (pDB != NULL) { - if (pDB->n >= TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - memcpy(&fullName[totalLen], pDB->z, pDB->n); - totalLen += pDB->n; - } - - if (tableName != NULL) { - if (pDB != NULL) { - fullName[totalLen] = TS_PATH_DELIMITER[0]; - totalLen += 1; - - /* here we only check the table name length limitation */ - if (!tscValidateTableNameLength(tableName->n)) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - } else { // pDB == NULL, the db prefix name is specified in tableName - /* the length limitation includes tablename + dbname + sep */ - if (tableName->n >= TSDB_TABLE_NAME_LEN + TSDB_DB_NAME_LEN) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - } - - memcpy(&fullName[totalLen], tableName->z, tableName->n); - totalLen += tableName->n; - } - - if (xlen != NULL) { - *xlen = totalLen; - } - - if (totalLen < TSDB_TABLE_FNAME_LEN) { - fullName[totalLen] = 0; - } - - return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_OPERATION; -} - void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t tableUid) { SSchema s = {.type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX}; tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, tableUid, &s); @@ -2567,6 +2517,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col case TSDB_FUNC_MAX: case TSDB_FUNC_DIFF: case TSDB_FUNC_DERIVATIVE: + case TSDB_FUNC_CEIL: + case TSDB_FUNC_FLOOR: + case TSDB_FUNC_ROUND: case TSDB_FUNC_STDDEV: case TSDB_FUNC_LEASTSQR: { // 1. 
valid the number of parameters @@ -2757,6 +2710,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + if (pParamElem->pNode->columnName.z == NULL) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + // functions can not be applied to tags if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); @@ -3490,6 +3447,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool int32_t scalarUdf = 0; int32_t prjNum = 0; int32_t aggNum = 0; + int32_t scalNum = 0; size_t numOfExpr = tscNumOfExprs(pQueryInfo); assert(numOfExpr > 0); @@ -3521,6 +3479,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool ++prjNum; } + if (functionId == TSDB_FUNC_CEIL || functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND) { + ++scalNum; + } + if (functionId == TSDB_FUNC_PRJ && (pExpr1->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->base.colInfo.flag))) { continue; } @@ -3542,15 +3504,19 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool } } - aggNum = (int32_t)size - prjNum - aggUdf - scalarUdf; + aggNum = (int32_t)size - prjNum - scalNum - aggUdf - scalarUdf; assert(aggNum >= 0); - if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalarUdf > 0)) { + if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalNum > 0 || scalarUdf > 0)) { + return false; + } + + if (scalarUdf > 0 && (aggNum > 0 || scalNum > 0)) { return false; } - if (scalarUdf > 0 && aggNum > 0) { + if (aggNum > 0 && scalNum > 0) { return false; } @@ -3737,11 +3703,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, } } - if (pExpr->tokenId == TK_LE || pExpr->tokenId == TK_LT) { - retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->upperBndd, colType, false); - - // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd - } else if (pExpr->tokenId == TK_IN) { + if (pExpr->tokenId == TK_IN) { tVariant *pVal; if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->Expr.paramList, &pVal, colType, timePrecision)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); @@ -3767,6 +3729,10 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, size_t len = twcslen((wchar_t*)pColumnFilter->pz); pColumnFilter->len = len * TSDB_NCHAR_SIZE; + } else if (pExpr->tokenId == TK_LE || pExpr->tokenId == TK_LT) { + retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->upperBndd, colType, false); + + // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd } else { retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->lowerBndd, colType, false); } @@ -3800,6 +3766,9 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, case TK_MATCH: pColumnFilter->lowerRelOptr = TSDB_RELATION_MATCH; break; + case TK_NMATCH: + pColumnFilter->lowerRelOptr = TSDB_RELATION_NMATCH; + break; case TK_ISNULL: pColumnFilter->lowerRelOptr = TSDB_RELATION_ISNULL; break; @@ -3822,9 +3791,6 @@ typedef struct SCondExpr { tSqlExpr* pColumnCond; - tSqlExpr* pTableCond; - int16_t relType; // relation between table name in expression and other tag - // filter condition expression, TK_AND or TK_OR int16_t tableCondIndex; tSqlExpr* pJoinExpr; // join condition @@ -3833,49 +3799,6 @@ typedef struct SCondExpr { 
static int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t timePrecision); -static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) { - SArray* pList = pExpr->Expr.paramList; - - int32_t size = (int32_t) taosArrayGetSize(pList); - if (size <= 0) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - if (size > 0) { - taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); - } - - for (int32_t i = 0; i < size; ++i) { - tSqlExprItem* pSub = taosArrayGet(pList, i); - tVariant* pVar = &pSub->pNode->value; - - taosStringBuilderAppendStringLen(sb, pVar->pz, pVar->nLen); - - if (i < size - 1) { - taosStringBuilderAppendString(sb, TBNAME_LIST_SEP); - } - - if (pVar->nLen <= 0 || !tscValidateTableNameLength(pVar->nLen)) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - } - - return TSDB_CODE_SUCCESS; -} - -static int32_t tablenameCondToString(tSqlExpr* pExpr, uint32_t opToken, SStringBuilder* sb) { - assert(opToken == TK_LIKE || opToken == TK_MATCH); - if (opToken == TK_LIKE) { - taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); - taosStringBuilderAppendString(sb, pExpr->value.pz); - } else if (opToken == TK_MATCH) { - taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_MATCH, QUERY_COND_REL_PREFIX_MATCH_LEN); - taosStringBuilderAppendString(sb, pExpr->value.pz); - } - - return TSDB_CODE_SUCCESS; -} - enum { TSQL_EXPR_TS = 1, TSQL_EXPR_TAG = 2, @@ -3893,7 +3816,6 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); int32_t ret = 0; const char* msg1 = "non binary column not support like/match operator"; - const char* msg2 = "binary column not support this operator"; const char* msg3 = "bool column not support this operator"; const char* msg4 = "primary key not support this operator"; @@ -3914,19 +3836,8 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol pColFilter->filterstr = ((pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) ? 
1 : 0); - if (pColFilter->filterstr) { - if (pExpr->tokenId != TK_EQ - && pExpr->tokenId != TK_NE - && pExpr->tokenId != TK_ISNULL - && pExpr->tokenId != TK_NOTNULL - && pExpr->tokenId != TK_LIKE - && pExpr->tokenId != TK_MATCH - && pExpr->tokenId != TK_IN) { - ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - goto _err_ret; - } - } else { - if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH) { + if (!pColFilter->filterstr) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); goto _err_ret; } @@ -3955,40 +3866,6 @@ _err_ret: return ret; } -static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) { - const char* msg0 = "invalid table name list"; - const char* msg1 = "not string following like"; - - if (pTableCond == NULL) { - return TSDB_CODE_SUCCESS; - } - - tSqlExpr* pLeft = pTableCond->pLeft; - tSqlExpr* pRight = pTableCond->pRight; - - if (!isTablenameToken(&pLeft->columnName)) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - int32_t ret = TSDB_CODE_SUCCESS; - - if (pTableCond->tokenId == TK_IN) { - ret = tablenameListToString(pRight, sb); - } else if (pTableCond->tokenId == TK_LIKE || pTableCond->tokenId == TK_MATCH) { - if (pRight->tokenId != TK_STRING) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - ret = tablenameCondToString(pRight, pTableCond->tokenId, sb); - } - - if (ret != TSDB_CODE_SUCCESS) { - invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0); - } - - return ret; -} - static int32_t getColQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr) { int32_t ret = TSDB_CODE_SUCCESS; @@ -4111,8 +3988,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) { tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1); + atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1); - if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { + if (pTableMetaInfo->joinTagNum > 1) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } @@ -4144,7 +4022,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) { tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2); - if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { + atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1); + + if (pTableMetaInfo->joinTagNum > 1) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } @@ -4427,18 +4307,6 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr return true; } -static bool validTableNameOptr(tSqlExpr* pExpr) { - const char nameFilterOptr[] = {TK_IN, TK_LIKE, TK_MATCH}; - - for (int32_t i = 0; i < tListLen(nameFilterOptr); ++i) { - if (pExpr->tokenId == nameFilterOptr[i]) { - return true; - } - } - - return false; -} - static int32_t setExprToCond(tSqlExpr** parent, tSqlExpr* pExpr, const char* msg, int32_t parentOptr, char* msgBuf) { if (*parent != NULL) { if (parentOptr == TK_OR && msg != NULL) { @@ -4523,13 +4391,13 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* 
pTableMeta, int32_t // check for match expression static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { const char* msg1 = "regular expression string should be less than %d characters"; - const char* msg2 = "illegal column type for match"; + const char* msg2 = "illegal column type for match/nmatch"; const char* msg3 = "invalid regular expression"; tSqlExpr* pLeft = pExpr->pLeft; tSqlExpr* pRight = pExpr->pRight; - if (pExpr->tokenId == TK_MATCH) { + if (pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { if (pRight->value.nLen > tsMaxRegexStringLen) { char tmp[64] = {0}; sprintf(tmp, msg1, tsMaxRegexStringLen); @@ -4537,10 +4405,14 @@ static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_ } SSchema* pSchema = tscGetTableSchema(pTableMeta); - if ((!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) { + if ((!isTablenameToken(&pLeft->columnName)) &&(pSchema[index].type != TSDB_DATA_TYPE_BINARY)) { return invalidOperationMsg(msgBuf, msg2); } + if (!(pRight->type == SQL_NODE_VALUE && pRight->value.nType == TSDB_DATA_TYPE_BINARY)) { + return invalidOperationMsg(msgBuf, msg3); + } + int errCode = 0; regex_t regex; char regErrBuf[256] = {0}; @@ -4578,8 +4450,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql const char* msg2 = "illegal column name"; const char* msg4 = "too many join tables"; const char* msg5 = "not support ordinary column join"; - const char* msg6 = "only one query condition on tbname allowed"; - const char* msg7 = "only in/like allowed in filter table name"; tSqlExpr* pLeft = (*pExpr)->pLeft; tSqlExpr* pRight = (*pExpr)->pRight; @@ -4696,54 +4566,30 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - // in case of in operator, keep it in a seprate attribute - if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - if (!validTableNameOptr(*pExpr)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); - } - - if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + if (pRight != NULL && pRight->tokenId == TK_ID) { // join on tag columns for stable query + if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { + return TSDB_CODE_TSC_INVALID_OPERATION; } - if (pCondExpr->pTableCond == NULL) { - pCondExpr->pTableCond = *pExpr; - pCondExpr->relType = parentOptr; - pCondExpr->tableCondIndex = index.tableIndex; - } else { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; + ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload); + *pExpr = NULL; + if (type) { + *type |= TSQL_EXPR_JOIN; + } + } else { + // do nothing + // ret = setExprToCond(pCmd, &pCondExpr->pTagCond, + // *pExpr, NULL, parentOptr); + tSqlExpr *rexpr = NULL; + if ((*pExpr)->tokenId == TK_NE && (pSchema->type != TSDB_DATA_TYPE_BINARY && pSchema->type != TSDB_DATA_TYPE_NCHAR && pSchema->type != TSDB_DATA_TYPE_BOOL)) { + handleNeOptr(&rexpr, *pExpr); + *pExpr = rexpr; } - + if (type) { *type |= TSQL_EXPR_TAG; } - *pExpr = NULL; - } else { - if (pRight != NULL && pRight->tokenId == TK_ID) { // join on tag columns for stable query - if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - - pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; - ret = 
setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload); - *pExpr = NULL; - if (type) { - *type |= TSQL_EXPR_JOIN; - } - } else { - // do nothing - // ret = setExprToCond(pCmd, &pCondExpr->pTagCond, - // *pExpr, NULL, parentOptr); - tSqlExpr *rexpr = NULL; - if ((*pExpr)->tokenId == TK_NE && (pSchema->type != TSDB_DATA_TYPE_BINARY && pSchema->type != TSDB_DATA_TYPE_NCHAR && pSchema->type != TSDB_DATA_TYPE_BOOL)) { - handleNeOptr(&rexpr, *pExpr); - *pExpr = rexpr; - } - - if (type) { - *type |= TSQL_EXPR_TAG; - } - } } } else { // query on other columns if (type) { @@ -4930,81 +4776,6 @@ int tableNameCompar(const void* lhs, const void* rhs) { return ret > 0 ? 1 : -1; } -static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, const char* account, - tSqlExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) { - const char* msg = "table name too long"; - - if (pExpr == NULL) { - return TSDB_CODE_SUCCESS; - } - - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableCondIndex); - - STagCond* pTagCond = &pQueryInfo->tagCond; - pTagCond->tbnameCond.uid = pTableMetaInfo->pTableMeta->id.uid; - - assert(pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_IN); - - if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH) { - char* str = taosStringBuilderGetResult(sb, NULL); - pQueryInfo->tagCond.tbnameCond.cond = strdup(str); - pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str); - return TSDB_CODE_SUCCESS; - } else { - SStringBuilder sb1; - memset(&sb1, 0, sizeof(sb1)); - taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); - - // remove the duplicated input table names - int32_t num = 0; - char* tableNameString = taosStringBuilderGetResult(sb, NULL); - - char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); - qsort(segments, num, POINTER_BYTES, tableNameCompar); - - int32_t j = 1; - for (int32_t i = 1; i < num; ++i) { - if (strcmp(segments[i], segments[i - 1]) != 0) { - segments[j++] = segments[i]; - } - } - num = j; - - char name[TSDB_DB_NAME_LEN] = {0}; - tNameGetDbName(&pTableMetaInfo->name, name); - SStrToken dbToken = {.type = TK_STRING, .z = name, .n = (uint32_t)strlen(name)}; - - for (int32_t i = 0; i < num; ++i) { - if (i >= 1) { - taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); - } - - char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; - int32_t xlen = (int32_t)strlen(segments[i]); - SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; - - int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); - if (ret != TSDB_CODE_SUCCESS) { - taosStringBuilderDestroy(&sb1); - tfree(segments); - - invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); - return ret; - } - - taosStringBuilderAppendString(&sb1, idBuf); - } - - char* str = taosStringBuilderGetResult(&sb1, NULL); - pQueryInfo->tagCond.tbnameCond.cond = strdup(str); - pQueryInfo->tagCond.tbnameCond.len = (int32_t)strlen(str); - - taosStringBuilderDestroy(&sb1); - tfree(segments); - return TSDB_CODE_SUCCESS; - } -} - int32_t mergeTimeRange(SSqlCmd* pCmd, STimeWindow* res, STimeWindow* win, int32_t optr) { const char* msg0 = "only one time stamp window allowed"; @@ -5144,10 +4915,6 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr } static void cleanQueryExpr(SCondExpr* pCondExpr) { - if (pCondExpr->pTableCond) { - tSqlExprDestroy(pCondExpr->pTableCond); - } - if (pCondExpr->pColumnCond) { 
tSqlExprDestroy(pCondExpr->pColumnCond); } @@ -5443,7 +5210,7 @@ static int32_t getQueryTimeRange(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr //multiple tables's query time range mixed together tExprNode* p = NULL; - SFilterInfo *filter = NULL; + void *filter = NULL; SArray* colList = taosArrayInit(10, sizeof(SColIndex)); ret = exprTreeFromSqlExpr(pCmd, &p, *pExpr, pQueryInfo, colList, NULL); @@ -5485,7 +5252,6 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq int32_t ret = TSDB_CODE_SUCCESS; // tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space - SStringBuilder sb; memset(&sb, 0, sizeof(sb)); SCondExpr condExpr = {0}; if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { @@ -5518,12 +5284,12 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq condExpr.pTagCond = (*pExpr); *pExpr = NULL; - // 1. check if it is a join query + // check if it is a join query if ((ret = validateJoinExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } - // 2. get the query time range + // get the query time range if ((ret = convertTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } @@ -5531,19 +5297,13 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq if ((ret = getQueryTimeRange(&pSql->cmd, pQueryInfo, &condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } - - // 3. get the tag query condition + // get the tag query condition if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } - // 4. get the table name query condition - if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { - goto PARSE_WHERE_EXIT; - } - - // 5. other column query condition + // other column query condition if ((ret = checkColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } @@ -5552,21 +5312,11 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq goto PARSE_WHERE_EXIT; } - - // 6. join condition + // join condition if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { goto PARSE_WHERE_EXIT; } - // 7. query condition for table name - pQueryInfo->tagCond.relType = (condExpr.relType == TK_AND) ? 
TSDB_RELATION_AND : TSDB_RELATION_OR; - - ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb); - taosStringBuilderDestroy(&sb); - if (ret) { - goto PARSE_WHERE_EXIT; - } - //if (!validateFilterExpr(pQueryInfo)) { // ret = invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2); // goto PARSE_WHERE_EXIT; @@ -5637,6 +5387,10 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t pRight->flags &= ~(1 << EXPR_FLAG_NS_TIMESTAMP); } + if (pRight->value.nType == -1) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true); } @@ -6559,7 +6313,9 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu } int32_t f = pExpr->base.functionId; - if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE) { + if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE || + f == TSDB_FUNC_CEIL || f == TSDB_FUNC_FLOOR || f == TSDB_FUNC_ROUND) + { isProjectionFunction = true; break; } @@ -7161,6 +6917,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { const char* msg2 = "aggregation function should not be mixed up with projection"; bool tagTsColExists = false; + int16_t numOfScalar = 0; int16_t numOfSelectivity = 0; int16_t numOfAggregation = 0; @@ -7194,6 +6951,8 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) { if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { numOfSelectivity++; + } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0) { + numOfScalar++; } else { numOfAggregation++; } @@ -8285,11 +8044,12 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, S && pExpr->tokenId != TK_NOTNULL && pExpr->tokenId != TK_LIKE && pExpr->tokenId != TK_MATCH + && pExpr->tokenId != TK_NMATCH ) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } else { - if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -8746,7 +8506,7 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod if (p->vgroupIdList != NULL) { size_t s = taosArrayGetSize(p->vgroupIdList); - size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo); + size_t vgroupsz = sizeof(SVgroupMsg) * s + sizeof(SVgroupsInfo); pTableMetaInfo->vgroupList = calloc(1, vgroupsz); if (pTableMetaInfo->vgroupList == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -8761,14 +8521,11 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo); assert(existVgroupInfo.inUse >= 0); - SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; + SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; pVgroup->numOfEps = existVgroupInfo.numOfEps; pVgroup->vgId = existVgroupInfo.vgId; - for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { - pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; - pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); - } + memcpy(&pVgroup->epAddr, &existVgroupInfo.ep, sizeof(pVgroup->epAddr)); } } } @@ -9257,13 +9014,17 @@ int32_t 
exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS (*pExpr)->pVal = calloc(1, sizeof(tVariant)); tVariantAssign((*pExpr)->pVal, &pSqlExpr->value); - STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; - if (pCols != NULL && taosArrayGetSize(pCols) > 0) { - SColIndex* idx = taosArrayGet(pCols, 0); - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); - // convert time by precision - if (pSchema != NULL && TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && TSDB_DATA_TYPE_BINARY == (*pExpr)->pVal->nType) { - ret = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, (*pExpr)->pVal); + STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx)->pTableMeta; + if (pCols != NULL) { + size_t colSize = taosArrayGetSize(pCols); + + if (colSize > 0) { + SColIndex* idx = taosArrayGet(pCols, colSize - 1); + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); + // convert time by precision + if (pSchema != NULL && TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && TSDB_DATA_TYPE_BINARY == (*pExpr)->pVal->nType) { + ret = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, (*pExpr)->pVal); + } } } return ret; @@ -9306,8 +9067,18 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS (*pExpr)->nodeType = TSQL_NODE_COL; (*pExpr)->pSchema = calloc(1, sizeof(SSchema)); - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); - *(*pExpr)->pSchema = *pSchema; + SSchema* pSchema = NULL; + + if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + pSchema = (*pExpr)->pSchema; + strcpy(pSchema->name, TSQL_TBNAME_L); + pSchema->type = TSDB_DATA_TYPE_BINARY; + pSchema->colId = TSDB_TBNAME_COLUMN_INDEX; + pSchema->bytes = -1; + } else { + pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); + *(*pExpr)->pSchema = *pSchema; + } if (pCols != NULL) { // record the involved columns SColIndex colIndex = {0}; @@ -9328,9 +9099,13 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS if (colSize > 0) { SColIndex* idx = taosArrayGet(pCols, colSize - 1); - SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); - if (pSchema != NULL) { - colType = pSchema->type; + if (idx->colIndex == TSDB_TBNAME_COLUMN_INDEX) { + colType = TSDB_DATA_TYPE_BINARY; + } else { + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex); + if (pSchema != NULL) { + colType = pSchema->type; + } } } } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index a8e7f9ca8136b4e0a7a7f86a54804c7b744c9833..4bfabdf80fc9671a04d108b86c4ec8e8dff7c5ab 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -73,7 +73,7 @@ static int32_t removeDupVgid(int32_t *src, int32_t sz) { return ret; } -static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) { +static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupMsg* pVgroupInfo) { assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0); // Issue the query to one of the vnode among a vgroup randomly. 
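The tscSQLParser.c changes above add the `NMATCH` relational operator (TK_NMATCH mapped to TSDB_RELATION_NMATCH, allowed on binary columns and on `tbname`) and route the `CEIL`/`FLOOR`/`ROUND` scalar functions through projection handling. A hedged sketch of how this new SQL surface would be exercised from the C client follows; the table and column names (`meters`, `current`, ...) are hypothetical, and only the operator and function names come from the patch.

```c
/* Hedged sketch: new SQL surface added by this patch (NMATCH, CEIL/FLOOR/ROUND).
 * Table and column names are placeholders for illustration. */
#include <stdio.h>
#include "taos.h"

static void run(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  if (res == NULL || taos_errno(res) != 0) {
    printf("query failed: %s\n", res ? taos_errstr(res) : "no result");
  }
  if (res != NULL) {
    taos_free_result(res);
  }
}

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (taos == NULL) {
    return 1;
  }

  /* negated regular-expression filter on tbname (or any binary column) */
  run(taos, "select * from meters where tbname nmatch '^d[0-9]+$'");

  /* the new scalar functions used as projections */
  run(taos, "select ceil(current), floor(voltage), round(phase) from meters");

  taos_close(taos);
  return 0;
}
```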
@@ -93,6 +93,7 @@ static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) { existed = true; } } + assert(existed); } @@ -702,11 +703,6 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) { } } - SCond* pCond = &pQueryInfo->tagCond.tbnameCond; - if (pCond->len > 0) { - srcColListSize += pCond->len; - } - return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } @@ -723,7 +719,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab int32_t index = pTableMetaInfo->vgroupIndex; assert(index >= 0); - SVgroupInfo* pVgroupInfo = NULL; + SVgroupMsg* pVgroupInfo = NULL; if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) { assert(index < pTableMetaInfo->vgroupList->numOfVgroups); pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; @@ -861,8 +857,8 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, (*pMsg) += sizeof(SSqlExpr); for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log - pSqlExpr->param[j].nType = htons((uint16_t)pExpr->param[j].nType); - pSqlExpr->param[j].nLen = htons(pExpr->param[j].nLen); + pSqlExpr->param[j].nType = htonl(pExpr->param[j].nType); + pSqlExpr->param[j].nLen = htonl(pExpr->param[j].nLen); if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { memcpy((*pMsg), pExpr->param[j].pz, pExpr->param[j].nLen); @@ -880,17 +876,22 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = NULL; + STableMeta *pTableMeta = NULL; + STableMetaInfo *pTableMetaInfo = NULL; + int32_t code = TSDB_CODE_SUCCESS; int32_t size = tscEstimateQueryMsgSize(pSql); + assert(size > 0); - if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { + if (TSDB_CODE_SUCCESS != tscAllocPayloadFast(pCmd, size)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_INVALID_OPERATION; // todo add test for this } - SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; + pQueryInfo = tscGetQueryInfo(pCmd); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + pTableMeta = pTableMetaInfo->pTableMeta; SQueryAttr query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); @@ -941,18 +942,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->pointInterpQuery = query.pointInterpQuery; pQueryMsg->needReverseScan = query.needReverseScan; pQueryMsg->stateWindow = query.stateWindow; - pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->sqlstrLen = htonl(sqlLen); pQueryMsg->sw.gap = htobe64(query.sw.gap); pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX); pQueryMsg->secondStageOutput = htonl(query.numOfExpr2); - pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number + pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); - pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); - pQueryMsg->tbnameCondLen = htonl(pQueryInfo->tagCond.tbnameCond.len); pQueryMsg->queryType = htonl(pQueryInfo->type); pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen); @@ -968,7 
+966,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tableCols[i].type = htons(pCol->type); //pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters); pQueryMsg->tableCols[i].flist.numOfFilters = 0; - + pQueryMsg->tableCols[i].flist.filterInfo = 0; // append the filter information after the basic column information //serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg); } @@ -981,6 +979,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pCond->len; } + } else { + pQueryMsg->colCondLen = 0; } for (int32_t i = 0; i < query.numOfOutput; ++i) { @@ -1060,6 +1060,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pCond->len; } + } else { + pQueryMsg->tagCondLen = 0; } if (pQueryInfo->bufLen > 0) { @@ -1067,12 +1069,6 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pQueryInfo->bufLen; } - SCond* pCond = &pQueryInfo->tagCond.tbnameCond; - if (pCond->len > 0) { - strncpy(pMsg, pCond->cond, pCond->len); - pMsg += pCond->len; - } - // compressed ts block pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload)); @@ -1089,6 +1085,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tsBuf.tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen); pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks); + } else { + pQueryMsg->tsBuf.tsLen = 0; + pQueryMsg->tsBuf.tsNumOfBlocks = 0; } int32_t numOfOperator = (int32_t) taosArrayGetSize(queryOperator); @@ -1126,6 +1125,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pUdfInfo->contLen; } + } else { + pQueryMsg->udfContentOffset = 0; + pQueryMsg->udfContentLen = 0; } memcpy(pMsg, pSql->sqlstr, sqlLen); @@ -2136,7 +2138,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg)); - size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); + size_t vgroupsz = sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo *pVgroupInfo = calloc(1, vgroupsz); assert(pVgroupInfo != NULL); @@ -2146,7 +2148,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t } else { for (int32_t j = 0; j < pVgroupInfo->numOfVgroups; ++j) { // just init, no need to lock - SVgroupInfo *pVgroup = &pVgroupInfo->vgroups[j]; + SVgroupMsg *pVgroup = &pVgroupInfo->vgroups[j]; SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; vmsg->vgId = htonl(vmsg->vgId); @@ -2158,7 +2160,8 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t pVgroup->vgId = vmsg->vgId; for (int32_t k = 0; k < vmsg->numOfEps; ++k) { pVgroup->epAddr[k].port = vmsg->epAddr[k].port; - pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); + tstrncpy(pVgroup->epAddr[k].fqdn, vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); +// pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); } doUpdateVgroupInfo(pVgroup->vgId, vmsg); @@ -2608,7 +2611,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { tfree(pTableMetaInfo->pTableMeta); if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta - taosHashClear(tscTableMetaMap); + if (pSql->res.pRsp == NULL) { + tscDebug("0x%"PRIx64" unexpected resp from mnode, super table: %s failed to update super table meta ", pSql->self, name); + return 0; + } + return 
tscProcessTableMetaRsp(pSql); } return 0; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index edc3dbfc82aa6c6c7dcbb9fa6548c9f49864e324..99a2a79dc60c89530eb9c2c7f6b5645ca0133ba1 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -623,13 +623,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value -#ifndef _TD_NINGSI_60 - pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; -#else - pExpr->base.param[0].i64 = colId; - pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; - pExpr->base.param[0].nLen = sizeof(int64_t); -#endif + tVariant* pVariant = &pExpr->base.param[0]; + + pVariant->i64 = colId; + pVariant->nType = TSDB_DATA_TYPE_BIGINT; + pVariant->nLen = sizeof(int64_t); + pExpr->base.numOfParams = 1; } @@ -748,10 +747,11 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr SVgroupTableInfo info = {{0}}; for (int32_t m = 0; m < pvg->numOfVgroups; ++m) { if (tt->vgId == pvg->vgroups[m].vgId) { - tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); + memcpy(&info.vgInfo, &pvg->vgroups[m], sizeof(info.vgInfo)); break; } } + assert(info.vgInfo.numOfEps != 0); vgTables = taosArrayInit(4, sizeof(STableIdInfo)); @@ -1347,7 +1347,11 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow pCmd->command = TSDB_SQL_SELECT; tscResetForNextRetrieve(&pSql->res); - assert(pSupporter->f == NULL); + if (pSupporter->f != NULL) { + fclose(pSupporter->f); + pSupporter->f = NULL; + } + taosGetTmpfilePath("ts-join", pSupporter->path); // TODO check for failure @@ -2463,7 +2467,7 @@ static void doConcurrentlySendSubQueries(SSqlObj* pSql) { SSubqueryState *pState = &pSql->subState; // concurrently sent the query requests. 
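One pattern worth noting, inferred from the `tscBuildQueryMsg` changes above and the `tscAllocPayloadFast`/`localBuffer` changes later in this patch: the query payload is now obtained with `malloc`/`realloc` instead of `calloc`, so header fields that previously relied on implicit zeroing (`colCondLen`, `tagCondLen`, the `tsBuf` fields, the UDF offsets, `localBuffer->num`) are now set explicitly. The stand-in struct below is purely illustrative and not a type from the codebase.

```c
/* Hedged illustration of the calloc->malloc pattern: when the zeroing
 * allocator is dropped for speed, fields must be initialized explicitly
 * before the buffer is serialized. DemoHeader is a made-up stand-in. */
#include <stdint.h>
#include <stdlib.h>

typedef struct {
  int32_t colCondLen;  /* stand-ins for the message fields zeroed above */
  int32_t tagCondLen;
  int64_t num;         /* stand-in for tFilePage.num set after malloc   */
} DemoHeader;

static DemoHeader *allocFast(void) {
  DemoHeader *h = (DemoHeader *)malloc(sizeof(*h));  /* contents indeterminate */
  if (h == NULL) {
    return NULL;
  }
  h->colCondLen = 0;  /* explicit zeroing replaces what calloc() used to do */
  h->tagCondLen = 0;
  h->num = 0;
  return h;
}
```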
- const int32_t MAX_REQUEST_PER_TASK = 8; + const int32_t MAX_REQUEST_PER_TASK = 4; int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK; assert(numOfTasks >= 1); @@ -2550,13 +2554,14 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { trs->pExtMemBuffer = pMemoryBuf; trs->pOrderDescriptor = pDesc; - trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); + trs->localBuffer = (tFilePage *)malloc(nBufferSize + sizeof(tFilePage)); if (trs->localBuffer == NULL) { tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno)); tfree(trs); break; } - + + trs->localBuffer->num = 0; trs->subqueryIndex = i; trs->pParentSql = pSql; @@ -2651,7 +2656,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 int32_t subqueryIndex = trsupport->subqueryIndex; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); @@ -2879,7 +2884,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->res.precision = pSql->res.precision; pParentSql->res.numOfRows = 0; pParentSql->res.row = 0; - pParentSql->res.numOfGroups = 0; tscFreeRetrieveSup(pSql); @@ -2930,7 +2934,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR SSubqueryState* pState = &pParentSql->subState; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; if (pParentSql->res.code != TSDB_CODE_SUCCESS) { trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -3058,7 +3062,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { assert(pQueryInfo->numOfTables == 1); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; + SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; // stable query killed or other subquery failed, all query stopped if (pParentSql->res.code != TSDB_CODE_SUCCESS) { @@ -3404,7 +3408,6 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { return; } -// tscRestoreFuncForSTableQuery(pQueryInfo); int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList); assert(numOfRes * rowSize > 0); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 8af340030cccee1431a82eb88344642011f2e019..b3b83db80a70c19f79d1cd6a732d729817436dd3 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -50,6 +50,7 @@ int tscLogFileNum = 10; static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently static pthread_once_t tscinit = PTHREAD_ONCE_INIT; +static pthread_mutex_t setConfMutex = PTHREAD_MUTEX_INITIALIZER; // pthread_once can not return result code, so result code is set to a global variable. static volatile int tscInitRes = 0; @@ -249,6 +250,7 @@ void taos_cleanup(void) { pthread_mutex_destroy(&rpcObjMutex); } + pthread_mutex_destroy(&setConfMutex); taosCacheCleanup(tscVgroupListBuf); tscVgroupListBuf = NULL; @@ -437,3 +439,66 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) 
{ atomic_store_32(&lock, 0); return ret; } + +#include "cJSON.h" +static setConfRet taos_set_config_imp(const char *config){ + setConfRet ret = {SET_CONF_RET_SUCC, {0}}; + static bool setConfFlag = false; + if (setConfFlag) { + ret.retCode = SET_CONF_RET_ERR_ONLY_ONCE; + strcpy(ret.retMsg, "configuration can only set once"); + return ret; + } + taosInitGlobalCfg(); + cJSON *root = cJSON_Parse(config); + if (root == NULL){ + ret.retCode = SET_CONF_RET_ERR_JSON_PARSE; + strcpy(ret.retMsg, "parse json error"); + return ret; + } + + int size = cJSON_GetArraySize(root); + if(!cJSON_IsObject(root) || size == 0) { + ret.retCode = SET_CONF_RET_ERR_JSON_INVALID; + strcpy(ret.retMsg, "json content is invalid, must be not empty object"); + return ret; + } + + if(size >= 1000) { + ret.retCode = SET_CONF_RET_ERR_TOO_LONG; + strcpy(ret.retMsg, "json object size is too long"); + return ret; + } + + for(int i = 0; i < size; i++){ + cJSON *item = cJSON_GetArrayItem(root, i); + if(!item) { + ret.retCode = SET_CONF_RET_ERR_INNER; + strcpy(ret.retMsg, "inner error"); + return ret; + } + if(!taosReadConfigOption(item->string, item->valuestring, NULL, NULL, TAOS_CFG_CSTATUS_OPTION, TSDB_CFG_CTYPE_B_CLIENT)){ + ret.retCode = SET_CONF_RET_ERR_PART; + if (strlen(ret.retMsg) == 0){ + snprintf(ret.retMsg, RET_MSG_LENGTH, "part error|%s", item->string); + }else{ + int tmp = RET_MSG_LENGTH - 1 - (int)strlen(ret.retMsg); + size_t leftSize = tmp >= 0 ? tmp : 0; + strncat(ret.retMsg, "|", leftSize); + tmp = RET_MSG_LENGTH - 1 - (int)strlen(ret.retMsg); + leftSize = tmp >= 0 ? tmp : 0; + strncat(ret.retMsg, item->string, leftSize); + } + } + } + cJSON_Delete(root); + setConfFlag = true; + return ret; +} + +setConfRet taos_set_config(const char *config){ + pthread_mutex_lock(&setConfMutex); + setConfRet ret = taos_set_config_imp(config); + pthread_mutex_unlock(&setConfMutex); + return ret; +} diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 8a1394cdbe74e4f90520f349dc012dd23720b3bc..be61434dacd6660f3685303895109f40a3d66983 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -270,7 +270,10 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) { functionId != TSDB_FUNC_DIFF && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_TS_DUMMY && - functionId != TSDB_FUNC_TID_TAG) { + functionId != TSDB_FUNC_TID_TAG && + functionId != TSDB_FUNC_CEIL && + functionId != TSDB_FUNC_FLOOR && + functionId != TSDB_FUNC_ROUND) { return false; } } @@ -812,7 +815,7 @@ typedef struct SDummyInputInfo { SSDataBlock *block; STableQueryInfo *pTableQueryInfo; SSqlObj *pSql; // refactor: remove it - SFilterInfo *pFilterInfo; + void *pFilterInfo; } SDummyInputInfo; typedef struct SJoinStatus { @@ -828,7 +831,7 @@ typedef struct SJoinOperatorInfo { SRspResultInfo resultInfo; // todo refactor, add this info for each operator } SJoinOperatorInfo; -static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* pFilterInfo) { +static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, void* pFilterInfo) { int32_t offset = 0; char* pData = pRes->data; @@ -845,8 +848,9 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* p // filter data if needed if (pFilterInfo) { - //doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock); - filterSetColFieldData(pFilterInfo, pBlock->info.numOfCols, pBlock->pDataBlock); + SColumnDataParam param = {.numOfCols = pBlock->info.numOfCols, .pDataBlock = pBlock->pDataBlock}; + 
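The `taos_set_config()` entry point added in tscSystem.c above accepts a small JSON object of client-side options, is guarded by a mutex, and is accepted only once per process (a second call returns `SET_CONF_RET_ERR_ONLY_ONCE`). A minimal, hedged usage sketch follows, with the option/value pair borrowed from the accompanying unit tests; it assumes `setConfRet` and the prototype are exposed through `taos.h`.

```c
/* Hedged sketch of the new taos_set_config() API. Assumes setConfRet and the
 * function prototype are exposed through taos.h; "debugFlag" is the client
 * option used in the patch's own test cases. */
#include <stdio.h>
#include "taos.h"

int main(void) {
  setConfRet ret = taos_set_config("{\"debugFlag\":\"131\"}");
  printf("taos_set_config code:%d msg:%s\n", ret.retCode, ret.retMsg);

  /* a second call in the same process is rejected by design */
  setConfRet again = taos_set_config("{\"debugFlag\":\"135\"}");
  printf("second call code:%d msg:%s\n", again.retCode, again.retMsg);

  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos != NULL) {
    taos_close(taos);
  }
  return 0;
}
```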
filterSetColFieldData(pFilterInfo, ¶m, getColumnDataFromId); + bool gotNchar = false; filterConverNcharColumns(pFilterInfo, pBlock->info.rows, &gotNchar); int8_t* p = NULL; @@ -1109,7 +1113,7 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) { } // todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later -SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SFilterInfo* pFilters) { +SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, void* pFilters) { assert(numOfCols > 0); STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX}; @@ -1251,7 +1255,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue // if it is a join query, create join operator here int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns; - SFilterInfo *pFilters = NULL; + void *pFilters = NULL; STblCond *pCond = NULL; if (px->colCond) { pCond = tsGetTableFilter(px->colCond, pTableMeta->id.uid, 0); @@ -1278,7 +1282,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue for(int32_t i = 1; i < px->numOfTables; ++i) { STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta; numOfCol1 = pTableMeta1->tableInfo.numOfColumns; - SFilterInfo *pFilters1 = NULL; + void *pFilters1 = NULL; SSchema* pSchema1 = tscGetTableSchema(pTableMeta1); int32_t n = pTableMeta1->tableInfo.numOfColumns; @@ -1348,14 +1352,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { tfree(pRes->buffer); tfree(pRes->urow); - tfree(pRes->pGroupRec); tfree(pRes->pColumnIndex); - - if (pRes->pArithSup != NULL) { - tfree(pRes->pArithSup->data); - tfree(pRes->pArithSup); - } - tfree(pRes->final); pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free @@ -1473,7 +1470,12 @@ void tscFreeSubobj(SSqlObj* pSql) { tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub); for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i); + if (pSql->pSubs[i] != NULL) { + tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i); + } else { + /* just for python error test case */ + tscDebug("0x%"PRIx64" free sub SqlObj:0x0, index:%d", pSql->self, i); + } taos_free_result(pSql->pSubs[i]); pSql->pSubs[i] = NULL; } @@ -2088,32 +2090,35 @@ bool tscIsInsertData(char* sqlstr) { } while (1); } -int tscAllocPayload(SSqlCmd* pCmd, int size) { +int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) { if (pCmd->payload == NULL) { assert(pCmd->allocSize == 0); - pCmd->payload = (char*)calloc(1, size); - if (pCmd->payload == NULL) { + pCmd->payload = malloc(size); + pCmd->allocSize = (uint32_t) size; + } else if (pCmd->allocSize < size) { + char* tmp = realloc(pCmd->payload, size); + if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - pCmd->allocSize = size; - } else { - if (pCmd->allocSize < (uint32_t)size) { - char* b = realloc(pCmd->payload, size); - if (b == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + pCmd->payload = tmp; + pCmd->allocSize = (uint32_t) size; + } - pCmd->payload = b; - pCmd->allocSize = size; - } + assert(pCmd->allocSize >= size); + return TSDB_CODE_SUCCESS; +} +int32_t tscAllocPayload(SSqlCmd* pCmd, int size) { + assert(size > 0); + + int32_t code = tscAllocPayloadFast(pCmd, (size_t) size); + if (code == TSDB_CODE_SUCCESS) { memset(pCmd->payload, 
0, pCmd->allocSize); } - assert(pCmd->allocSize >= (uint32_t)size && size > 0); - return TSDB_CODE_SUCCESS; + return code; } TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) { @@ -2903,16 +2908,6 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { memset(dest, 0, sizeof(STagCond)); - if (src->tbnameCond.cond != NULL) { - dest->tbnameCond.cond = strdup(src->tbnameCond.cond); - if (dest->tbnameCond.cond == NULL) { - return -1; - } - } - - dest->tbnameCond.uid = src->tbnameCond.uid; - dest->tbnameCond.len = src->tbnameCond.len; - dest->joinInfo.hasJoin = src->joinInfo.hasJoin; for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) { @@ -2931,9 +2926,6 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { } } - - dest->relType = src->relType; - if (src->pCond == NULL) { return 0; } @@ -3023,8 +3015,6 @@ void tscColCondRelease(SArray** pCond) { void tscTagCondRelease(STagCond* pTagCond) { - free(pTagCond->tbnameCond.cond); - if (pTagCond->pCond != NULL) { size_t s = taosArrayGetSize(pTagCond->pCond); for (int32_t i = 0; i < s; ++i) { @@ -3370,11 +3360,11 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) { size_t num = taosArrayGetSize(pVgroupTables); for (size_t i = 0; i < num; i++) { SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); - +#if 0 for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { tfree(pInfo->vgInfo.epAddr[j].fqdn); } - +#endif taosArrayDestroy(pInfo->itemList); } @@ -3388,9 +3378,9 @@ void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { assert(size > index); SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); - for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { - tfree(pInfo->vgInfo.epAddr[j].fqdn); - } +// for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { +// tfree(pInfo->vgInfo.epAddr[j].fqdn); +// } taosArrayDestroy(pInfo->itemList); taosArrayRemove(pVgroupTable, index); @@ -3400,9 +3390,12 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { memset(info, 0, sizeof(SVgroupTableInfo)); info->vgInfo = pInfo->vgInfo; + +#if 0 for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn); } +#endif if (pInfo->itemList) { info->itemList = taosArrayDup(pInfo->itemList); @@ -3465,13 +3458,9 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } pTableMetaInfo->pTableMeta = pTableMeta; - if (pTableMetaInfo->pTableMeta == NULL) { - pTableMetaInfo->tableMetaSize = 0; - } else { - pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); - } + pTableMetaInfo->tableMetaSize = (pTableMetaInfo->pTableMeta == NULL)? 
0:tscGetTableMetaSize(pTableMeta); + pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize); - if (vgroupList != NULL) { pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); @@ -3719,8 +3708,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; + pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); } @@ -3761,7 +3750,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); - } else { // transfer the ownership of pTableMeta to the newly create sql object. STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0); if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) { @@ -3771,8 +3759,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList, - pTableMetaInfo->pVgroupTables); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, + pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); } // this case cannot be happened @@ -4416,8 +4404,8 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { return NULL; } - size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupList->numOfVgroups; - SVgroupsInfo* pNew = calloc(1, size); + size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupMsg) * vgroupList->numOfVgroups; + SVgroupsInfo* pNew = malloc(size); if (pNew == NULL) { return NULL; } @@ -4425,15 +4413,15 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { pNew->numOfVgroups = vgroupList->numOfVgroups; for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SVgroupInfo* pNewVInfo = &pNew->vgroups[i]; + SVgroupMsg* pNewVInfo = &pNew->vgroups[i]; - SVgroupInfo* pvInfo = &vgroupList->vgroups[i]; + SVgroupMsg* pvInfo = &vgroupList->vgroups[i]; pNewVInfo->vgId = pvInfo->vgId; pNewVInfo->numOfEps = pvInfo->numOfEps; for(int32_t j = 0; j < pvInfo->numOfEps; ++j) { - pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn); pNewVInfo->epAddr[j].port = pvInfo->epAddr[j].port; + tstrncpy(pNewVInfo->epAddr[j].fqdn, pvInfo->epAddr[j].fqdn, TSDB_FQDN_LEN); } } @@ -4445,8 +4433,9 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { return NULL; } +#if 0 for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i]; + SVgroupMsg* pVgroupInfo = &vgroupList->vgroups[i]; for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) { tfree(pVgroupInfo->epAddr[j].fqdn); @@ -4457,10 +4446,11 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { } } +#endif tfree(vgroupList); return NULL; } - +# if 0 void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { dst->vgId = src->vgId; dst->numOfEps = src->numOfEps; @@ -4473,6 +4463,8 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { } } +#endif + char* serializeTagData(STagData* pTagData, char* pMsg) { int32_t n = (int32_t) 
strlen(pTagData->name); *(int32_t*) pMsg = htonl(n); @@ -4613,11 +4605,12 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) { SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) { assert(pVgroupsInfo != NULL); - size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); + size_t size = sizeof(SVgroupMsg) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo* pInfo = calloc(1, size); pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups; for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) { - tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); + memcpy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m], sizeof(SVgroupMsg)); +// tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); } return pInfo; } diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt index 24bfb44ac90e11e01ba99423aa68bd5a9511f746..5de18942acbb5b3ac59d2496728c500b63246fe9 100644 --- a/src/client/tests/CMakeLists.txt +++ b/src/client/tests/CMakeLists.txt @@ -17,5 +17,5 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(cliTest ${SOURCE_LIST}) - TARGET_LINK_LIBRARIES(cliTest taos tutil common gtest pthread) + TARGET_LINK_LIBRARIES(cliTest taos cJson tutil common gtest pthread) ENDIF() diff --git a/src/client/tests/setConfigTest.cpp b/src/client/tests/setConfigTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb016715f6ad2f5311aa2d81b608c2043f86c4f0 --- /dev/null +++ b/src/client/tests/setConfigTest.cpp @@ -0,0 +1,71 @@ +#include +#include + +#include "taos.h" +#include "tglobal.h" +#include "tconfig.h" + +/* test set config function */ +TEST(testCase, set_config_test1) { + const char *config = "{\"debugFlag\":\"131\"}"; + setConfRet ret = taos_set_config(config); + ASSERT_EQ(ret.retCode, 0); + printf("msg:%d->%s", ret.retCode, ret.retMsg); + + const char *config2 = "{\"debugFlag\":\"199\"}"; + ret = taos_set_config(config2); // not take effect + ASSERT_EQ(ret.retCode, -5); + printf("msg:%d->%s", ret.retCode, ret.retMsg); + + bool readResult = taosReadGlobalCfg(); // load file config, debugFlag not take effect + ASSERT_TRUE(readResult); + int32_t checkResult = taosCheckGlobalCfg(); + ASSERT_EQ(checkResult, 0); + + SGlobalCfg *cfg = taosGetConfigOption("debugFlag"); + ASSERT_EQ(cfg->cfgStatus, TAOS_CFG_CSTATUS_OPTION); + int32_t result = *(int32_t *)cfg->ptr; + ASSERT_EQ(result, 131); +} + +TEST(testCase, set_config_test2) { + const char *config = "{\"numOfCommitThreads\":\"10\"}"; + taos_set_config(config); + + bool readResult = taosReadGlobalCfg(); // load file config, debugFlag not take effect + ASSERT_TRUE(readResult); + int32_t checkResult = taosCheckGlobalCfg(); + ASSERT_EQ(checkResult, 0); + + SGlobalCfg *cfg = taosGetConfigOption("numOfCommitThreads"); + int32_t result = *(int32_t*)cfg->ptr; + ASSERT_NE(result, 10); // numOfCommitThreads not type of TSDB_CFG_CTYPE_B_CLIENT +} + +TEST(testCase, set_config_test3) { + const char *config = "{\"numOfCoitThreads\":\"10\", \"esdfa\":\"10\"}"; + setConfRet ret = taos_set_config(config); + ASSERT_EQ(ret.retCode, -1); + printf("msg:%d->%s", ret.retCode, ret.retMsg); +} + +TEST(testCase, set_config_test4) { + const char *config = "{null}"; + setConfRet ret = taos_set_config(config); + ASSERT_EQ(ret.retCode, -4); + printf("msg:%d->%s", ret.retCode, ret.retMsg); +} + +TEST(testCase, set_config_test5) { + const char *config = "\"ddd\""; + setConfRet 
ret = taos_set_config(config); + ASSERT_EQ(ret.retCode, -3); + printf("msg:%d->%s", ret.retCode, ret.retMsg); +} + +TEST(testCase, set_config_test6) { + const char *config = "{\"numOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitT3333dd\":\"10\", \"esdfa\":\"10\"}"; + setConfRet ret = taos_set_config(config); + ASSERT_EQ(ret.retCode, -1); + printf("msg:%d->%s", ret.retCode, ret.retMsg); +} diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index db71559df6334ed935a44f3822f78ff671e8dab2..bfeb3a6dfeee22f793c82748611c28ec537e8825 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -34,10 +34,12 @@ struct SSchema; #define QUERY_COND_REL_PREFIX_IN "IN|" #define QUERY_COND_REL_PREFIX_LIKE "LIKE|" #define QUERY_COND_REL_PREFIX_MATCH "MATCH|" +#define QUERY_COND_REL_PREFIX_NMATCH "NMATCH|" #define QUERY_COND_REL_PREFIX_IN_LEN 3 #define QUERY_COND_REL_PREFIX_LIKE_LEN 5 #define QUERY_COND_REL_PREFIX_MATCH_LEN 6 +#define QUERY_COND_REL_PREFIX_NMATCH_LEN 7 typedef bool (*__result_filter_fn_t)(const void *, void *); typedef void (*__do_filter_suppl_fn_t)(void *, void *); @@ -86,7 +88,6 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); tExprNode* exprTreeFromBinary(const void* data, size_t size); -tExprNode* exprTreeFromTableName(const char* tbnameCond); tExprNode* exprdup(tExprNode* pTree); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 360a83eea8df9392b059e73ac59075b27a96f7c3..604ce89432bcf662b319fb2ec11f55026450a2be 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -224,6 +224,8 @@ extern uint32_t maxRange; extern uint32_t curRange; extern char Compressor[]; #endif +// long query +extern int8_t tsDeadLockKillQuery; typedef struct { char dir[TSDB_FILENAME_LEN]; diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index 2c72b7bd591ab4cb2d11d1420ae97e7cc2123272..cc2bb8803badc2aae2e80200691be0439bac3afe 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -325,14 +325,6 @@ static void* exception_calloc(size_t nmemb, size_t size) { return p; } -static void* exception_malloc(size_t size) { - void* p = malloc(size); - if (p == NULL) { - THROW(TSDB_CODE_QRY_OUT_OF_MEMORY); - } - return p; -} - static UNUSED_FUNC char* exception_strdup(const char* str) { char* p = strdup(str); if (p == NULL) { @@ -395,88 +387,6 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size) { return exprTreeFromBinaryImpl(&br); } -tExprNode* 
exprTreeFromTableName(const char* tbnameCond) { - if (!tbnameCond) { - return NULL; - } - - int32_t anchor = CLEANUP_GET_ANCHOR(); - - tExprNode* expr = exception_calloc(1, sizeof(tExprNode)); - CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL); - - expr->nodeType = TSQL_NODE_EXPR; - - tExprNode* left = exception_calloc(1, sizeof(tExprNode)); - expr->_node.pLeft = left; - - left->nodeType = TSQL_NODE_COL; - SSchema* pSchema = exception_calloc(1, sizeof(SSchema)); - left->pSchema = pSchema; - - *pSchema = *tGetTbnameColumnSchema(); - - tExprNode* right = exception_calloc(1, sizeof(tExprNode)); - expr->_node.pRight = right; - - if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN) == 0) { - right->nodeType = TSQL_NODE_VALUE; - expr->_node.optr = TSDB_RELATION_LIKE; - tVariant* pVal = exception_calloc(1, sizeof(tVariant)); - right->pVal = pVal; - size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_LIKE_LEN) + 1; - pVal->pz = exception_malloc(len); - memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_LIKE_LEN, len); - pVal->nType = TSDB_DATA_TYPE_BINARY; - pVal->nLen = (int32_t)len; - - } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_MATCH, QUERY_COND_REL_PREFIX_MATCH_LEN) == 0) { - right->nodeType = TSQL_NODE_VALUE; - expr->_node.optr = TSDB_RELATION_MATCH; - tVariant* pVal = exception_calloc(1, sizeof(tVariant)); - right->pVal = pVal; - size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_MATCH_LEN) + 1; - pVal->pz = exception_malloc(len); - memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_MATCH_LEN, len); - pVal->nType = TSDB_DATA_TYPE_BINARY; - pVal->nLen = (int32_t)len; - - } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN) == 0) { - right->nodeType = TSQL_NODE_VALUE; - expr->_node.optr = TSDB_RELATION_IN; - tVariant* pVal = exception_calloc(1, sizeof(tVariant)); - right->pVal = pVal; - pVal->nType = TSDB_DATA_TYPE_POINTER_ARRAY; - pVal->arr = taosArrayInit(2, POINTER_BYTES); - - const char* cond = tbnameCond + QUERY_COND_REL_PREFIX_IN_LEN; - for (const char *e = cond; *e != 0; e++) { - if (*e == TS_PATH_DELIMITER[0]) { - cond = e + 1; - } else if (*e == ',') { - size_t len = e - cond; - char* p = exception_malloc(len + VARSTR_HEADER_SIZE); - STR_WITH_SIZE_TO_VARSTR(p, cond, (VarDataLenT)len); - cond += len; - taosArrayPush(pVal->arr, &p); - } - } - - if (*cond != 0) { - size_t len = strlen(cond) + VARSTR_HEADER_SIZE; - - char* p = exception_malloc(len); - STR_WITH_SIZE_TO_VARSTR(p, cond, (VarDataLenT)(len - VARSTR_HEADER_SIZE)); - taosArrayPush(pVal->arr, &p); - } - - taosArraySortString(pVal->arr, taosArrayCompareString); - } - - CLEANUP_EXECUTE_TO(anchor, false); - return expr; -} - void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) { SBufferReader br = tbufInitReader(buf, len, false); uint32_t type = tbufReadUint32(&br); diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 6e73227233591fa076893174b65a774f229ca4e5..339fa35bb3009db96c9c6e0cabea6b60881f05c5 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -279,6 +279,9 @@ uint32_t curRange = 100; // range char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR #endif +// long query death-lock +int8_t tsDeadLockKillQuery = 0; + int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; void (*monExecuteSQLFp)(char *sql) = NULL; @@ -1036,6 +1039,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); + 
cfg.option = "maxRegexStringLen"; + cfg.ptr = &tsMaxRegexStringLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = TSDB_MAX_FIELD_LEN; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + cfg.option = "maxNumOfOrderedRes"; cfg.ptr = &tsMaxNumOfOrderedResults; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1613,7 +1626,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); + // enable kill long query + cfg.option = "deadLockKillQuery"; + cfg.ptr = &tsDeadLockKillQuery; + cfg.valType = TAOS_CFG_VTYPE_INT8; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 1; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; @@ -1667,6 +1690,9 @@ static void doInitGlobalConfig(void) { cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM); +#else + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5); #endif } diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index e432dac1cea593b371a173f334e5313236091ab3..1e5cede714820f29defe3c6b458b2daf467bc4d2 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.35-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index ef57198e78d2268faba526d5506b0dc384f5766f..7caf46848d18c4491cdea1ab50df31d8d2d26daf 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 2.0.35 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 6b9fc9d96ce16700ee1243ef7c148a423a965d0b..810a85f8a33b3f244dab81e349b9df786ec50c21 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 2.0.35 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -113,6 +113,7 @@ **/AppMemoryLeakTest.java + **/JDBCTypeAndTypeCompareTest.java **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java **/DatetimeBefore1970Test.java **/FailOverTest.java diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 521a88b128ff930510bf00cdcb6a12cbc3211742..307451e014c59c1c3419f1a9daff4f89e8b90d46 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -118,9 +118,6 @@ public class TSDBDriver extends 
AbstractDriver { } public Connection connect(String url, Properties info) throws SQLException { - if (url == null) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET); - if (!acceptsURL(url)) return null; @@ -135,8 +132,7 @@ public class TSDBDriver extends AbstractDriver { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED); try { - TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), - (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE)); + TSDBJNIConnector.init(props); return new TSDBConnection(props, this.dbMetaData); } catch (SQLWarning sqlWarning) { sqlWarning.printStackTrace(); @@ -205,6 +201,7 @@ public class TSDBDriver extends AbstractDriver { String dbProductName = url.substring(0, beginningOfSlashes); dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1); dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); + urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PRODUCT_NAME, dbProductName); // parse database name url = url.substring(beginningOfSlashes + 2); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index bdb3ea410005cadd865de1d9e080dd5b9f20834f..0970148b1dfb6c6c1fb85330e312bf2c8168b3c7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -35,6 +35,7 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_JSON_FORMAT, "invalid json format"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index 2207db6f9379595e68b8ed00ea8f7298ca3b45ad..0f4427fa20e272917df0327552efd1a80cd56b4d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -31,6 +31,7 @@ public class TSDBErrorNumbers { public static final int ERROR_RESTFul_Client_IOException = 0x2318; public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required + public static final int ERROR_INVALID_JSON_FORMAT = 0x231b; public static final int ERROR_UNKNOWN = 0x2350; //unknown error @@ -72,6 +73,7 @@ public class TSDBErrorNumbers { errorNumbers.add(ERROR_RESTFul_Client_IOException); errorNumbers.add(ERROR_USER_IS_REQUIRED); errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED); + errorNumbers.add(ERROR_INVALID_JSON_FORMAT); errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBException.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBException.java new file mode 100644 index 0000000000000000000000000000000000000000..31299a1c6f37a8b75521a65e7de09f5162558dd6 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBException.java @@ -0,0 +1,22 @@ +package com.taosdata.jdbc; + +public class TSDBException { + 
private int code; + private String message; + + public int getCode() { + return code; + } + + public void setCode(int code) { + this.code = code; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 4a9e80ba53b096f057840eab67e61418332dbf81..aaada2e78ec284f4019b29465a38db109cf9d80a 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -16,18 +16,21 @@ */ package com.taosdata.jdbc; +import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.utils.TaosInfo; import java.nio.ByteBuffer; import java.sql.SQLException; import java.sql.SQLWarning; import java.util.List; +import java.util.Properties; /** * JNI connector */ public class TSDBJNIConnector { - private static volatile Boolean isInitialized = false; + private static final Object LOCK = new Object(); + private static volatile boolean isInitialized; private final TaosInfo taosInfo = TaosInfo.getInstance(); private long taos = TSDBConstants.JNI_NULL_POINTER; // Connection pointer used in C @@ -38,24 +41,27 @@ public class TSDBJNIConnector { System.loadLibrary("taos"); } - public boolean isClosed() { - return this.taos == TSDBConstants.JNI_NULL_POINTER; - } + public static void init(Properties props) throws SQLWarning { + synchronized (LOCK) { + if (!isInitialized) { - public boolean isResultsetClosed() { - return this.isResultsetClosed; - } + JSONObject configJSON = new JSONObject(); + for (String key : props.stringPropertyNames()) { + configJSON.put(key, props.getProperty(key)); + } + setConfigImp(configJSON.toJSONString()); - public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning { - synchronized (isInitialized) { - if (!isInitialized) { - initImp(configDir); + initImp(props.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, null)); + + String locale = props.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE); if (setOptions(0, locale) < 0) { throw TSDBError.createSQLWarning("Failed to set locale: " + locale + ". System default will be used."); } + String charset = props.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET); if (setOptions(1, charset) < 0) { throw TSDBError.createSQLWarning("Failed to set charset: " + charset + ". System default will be used."); } + String timezone = props.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE); if (setOptions(2, timezone) < 0) { throw TSDBError.createSQLWarning("Failed to set timezone: " + timezone + ". 
System default will be used."); } @@ -65,11 +71,13 @@ public class TSDBJNIConnector { } } - public static native void initImp(String configDir); + private static native void initImp(String configDir); + + private static native int setOptions(int optionIndex, String optionValue); - public static native int setOptions(int optionIndex, String optionValue); + private static native String getTsCharset(); - public static native String getTsCharset(); + private static native TSDBException setConfigImp(String config); public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException { if (this.taos != TSDBConstants.JNI_NULL_POINTER) { @@ -159,6 +167,14 @@ public class TSDBJNIConnector { private native long isUpdateQueryImp(long connection, long pSql); + public boolean isClosed() { + return this.taos == TSDBConstants.JNI_NULL_POINTER; + } + + public boolean isResultsetClosed() { + return this.isResultsetClosed; + } + /** * Free result set operation from C to release result set pointer by JNI */ @@ -351,4 +367,6 @@ public class TSDBJNIConnector { } private native int insertLinesImp(String[] lines, long conn); + + } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SetConfigurationInJNITest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SetConfigurationInJNITest.java new file mode 100644 index 0000000000000000000000000000000000000000..6a983cd5bdd6d886dcac01f6085c70eade4f7cf5 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SetConfigurationInJNITest.java @@ -0,0 +1,249 @@ +package com.taosdata.jdbc; + +import org.junit.Test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + + +public class SetConfigurationInJNITest { + + private String host = "127.0.0.1"; + private String dbname = "test_set_config"; + + @Test + public void setConfigInUrl() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata&debugFlag=143&rpcTimer=500"); + Statement stmt = conn.createStatement(); + + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void setConfigInProperties() { + try { + Properties props = new Properties(); + props.setProperty("debugFlag", "143"); + props.setProperty("r pcTimer", "500"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + //test case1:set debugFlag=135 + //expect:debugFlag:135 + //result:pass + public void setConfigfordebugFlag() { + try { + Properties props = new Properties(); + //set debugFlag=135 + props.setProperty("debugFlag", "135"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + 
":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + //test case2:set debugFlag=abc (wrong type) + //expect:debugFlag:135 + //result:pass + public void setConfigforwrongtype() { + try { + Properties props = new Properties(); + //set debugFlag=135 + props.setProperty("debugFlag", "abc"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + //test case3:set rpcTimer=0 (smaller than the boundary conditions) + //expect:rpcTimer:300 + //result:pass + public void setConfigrpcTimer() { + try { + Properties props = new Properties(); + //set rpcTimer=0 + props.setProperty("rpcTimer", "0"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + //test case4:set rpcMaxTime=10000 (bigger than the boundary conditions) + //expect:rpcMaxTime:600 + //result:pass + public void setConfigforrpcMaxTime() { + try { + Properties props = new Properties(); + //set rpcMaxTime=10000 + props.setProperty("rpcMaxTime", "10000"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + //test case5:set numOfThreadsPerCore=aaa (wrong type) + //expect:numOfThreadsPerCore:1.0 + //result:pass + public void setConfigfornumOfThreadsPerCore() { + try { + Properties props = new Properties(); + //set numOfThreadsPerCore=aaa + props.setProperty("numOfThreadsPerCore", "aaa"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop 
database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + //test case6:set numOfThreadsPerCore=100000 (bigger than the boundary conditions) + //expect:numOfThreadsPerCore:1.0 + //result:pass + public void setConfignumOfThreadsPerCore() { + try { + Properties props = new Properties(); + //set numOfThreadsPerCore=100000 + props.setProperty("numOfThreadsPerCore", "100000"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + // test case7:set both true and wrong config(debugFlag=0,rpcDebugFlag=143,cDebugFlag=143,rpcTimer=100000) + // expect:rpcDebugFlag:143,cDebugFlag:143,rpcTimer:300 + // result:pass + public void setConfigformaxTmrCtrl() { + try { + Properties props = new Properties(); + props.setProperty("debugFlag", "0"); + props.setProperty("rpcDebugFlag", "143"); + props.setProperty("cDebugFlag", "143"); + props.setProperty("rpcTimer", "100000"); + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + @Test + //test case 8:use url to set with wrong type(debugFlag=abc,rpcTimer=abc) + //expect:default value + //result:pass + public void setConfigInUrlwithwrongtype() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata&debugFlag=abc&rpcTimer=abc"); + Statement stmt = conn.createStatement(); + + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))"); + stmt.execute("drop database if exists " + dbname); + + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java index 88ff5d3a811e17aaabbeb0a451fbff010307ab6d..8be6ae6b1c566abcd7ec398e7df3f5308e29e1b1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java @@ -5,9 +5,9 @@ import org.junit.Test; import java.lang.management.ManagementFactory; import java.lang.management.RuntimeMXBean; import java.sql.SQLException; -import java.sql.SQLWarning; import java.util.ArrayList; import java.util.List; +import java.util.Properties; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -19,25 +19,25 @@ public class TSDBJNIConnectorTest { @Test 
public void test() { try { - try { //change sleepSeconds when debugging with attach to process to find PID int sleepSeconds = -1; - if (sleepSeconds>0) { + if (sleepSeconds > 0) { RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean(); String jvmName = runtimeBean.getName(); long pid = Long.valueOf(jvmName.split("@")[0]); System.out.println("JVM PID = " + pid); - Thread.sleep(sleepSeconds*1000); + Thread.sleep(sleepSeconds * 1000); } - } - catch (Exception e) { + } catch (Exception e) { e.printStackTrace(); } // init - TSDBJNIConnector.init("/etc/taos", null, null, null); + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos"); + TSDBJNIConnector.init(properties); // connect TSDBJNIConnector connector = new TSDBJNIConnector(); @@ -45,12 +45,12 @@ public class TSDBJNIConnectorTest { // setup String setupSqlStrs[] = {"create database if not exists d precision \"us\"", - "create table if not exists d.t(ts timestamp, f int)", - "create database if not exists d2", - "create table if not exists d2.t2(ts timestamp, f int)", - "insert into d.t values(now+100s, 100)", - "insert into d2.t2 values(now+200s, 200)" - }; + "create table if not exists d.t(ts timestamp, f int)", + "create database if not exists d2", + "create table if not exists d2.t2(ts timestamp, f int)", + "insert into d.t values(now+100s, 100)", + "insert into d2.t2 values(now+200s, 200)" + }; for (String setupSqlStr : setupSqlStrs) { long setupSql = connector.executeQuery(setupSqlStr); @@ -115,15 +115,13 @@ public class TSDBJNIConnectorTest { } // close statement connector.executeQuery("use d"); - String[] lines = new String[] {"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", - "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"}; + String[] lines = new String[]{"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"}; connector.insertLines(lines); // close connection connector.closeConnection(); - } catch (SQLWarning throwables) { - throwables.printStackTrace(); } catch (SQLException e) { e.printStackTrace(); } @@ -140,11 +138,7 @@ public class TSDBJNIConnectorTest { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL); } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0); - } else if (code == TSDBConstants.JNI_FETCH_END) { - return false; - } else { - return true; - } + } else return code != TSDBConstants.JNI_FETCH_END; } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java new file mode 100644 index 0000000000000000000000000000000000000000..eb3b2985dfaff1b956909a50ca23470279cb48ca --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java @@ -0,0 +1,34 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Test; + +import java.sql.*; + +public class JDBCTypeAndTypeCompareTest { + + @Test + public void test() throws SQLException { + Connection conn = DriverManager.getConnection("jdbc:TAOS://192.168.17.156:6030/", "root", "taosdata"); + Statement stmt = conn.createStatement(); + + stmt.execute("drop database if 
exists test"); + stmt.execute("create database if not exists test"); + stmt.execute("use test"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10) )"); + stmt.execute("insert into weather values(now, 1, 2, 3.0, 4.0, 5, 6, true, 'test','test')"); + + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + String columnName = meta.getColumnName(i); + String columnTypeName = meta.getColumnTypeName(i); + Object value = rs.getObject(i); + System.out.printf("columnName : %s, columnTypeName: %s, JDBCType: %s\n", columnName, columnTypeName, value.getClass().getName()); + } + } + + stmt.close(); + conn.close(); + } +} diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index aad9d1fdbfd4f900fe2db96dadbf343ea922be22..c5737ea5a07b7678e058307dfe3b47546dd99909 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -835,8 +835,14 @@ def taos_insert_telnet_lines(connection, lines): p_lines = lines_type(*lines) errno = _libtaos.taos_insert_telnet_lines(connection, p_lines, num_of_lines) if errno != 0: - raise LinesError("insert telnet lines error", errno) + raise TelnetLinesError("insert telnet lines error", errno) +def taos_insert_json_payload(connection, payload): + # type: (c_void_p, list[str] | tuple(str)) -> None + payload = payload.encode("utf-8") + errno = _libtaos.taos_insert_json_payload(connection, payload) + if errno != 0: + raise JsonPayloadError("insert json payload error", errno) class CTaosInterface(object): def __init__(self, config=None): diff --git a/src/connector/python/taos/connection.py b/src/connector/python/taos/connection.py index a8a71ecc3a8a5f2bdc960df364213e80018a70fe..35aca1fb26c1e612c3b3f6b1d8c794495bed0035 100644 --- a/src/connector/python/taos/connection.py +++ b/src/connector/python/taos/connection.py @@ -154,6 +154,25 @@ class TaosConnection(object): """ return taos_insert_telnet_lines(self._conn, lines) + def insert_json_payload(self, payload): + """OpenTSDB HTTP JSON format support + + ## Example + "{ + "metric": "cpu_load_0", + "timestamp": 1626006833610123, + "value": 55.5, + "tags": + { + "host": "ubuntu", + "interface": "eth0", + "Id": "tb0" + } + }" + + """ + return taos_insert_json_payload(self._conn, payload) + def cursor(self): # type: () -> TaosCursor """Return a new Cursor object using the connection.""" diff --git a/src/connector/python/taos/error.py b/src/connector/python/taos/error.py index a30adbb162f1c194bdfcf4cca5c43f01107a9776..f6a9d41f56a3fb071080daaae3bdd840190b154d 100644 --- a/src/connector/python/taos/error.py +++ b/src/connector/python/taos/error.py @@ -83,4 +83,14 @@ class ResultError(DatabaseError): class LinesError(DatabaseError): """taos_insert_lines errors.""" - pass \ No newline at end of file + pass + +class TelnetLinesError(DatabaseError): + """taos_insert_telnet_lines errors.""" + + pass + +class JsonPayloadError(DatabaseError): + """taos_insert_json_payload errors.""" + + pass diff --git a/src/connector/python/taos/field.py b/src/connector/python/taos/field.py index 445cd8afdba6f2512c73be95c9b0dbd8dc00da8a..b0bec58b932f2136b868739bb28fca04de759e3f 100644 --- a/src/connector/python/taos/field.py +++ b/src/connector/python/taos/field.py @@ -165,12 +165,14 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, 
precision=Field assert nbytes is not None res = [] for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: + rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop() + chars = ctypes.cast(c_char_p(data + nbytes * i + 2), ctypes.POINTER(c_char * rbyte)) + buffer = create_string_buffer(rbyte + 1) + buffer[:rbyte] = chars[0][:rbyte] + if rbyte == 1 and buffer[0] == b'\xff': res.append(None) + else: + res.append(cast(buffer, c_char_p).value.decode()) return res @@ -179,11 +181,14 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldT assert nbytes is not None res = [] for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: + rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop() + chars = ctypes.cast(c_char_p(data + nbytes * i + 2), ctypes.POINTER(c_char * rbyte)) + buffer = create_string_buffer(rbyte + 1) + buffer[:rbyte] = chars[0][:rbyte] + if rbyte == 4 and buffer[:4] == b'\xff'*4: res.append(None) + else: + res.append(cast(buffer, c_char_p).value.decode()) return res diff --git a/src/inc/query.h b/src/inc/query.h index fb9cbff8584892b4a6bc6e4a6ce046a7500aef39..0872e3dbaa517ded77dd758b30e69f273c13a580 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -76,6 +76,11 @@ void* qGetResultRetrieveMsg(qinfo_t qinfo); */ int32_t qKillQuery(qinfo_t qinfo); +//kill by qid +int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount); + +bool qSolveCommitNoBlock(void* pRepo, void* pMgmt); + int32_t qQueryCompleted(qinfo_t qinfo); /** diff --git a/src/inc/taos.h b/src/inc/taos.h index c61b733b7f0969bb8e030a2066db6d7d9fde0fd5..bfa78ff4c2ce21df66f532c133980023df437dbe 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -63,6 +63,22 @@ typedef struct taosField { int16_t bytes; } TAOS_FIELD; +typedef enum { + SET_CONF_RET_SUCC = 0, + SET_CONF_RET_ERR_PART = -1, + SET_CONF_RET_ERR_INNER = -2, + SET_CONF_RET_ERR_JSON_INVALID = -3, + SET_CONF_RET_ERR_JSON_PARSE = -4, + SET_CONF_RET_ERR_ONLY_ONCE = -5, + SET_CONF_RET_ERR_TOO_LONG = -6 +} SET_CONF_RET_CODE; + +#define RET_MSG_LENGTH 1024 +typedef struct setConfRet { + SET_CONF_RET_CODE retCode; + char retMsg[RET_MSG_LENGTH]; +} setConfRet; + #ifdef _TD_GO_DLL_ #define DLL_EXPORT __declspec(dllexport) #else @@ -72,6 +88,7 @@ typedef struct taosField { DLL_EXPORT int taos_init(); DLL_EXPORT void taos_cleanup(void); DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); +DLL_EXPORT setConfRet taos_set_config(const char *config); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); DLL_EXPORT void taos_close(TAOS *taos); @@ -175,6 +192,8 @@ DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines); DLL_EXPORT int taos_insert_telnet_lines(TAOS* taos, char* lines[], int numLines); +DLL_EXPORT int taos_insert_json_payload(TAOS* taos, char* payload); + #ifdef __cplusplus } #endif diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 0cad8cc2786f4fa7df3a3ad49d363f6f6c75566b..fda6347223e752895872a4073dd2786abcd65e6f 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -166,8 
+166,10 @@ do { \ #define TSDB_RELATION_NOT 13 #define TSDB_RELATION_MATCH 14 -#define TSDB_RELATION_QUESTION 15 -#define TSDB_RELATION_ARROW 16 +#define TSDB_RELATION_NMATCH 15 + +#define TSDB_RELATION_QUESTION 16 +#define TSDB_RELATION_ARROW 17 #define TSDB_BINARY_OP_ADD 30 #define TSDB_BINARY_OP_SUBTRACT 31 diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 882aca2b5259385c6c7f308d1764f8da9bea80e9..d59b88c7e698b3e965b5923efdc760e0289f7250 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -35,6 +35,7 @@ int32_t* taosGetErrno(); #define terrno (*taosGetErrno()) #define TSDB_CODE_SUCCESS 0 +#define TSDB_CODE_FAILED -1 // unknown or needn't tell detail error // rpc #define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress") @@ -107,6 +108,9 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length") #define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length") #define TSDB_CODE_TSC_DUP_TAG_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220) //"duplicated tag names") +#define TSDB_CODE_TSC_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x0221) //"Invalid JSON format") +#define TSDB_CODE_TSC_INVALID_JSON_TYPE TAOS_DEF_ERROR_CODE(0, 0x0222) //"Invalid JSON data type") +#define TSDB_CODE_TSC_VALUE_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x0223) //"Value out of range") // mnode #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 8f5269c158bd4a733d08b727ed0b3e3741821b25..c3c8625fec3290e2d39d64f53a173b43cf21d7e3 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -492,7 +492,6 @@ typedef struct { SSessionWindow sw; // session window uint16_t tagCondLen; // tag length in current query uint16_t colCondLen; // column length in current query - uint32_t tbnameCondLen; // table name filter condition string length int16_t numOfGroupCols; // num of group by columns int16_t orderByIdx; int16_t orderType; // used in group by xx order by xxx @@ -502,7 +501,6 @@ typedef struct { int64_t offset; uint32_t queryType; // denote another query process int16_t numOfOutput; // final output columns numbers - int16_t tagNameRelType; // relation of tag criteria and tbname criteria int16_t fillType; // interpolate type uint64_t fillVal; // default value array list int32_t secondStageOutput; @@ -766,27 +764,16 @@ typedef struct SSTableVgroupMsg { int32_t numOfTables; } SSTableVgroupMsg, SSTableVgroupRspMsg; -typedef struct { - int32_t vgId; - int8_t numOfEps; - SEpAddr1 epAddr[TSDB_MAX_REPLICA]; -} SVgroupInfo; - typedef struct { int32_t vgId; int8_t numOfEps; SEpAddrMsg epAddr[TSDB_MAX_REPLICA]; } SVgroupMsg; -typedef struct { - int32_t numOfVgroups; - SVgroupInfo vgroups[]; -} SVgroupsInfo; - typedef struct { int32_t numOfVgroups; SVgroupMsg vgroups[]; -} SVgroupsMsg; +} SVgroupsMsg, SVgroupsInfo; typedef struct STableMetaMsg { int32_t contLen; diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 7abe3e99c720af1682fc103beec9a5d4caeb09eb..4e11e4f2478fe0616701e0d183d38455b9526514 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -39,6 +39,7 @@ extern "C" { #define TSDB_STATUS_COMMIT_START 1 #define TSDB_STATUS_COMMIT_OVER 2 +#define TSDB_STATUS_COMMIT_NOBLOCK 3 //commit no block, need to be solved // TSDB STATE DEFINITION #define TSDB_STATE_OK 0x0 @@ -351,8 +352,7 @@ SArray *tsdbRetrieveDataBlock(TsdbQueryHandleT *pQueryHandle, SArray *pColumnIdL * @param pTagCond. 
tag query condition */ int32_t tsdbQuerySTableByTagCond(STsdbRepo *tsdb, uint64_t uid, TSKEY key, const char *pTagCond, size_t len, - int16_t tagNameRelType, const char *tbnameCond, STableGroupInfo *pGroupList, - SColIndex *pColIndex, int32_t numOfCols); + STableGroupInfo *pGroupList, SColIndex *pColIndex, int32_t numOfCols); /** * destroy the created table group list, which is generated by tag query @@ -413,6 +413,11 @@ int tsdbSyncRecv(void *pRepo, SOCKET socketFd); // For TSDB Compact int tsdbCompact(STsdbRepo *pRepo); +// For TSDB Health Monitor + +// no problem return true +bool tsdbNoProblem(STsdbRepo* pRepo); + #ifdef __cplusplus } #endif diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 5a4389b4679ba6e6825c188c4128a57d79b493c2..576d748565fc189472f99362f1fb92ac13fa08b3 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -39,181 +39,182 @@ #define TK_IS 21 #define TK_LIKE 22 #define TK_MATCH 23 -#define TK_GLOB 24 -#define TK_BETWEEN 25 -#define TK_IN 26 -#define TK_GT 27 -#define TK_GE 28 -#define TK_LT 29 -#define TK_LE 30 -#define TK_BITAND 31 -#define TK_BITOR 32 -#define TK_LSHIFT 33 -#define TK_RSHIFT 34 -#define TK_PLUS 35 -#define TK_MINUS 36 -#define TK_DIVIDE 37 -#define TK_TIMES 38 -#define TK_STAR 39 -#define TK_SLASH 40 -#define TK_REM 41 -#define TK_CONCAT 42 -#define TK_UMINUS 43 -#define TK_UPLUS 44 -#define TK_BITNOT 45 -#define TK_QUESTION 46 -#define TK_ARROW 47 -#define TK_SHOW 48 -#define TK_DATABASES 49 -#define TK_TOPICS 50 -#define TK_FUNCTIONS 51 -#define TK_MNODES 52 -#define TK_DNODES 53 -#define TK_ACCOUNTS 54 -#define TK_USERS 55 -#define TK_MODULES 56 -#define TK_QUERIES 57 -#define TK_CONNECTIONS 58 -#define TK_STREAMS 59 -#define TK_VARIABLES 60 -#define TK_SCORES 61 -#define TK_GRANTS 62 -#define TK_VNODES 63 -#define TK_DOT 64 -#define TK_CREATE 65 -#define TK_TABLE 66 -#define TK_STABLE 67 -#define TK_DATABASE 68 -#define TK_TABLES 69 -#define TK_STABLES 70 -#define TK_VGROUPS 71 -#define TK_DROP 72 -#define TK_TOPIC 73 -#define TK_FUNCTION 74 -#define TK_DNODE 75 -#define TK_USER 76 -#define TK_ACCOUNT 77 -#define TK_USE 78 -#define TK_DESCRIBE 79 -#define TK_DESC 80 -#define TK_ALTER 81 -#define TK_PASS 82 -#define TK_PRIVILEGE 83 -#define TK_LOCAL 84 -#define TK_COMPACT 85 -#define TK_LP 86 -#define TK_RP 87 -#define TK_IF 88 -#define TK_EXISTS 89 -#define TK_AS 90 -#define TK_OUTPUTTYPE 91 -#define TK_AGGREGATE 92 -#define TK_BUFSIZE 93 -#define TK_PPS 94 -#define TK_TSERIES 95 -#define TK_DBS 96 -#define TK_STORAGE 97 -#define TK_QTIME 98 -#define TK_CONNS 99 -#define TK_STATE 100 -#define TK_COMMA 101 -#define TK_KEEP 102 -#define TK_CACHE 103 -#define TK_REPLICA 104 -#define TK_QUORUM 105 -#define TK_DAYS 106 -#define TK_MINROWS 107 -#define TK_MAXROWS 108 -#define TK_BLOCKS 109 -#define TK_CTIME 110 -#define TK_WAL 111 -#define TK_FSYNC 112 -#define TK_COMP 113 -#define TK_PRECISION 114 -#define TK_UPDATE 115 -#define TK_CACHELAST 116 -#define TK_PARTITIONS 117 -#define TK_UNSIGNED 118 -#define TK_TAGS 119 -#define TK_USING 120 -#define TK_NULL 121 -#define TK_NOW 122 -#define TK_SELECT 123 -#define TK_UNION 124 -#define TK_ALL 125 -#define TK_DISTINCT 126 -#define TK_FROM 127 -#define TK_VARIABLE 128 -#define TK_INTERVAL 129 -#define TK_EVERY 130 -#define TK_SESSION 131 -#define TK_STATE_WINDOW 132 -#define TK_FILL 133 -#define TK_SLIDING 134 -#define TK_ORDER 135 -#define TK_BY 136 -#define TK_ASC 137 -#define TK_GROUP 138 -#define TK_HAVING 139 -#define TK_LIMIT 140 -#define TK_OFFSET 141 -#define TK_SLIMIT 142 
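/*
 * Note on the renumbering in this ttokendef.h hunk: it is purely mechanical.
 * Inserting TK_NMATCH as token 24 (the parser-level counterpart of
 * TSDB_RELATION_NMATCH and the "NMATCH|" filter prefix added earlier in this
 * patch) shifts every subsequent TK_* value up by one; no other token changes
 * meaning.
 */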
-#define TK_SOFFSET 143 -#define TK_WHERE 144 -#define TK_RESET 145 -#define TK_QUERY 146 -#define TK_SYNCDB 147 -#define TK_ADD 148 -#define TK_COLUMN 149 -#define TK_MODIFY 150 -#define TK_TAG 151 -#define TK_CHANGE 152 -#define TK_SET 153 -#define TK_KILL 154 -#define TK_CONNECTION 155 -#define TK_STREAM 156 -#define TK_COLON 157 -#define TK_ABORT 158 -#define TK_AFTER 159 -#define TK_ATTACH 160 -#define TK_BEFORE 161 -#define TK_BEGIN 162 -#define TK_CASCADE 163 -#define TK_CLUSTER 164 -#define TK_CONFLICT 165 -#define TK_COPY 166 -#define TK_DEFERRED 167 -#define TK_DELIMITERS 168 -#define TK_DETACH 169 -#define TK_EACH 170 -#define TK_END 171 -#define TK_EXPLAIN 172 -#define TK_FAIL 173 -#define TK_FOR 174 -#define TK_IGNORE 175 -#define TK_IMMEDIATE 176 -#define TK_INITIALLY 177 -#define TK_INSTEAD 178 -#define TK_KEY 179 -#define TK_OF 180 -#define TK_RAISE 181 -#define TK_REPLACE 182 -#define TK_RESTRICT 183 -#define TK_ROW 184 -#define TK_STATEMENT 185 -#define TK_TRIGGER 186 -#define TK_VIEW 187 -#define TK_IPTOKEN 188 -#define TK_SEMI 189 -#define TK_NONE 190 -#define TK_PREV 191 -#define TK_LINEAR 192 -#define TK_IMPORT 193 -#define TK_TBNAME 194 -#define TK_JOIN 195 -#define TK_INSERT 196 -#define TK_INTO 197 -#define TK_VALUES 198 +#define TK_NMATCH 24 +#define TK_GLOB 25 +#define TK_BETWEEN 26 +#define TK_IN 27 +#define TK_GT 28 +#define TK_GE 29 +#define TK_LT 30 +#define TK_LE 31 +#define TK_BITAND 32 +#define TK_BITOR 33 +#define TK_LSHIFT 34 +#define TK_RSHIFT 35 +#define TK_PLUS 36 +#define TK_MINUS 37 +#define TK_DIVIDE 38 +#define TK_TIMES 39 +#define TK_STAR 40 +#define TK_SLASH 41 +#define TK_REM 42 +#define TK_CONCAT 43 +#define TK_UMINUS 44 +#define TK_UPLUS 45 +#define TK_BITNOT 46 +#define TK_QUESTION 47 +#define TK_ARROW 48 +#define TK_SHOW 49 +#define TK_DATABASES 50 +#define TK_TOPICS 51 +#define TK_FUNCTIONS 52 +#define TK_MNODES 53 +#define TK_DNODES 54 +#define TK_ACCOUNTS 55 +#define TK_USERS 56 +#define TK_MODULES 57 +#define TK_QUERIES 58 +#define TK_CONNECTIONS 59 +#define TK_STREAMS 60 +#define TK_VARIABLES 61 +#define TK_SCORES 62 +#define TK_GRANTS 63 +#define TK_VNODES 64 +#define TK_DOT 65 +#define TK_CREATE 66 +#define TK_TABLE 67 +#define TK_STABLE 68 +#define TK_DATABASE 69 +#define TK_TABLES 70 +#define TK_STABLES 71 +#define TK_VGROUPS 72 +#define TK_DROP 73 +#define TK_TOPIC 74 +#define TK_FUNCTION 75 +#define TK_DNODE 76 +#define TK_USER 77 +#define TK_ACCOUNT 78 +#define TK_USE 79 +#define TK_DESCRIBE 80 +#define TK_DESC 81 +#define TK_ALTER 82 +#define TK_PASS 83 +#define TK_PRIVILEGE 84 +#define TK_LOCAL 85 +#define TK_COMPACT 86 +#define TK_LP 87 +#define TK_RP 88 +#define TK_IF 89 +#define TK_EXISTS 90 +#define TK_AS 91 +#define TK_OUTPUTTYPE 92 +#define TK_AGGREGATE 93 +#define TK_BUFSIZE 94 +#define TK_PPS 95 +#define TK_TSERIES 96 +#define TK_DBS 97 +#define TK_STORAGE 98 +#define TK_QTIME 99 +#define TK_CONNS 100 +#define TK_STATE 101 +#define TK_COMMA 102 +#define TK_KEEP 103 +#define TK_CACHE 104 +#define TK_REPLICA 105 +#define TK_QUORUM 106 +#define TK_DAYS 107 +#define TK_MINROWS 108 +#define TK_MAXROWS 109 +#define TK_BLOCKS 110 +#define TK_CTIME 111 +#define TK_WAL 112 +#define TK_FSYNC 113 +#define TK_COMP 114 +#define TK_PRECISION 115 +#define TK_UPDATE 116 +#define TK_CACHELAST 117 +#define TK_PARTITIONS 118 +#define TK_UNSIGNED 119 +#define TK_TAGS 120 +#define TK_USING 121 +#define TK_NULL 122 +#define TK_NOW 123 +#define TK_SELECT 124 +#define TK_UNION 125 +#define TK_ALL 126 +#define TK_DISTINCT 127 +#define TK_FROM 
128 +#define TK_VARIABLE 129 +#define TK_INTERVAL 130 +#define TK_EVERY 131 +#define TK_SESSION 132 +#define TK_STATE_WINDOW 133 +#define TK_FILL 134 +#define TK_SLIDING 135 +#define TK_ORDER 136 +#define TK_BY 137 +#define TK_ASC 138 +#define TK_GROUP 139 +#define TK_HAVING 140 +#define TK_LIMIT 141 +#define TK_OFFSET 142 +#define TK_SLIMIT 143 +#define TK_SOFFSET 144 +#define TK_WHERE 145 +#define TK_RESET 146 +#define TK_QUERY 147 +#define TK_SYNCDB 148 +#define TK_ADD 149 +#define TK_COLUMN 150 +#define TK_MODIFY 151 +#define TK_TAG 152 +#define TK_CHANGE 153 +#define TK_SET 154 +#define TK_KILL 155 +#define TK_CONNECTION 156 +#define TK_STREAM 157 +#define TK_COLON 158 +#define TK_ABORT 159 +#define TK_AFTER 160 +#define TK_ATTACH 161 +#define TK_BEFORE 162 +#define TK_BEGIN 163 +#define TK_CASCADE 164 +#define TK_CLUSTER 165 +#define TK_CONFLICT 166 +#define TK_COPY 167 +#define TK_DEFERRED 168 +#define TK_DELIMITERS 169 +#define TK_DETACH 170 +#define TK_EACH 171 +#define TK_END 172 +#define TK_EXPLAIN 173 +#define TK_FAIL 174 +#define TK_FOR 175 +#define TK_IGNORE 176 +#define TK_IMMEDIATE 177 +#define TK_INITIALLY 178 +#define TK_INSTEAD 179 +#define TK_KEY 180 +#define TK_OF 181 +#define TK_RAISE 182 +#define TK_REPLACE 183 +#define TK_RESTRICT 184 +#define TK_ROW 185 +#define TK_STATEMENT 186 +#define TK_TRIGGER 187 +#define TK_VIEW 188 +#define TK_IPTOKEN 189 +#define TK_SEMI 190 +#define TK_NONE 191 +#define TK_PREV 192 +#define TK_LINEAR 193 +#define TK_IMPORT 194 +#define TK_TBNAME 195 +#define TK_JOIN 196 +#define TK_INSERT 197 +#define TK_INTO 198 +#define TK_VALUES 199 #define TK_SPACE 300 #define TK_COMMENT 301 diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index bf2bbca14d25aff3b3717c7b9785f1dc470a013a..bca1b72a1bc2c1f5b3da311baad38cd1d55ddaed 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -19,9 +19,9 @@ ELSE () ENDIF () IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(shell taos_static lua ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(shell taos_static cJson lua ${LINK_JEMALLOC}) ELSE () - TARGET_LINK_LIBRARIES(shell taos lua ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(shell taos cJson lua ${LINK_JEMALLOC}) ENDIF () SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) @@ -30,7 +30,7 @@ ELSEIF (TD_WINDOWS) LIST(APPEND SRC ./src/shellMain.c) LIST(APPEND SRC ./src/shellWindows.c) ADD_EXECUTABLE(shell ${SRC}) - TARGET_LINK_LIBRARIES(shell taos_static) + TARGET_LINK_LIBRARIES(shell taos_static cJson) IF (TD_POWER) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power) @@ -46,7 +46,7 @@ ELSEIF (TD_DARWIN) LIST(APPEND SRC ./src/shellCheck.c) ADD_EXECUTABLE(shell ${SRC}) # linking with dylib - TARGET_LINK_LIBRARIES(shell taos) + TARGET_LINK_LIBRARIES(shell taos cJson) # linking taos statically # TARGET_LINK_LIBRARIES(shell taos_static) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index efc37403b46f2bfdd8e40eecd2ff53d00af6cd8a..4dfe424b11e82c78d5acbbcc7471c093cd75e7aa 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -569,7 +569,7 @@ static void shellPrintNChar(const char *str, int length, int width) { while (pos < length) { wchar_t wc; int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX); - if (bytes == 0) { + if (bytes <= 0) { break; } pos += bytes; diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 
87102cc1c76206a0c6f779efc9ef22e9607409ef..ec75ff0840e56b4d571a7ccc195db476bf1c8e4f 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -20,6 +20,7 @@ #include #include +#include #define _GNU_SOURCE #define CURL_STATICLIB @@ -87,7 +88,7 @@ extern char configDir[]; #define DOUBLE_BUFF_LEN 42 #define TIMESTAMP_BUFF_LEN 21 -#define MAX_SAMPLES_ONCE_FROM_FILE 10000 +#define MAX_SAMPLES 10000 #define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp #define MAX_DB_COUNT 8 @@ -103,11 +104,19 @@ extern char configDir[]; #define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3) #define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16) +#define DEFAULT_NTHREADS 8 #define DEFAULT_TIMESTAMP_STEP 1 #define DEFAULT_INTERLACE_ROWS 0 #define DEFAULT_DATATYPE_NUM 1 #define DEFAULT_CHILDTABLES 10000 +#define STMT_BIND_PARAM_BATCH 1 + +char* g_sampleDataBuf = NULL; +#if STMT_BIND_PARAM_BATCH == 1 + // bind param batch +char* g_sampleBindBatchArray = NULL; +#endif enum TEST_MODE { INSERT_TEST, // 0 @@ -116,17 +125,17 @@ enum TEST_MODE { INVAID_TEST }; -typedef enum CREATE_SUB_TALBE_MOD_EN { +typedef enum CREATE_SUB_TABLE_MOD_EN { PRE_CREATE_SUBTBL, AUTO_CREATE_SUBTBL, NO_CREATE_SUBTBL -} CREATE_SUB_TALBE_MOD_EN; +} CREATE_SUB_TABLE_MOD_EN; -typedef enum TALBE_EXISTS_EN { +typedef enum TABLE_EXISTS_EN { TBL_NO_EXISTS, TBL_ALREADY_EXISTS, TBL_EXISTS_BUTT -} TALBE_EXISTS_EN; +} TABLE_EXISTS_EN; enum enumSYNC_MODE { SYNC_MODE, @@ -219,28 +228,30 @@ typedef struct SArguments_S { char * sqlFile; bool use_metric; bool drop_database; - bool insert_only; + bool aggr_func; bool answer_yes; bool debug_print; bool verbose_print; bool performance_print; char * output_file; bool async_mode; - char * datatype[MAX_NUM_COLUMNS + 1]; + char data_type[MAX_NUM_COLUMNS+1]; + char *dataType[MAX_NUM_COLUMNS+1]; uint32_t binwidth; - uint32_t num_of_CPR; - uint32_t num_of_threads; + uint32_t columnCount; + uint64_t lenOfOneRow; + uint32_t nthreads; uint64_t insert_interval; uint64_t timestamp_step; int64_t query_times; - uint32_t interlace_rows; - uint32_t num_of_RPR; // num_of_records_per_req + uint32_t interlaceRows; + uint32_t reqPerReq; // num_of_records_per_req uint64_t max_sql_len; - int64_t num_of_tables; - int64_t num_of_DPT; + int64_t ntables; + int64_t insertRows; int abort; uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. accordig to database precision + int disorderRange; // ms, us or ns. 
according to database precision uint32_t method_of_delete; uint64_t totalInsertRows; uint64_t totalAffectedRows; @@ -248,14 +259,15 @@ typedef struct SArguments_S { } SArguments; typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN]; - char dataType[DATATYPE_BUFF_LEN]; - uint32_t dataLen; - char note[NOTE_BUFF_LEN]; + char field[TSDB_COL_NAME_LEN]; + char data_type; + char dataType[DATATYPE_BUFF_LEN]; + uint32_t dataLen; + char note[NOTE_BUFF_LEN]; } StrColumn; typedef struct SSuperTable_S { - char sTblName[TSDB_TABLE_NAME_LEN]; + char stbName[TSDB_TABLE_NAME_LEN]; char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample char childTblPrefix[TBNAME_PREFIX_LEN]; uint16_t childTblExists; @@ -291,14 +303,16 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; - //int sampleRowCount; - //int sampleUsePos; uint32_t tagSource; // 0: rand, 1: tag sample char* tagDataBuf; uint32_t tagSampleCount; uint32_t tagUsePos; +#if STMT_BIND_PARAM_BATCH == 1 + // bind param batch + char *sampleBindBatchArray; +#endif // statistics uint64_t totalInsertRows; uint64_t totalAffectedRows; @@ -362,8 +376,7 @@ typedef struct SDbs_S { char password[SHELL_MAX_PASSWORD_LEN]; char resultFile[MAX_FILE_NAME_LEN]; bool use_metric; - bool insert_only; - bool do_aggreFunc; + bool aggr_func; bool asyncMode; uint32_t threadCount; @@ -378,7 +391,7 @@ typedef struct SDbs_S { } SDbs; typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint64_t queryInterval; // 0: unlimited > 0 loop/s uint32_t concurrent; int sqlCount; uint32_t asyncMode; // 0: sync, 1: async @@ -398,8 +411,8 @@ typedef struct SpecifiedQueryInfo_S { } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { - char sTblName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimit > 0 loop/s + char stbName[TSDB_TABLE_NAME_LEN]; + uint64_t queryInterval; // 0: unlimited > 0 loop/s uint32_t threadCnt; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms @@ -437,8 +450,16 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; - char* sampleBindArray; - int64_t *bind_ts; + int64_t *bind_ts; + +#if STMT_BIND_PARAM_BATCH == 1 + int64_t *bind_ts_array; + char *bindParams; + char *is_null; +#else + char* sampleBindArray; +#endif + int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -584,8 +605,11 @@ char *g_rand_current_buff = NULL; char *g_rand_phase_buff = NULL; char *g_randdouble_buff = NULL; -char *g_aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", - "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; +char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)", + "max(current)", "min(current)", "first(current)", "last(current)"}; + +char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", + "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; SArguments g_args = { NULL, // metaFile @@ -607,29 +631,33 @@ SArguments g_args = { NULL, // sqlFile true, // use_metric true, // drop_database - true, // insert_only + false, // aggr_func false, // debug_print false, // verbose_print false, // performance statistic print false, // answer_yes; "./output.txt", // output_file 0, // mode : sync or async + {TSDB_DATA_TYPE_FLOAT, + TSDB_DATA_TYPE_INT, + TSDB_DATA_TYPE_FLOAT}, { - "FLOAT", // datatype - "INT", // datatype - "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 + "FLOAT", // dataType + "INT", // dataType + "FLOAT", // dataType. 
demo mode has 3 columns }, 64, // binwidth - 4, // num_of_CPR - 10, // num_of_connections/thread + 4, // columnCount, timestamp + float + int + float + 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow + DEFAULT_NTHREADS,// nthreads 0, // insert_interval DEFAULT_TIMESTAMP_STEP, // timestamp_step 1, // query_times - DEFAULT_INTERLACE_ROWS, // interlace_rows; - 30000, // num_of_RPR + DEFAULT_INTERLACE_ROWS, // interlaceRows; + 30000, // reqPerReq (1024*1024), // max_sql_len - DEFAULT_CHILDTABLES, // num_of_tables - 10000, // num_of_DPT + DEFAULT_CHILDTABLES, // ntables + 10000, // insertRows 0, // abort 0, // disorderRatio 1000, // disorderRange @@ -711,10 +739,10 @@ static void printVersion() { char taosdemo_status[] = TAOSDEMO_STATUS; if (strlen(taosdemo_status) == 0) { - printf("taosdemo verison %s-%s\n", + printf("taosdemo version %s-%s\n", tdengine_ver, taosdemo_ver); } else { - printf("taosdemo verison %s-%s, status:%s\n", + printf("taosdemo version %s-%s, status:%s\n", tdengine_ver, taosdemo_ver, taosdemo_status); } } @@ -723,19 +751,19 @@ static void printHelp() { char indent[10] = " "; printf("%s\n\n", "Usage: taosdemo [OPTION...]"); printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t", - "The meta file to the execution procedure. Default is './meta.json'."); + "The meta file to the execution procedure."); printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t", "The user name to use when connecting to the server."); #ifdef _TD_POWER_ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. Default is 'powerdb'"); + "The password to use when connecting to the server. By default is 'powerdb'"); printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. Default is '/etc/power/'."); + "Configuration directory. By default is '/etc/power/'."); #elif (_TD_TQ_ == true) printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", - "The password to use when connecting to the server. Default is 'tqueue'"); + "The password to use when connecting to the server. By default is 'tqueue'"); printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t", - "Configuration directory. Default is '/etc/tq/'."); + "Configuration directory. By default is '/etc/tq/'."); #else printf("%s%s%s%s\n", indent, "-p, --password", "\t\t", "The password to use when connecting to the server."); @@ -747,24 +775,24 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t", "The TCP/IP port number to use for the connection."); printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t", - "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); + "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'."); printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t", - "Destination database. Default is 'test'."); + "Destination database. By default is 'test'."); printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t", - "Set the replica parameters of the database, Default 1, min: 1, max: 3."); + "Set the replica parameters of the database, By default use 1, min: 1, max: 3."); printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", - "Table prefix name. Default is 'd'."); + "Table prefix name. 
By default use 'd'."); printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", "The select sql file."); printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag."); printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", - "Direct output to the named file. Default is './output.txt'."); + "Direct output to the named file. By default use './output.txt'."); printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", - "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); + "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC."); printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", - "The data_type of columns, default: FLOAT, INT, FLOAT."); + "The data_type of columns, By default use: FLOAT, INT, FLOAT."); printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t", - "The width of data_type 'BINARY' or 'NCHAR'. Default is ", + "The width of data_type 'BINARY' or 'NCHAR'. By default use ", g_args.binwidth); printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t", "The number of columns per record. Demo mode by default is ", @@ -773,32 +801,32 @@ static void printHelp() { MAX_NUM_COLUMNS); printf("%s%s%s%s\n", indent, indent, indent, "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); - printf("%s%s%s%s\n", indent, "-T, --threads=NUMBER", "\t\t", - "The number of threads. Default is 10."); + printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t", + "The number of threads. By default use ", DEFAULT_NTHREADS); printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t", - "The sleep time (ms) between insertion. Default is 0."); + "The sleep time (ms) between insertion. By default is 0."); printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t", - "The timestamp step between insertion. Default is ", + "The timestamp step between insertion. By default is ", DEFAULT_TIMESTAMP_STEP); printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t", - "The interlace rows of insertion. Default is ", + "The interlace rows of insertion. By default is ", DEFAULT_INTERLACE_ROWS); printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t", - "The number of records per request. Default is 30000."); + "The number of records per request. By default is 30000."); printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t", - "The number of tables. Default is 10000."); + "The number of tables. By default is 10000."); printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t", - "The number of records per table. Default is 10000."); + "The number of records per table. By default is 10000."); printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tThe default is to simulate power equipment senario."); - printf("%s%s%s%s\n", indent, "-x, --no-insert", "\t\t", - "No-insert flag."); - printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Default input yes for prompt."); + printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario."); + printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t", + "Test aggregation functions after insertion."); + printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt."); printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t", - "Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order."); + "Insert order mode--0: In order, 1 ~ 50: disorder ratio. 
By default is in order."); printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t", - "Out of order data's range, ms, default is 1000."); + "Out of order data's range. Unit is ms. By default is 1000."); printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t", "Print debug info."); printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t", @@ -836,7 +864,7 @@ static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); } -static void errorUnreconized(char *program, char *wrong_arg) +static void errorUnrecognized(char *program, char *wrong_arg) { fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); @@ -893,7 +921,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { arguments->metaFile = (char *)(argv[i] + strlen("--file=")); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) @@ -915,7 +943,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) { tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) @@ -937,7 +965,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { arguments->host = (char *)(argv[i] + strlen("--host=")); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-PP") == 0) { @@ -971,7 +999,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->port = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) @@ -1032,7 +1060,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } i++; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) @@ -1054,7 +1082,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->user = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) @@ -1088,7 +1116,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->output_file = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) @@ -1110,7 +1138,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) @@ -1148,7 +1176,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->async_mode = atoi(argv[++i]); } 
else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) @@ -1161,17 +1189,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "T"); exit(EXIT_FAILURE); } - arguments->num_of_threads = atoi(argv[++i]); + arguments->nthreads = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) { if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) { - arguments->num_of_threads = atoi((char *)(argv[i]+strlen("--threads="))); + arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads="))); } else { errorPrintReqArg2(argv[0], "--threads"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) { if (isStringNumber((char *)(argv[i] + strlen("-T")))) { - arguments->num_of_threads = atoi((char *)(argv[i]+strlen("-T"))); + arguments->nthreads = atoi((char *)(argv[i]+strlen("-T"))); } else { errorPrintReqArg2(argv[0], "-T"); exit(EXIT_FAILURE); @@ -1184,9 +1212,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--threads"); exit(EXIT_FAILURE); } - arguments->num_of_threads = atoi(argv[++i]); + arguments->nthreads = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) @@ -1224,7 +1252,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->insert_interval = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-S", strlen("-S"))) @@ -1262,7 +1290,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->async_mode = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-qt") == 0) { @@ -1283,17 +1311,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "B"); exit(EXIT_FAILURE); } - arguments->interlace_rows = atoi(argv[++i]); + arguments->interlaceRows = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) { if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) { - arguments->interlace_rows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); } else { errorPrintReqArg2(argv[0], "--interlace-rows"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { if (isStringNumber((char *)(argv[i] + strlen("-B")))) { - arguments->interlace_rows = atoi((char *)(argv[i]+strlen("-B"))); + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B"))); } else { errorPrintReqArg2(argv[0], "-B"); exit(EXIT_FAILURE); @@ -1306,9 +1334,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--interlace-rows"); exit(EXIT_FAILURE); } - arguments->interlace_rows = atoi(argv[++i]); + arguments->interlaceRows = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) @@ -1321,17 +1349,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { 
errorPrintReqArg2(argv[0], "r"); exit(EXIT_FAILURE); } - arguments->num_of_RPR = atoi(argv[++i]); + arguments->reqPerReq = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) { if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) { - arguments->num_of_RPR = atoi((char *)(argv[i]+strlen("--rec-per-req="))); + arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req="))); } else { errorPrintReqArg2(argv[0], "--rec-per-req"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) { if (isStringNumber((char *)(argv[i] + strlen("-r")))) { - arguments->num_of_RPR = atoi((char *)(argv[i]+strlen("-r"))); + arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r"))); } else { errorPrintReqArg2(argv[0], "-r"); exit(EXIT_FAILURE); @@ -1344,9 +1372,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--rec-per-req"); exit(EXIT_FAILURE); } - arguments->num_of_RPR = atoi(argv[++i]); + arguments->reqPerReq = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) @@ -1359,17 +1387,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "t"); exit(EXIT_FAILURE); } - arguments->num_of_tables = atoi(argv[++i]); + arguments->ntables = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) { if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) { - arguments->num_of_tables = atoi((char *)(argv[i]+strlen("--tables="))); + arguments->ntables = atoi((char *)(argv[i]+strlen("--tables="))); } else { errorPrintReqArg2(argv[0], "--tables"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) { if (isStringNumber((char *)(argv[i] + strlen("-t")))) { - arguments->num_of_tables = atoi((char *)(argv[i]+strlen("-t"))); + arguments->ntables = atoi((char *)(argv[i]+strlen("-t"))); } else { errorPrintReqArg2(argv[0], "-t"); exit(EXIT_FAILURE); @@ -1382,13 +1410,13 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--tables"); exit(EXIT_FAILURE); } - arguments->num_of_tables = atoi(argv[++i]); + arguments->ntables = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } - g_totalChildTables = arguments->num_of_tables; + g_totalChildTables = arguments->ntables; } else if ((0 == strncmp(argv[i], "-n", strlen("-n"))) || (0 == strncmp(argv[i], "--records", strlen("--records")))) { if (2 == strlen(argv[i])) { @@ -1399,17 +1427,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "n"); exit(EXIT_FAILURE); } - arguments->num_of_DPT = atoi(argv[++i]); + arguments->insertRows = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) { if (isStringNumber((char *)(argv[i] + strlen("--records=")))) { - arguments->num_of_DPT = atoi((char *)(argv[i]+strlen("--records="))); + arguments->insertRows = atoi((char *)(argv[i]+strlen("--records="))); } else { errorPrintReqArg2(argv[0], "--records"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) { if (isStringNumber((char *)(argv[i] + strlen("-n")))) { - arguments->num_of_DPT = atoi((char *)(argv[i]+strlen("-n"))); + arguments->insertRows = atoi((char *)(argv[i]+strlen("-n"))); } else { 
errorPrintReqArg2(argv[0], "-n"); exit(EXIT_FAILURE); @@ -1422,9 +1450,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--records"); exit(EXIT_FAILURE); } - arguments->num_of_DPT = atoi(argv[++i]); + arguments->insertRows = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) @@ -1446,7 +1474,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->database = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) @@ -1460,17 +1488,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "l"); exit(EXIT_FAILURE); } - arguments->num_of_CPR = atoi(argv[++i]); + arguments->columnCount = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) { if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) { - arguments->num_of_CPR = atoi((char *)(argv[i]+strlen("--columns="))); + arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns="))); } else { errorPrintReqArg2(argv[0], "--columns"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) { if (isStringNumber((char *)(argv[i] + strlen("-l")))) { - arguments->num_of_CPR = atoi((char *)(argv[i]+strlen("-l"))); + arguments->columnCount = atoi((char *)(argv[i]+strlen("-l"))); } else { errorPrintReqArg2(argv[0], "-l"); exit(EXIT_FAILURE); @@ -1483,23 +1511,25 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--columns"); exit(EXIT_FAILURE); } - arguments->num_of_CPR = atoi(argv[++i]); + arguments->columnCount = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } - if (arguments->num_of_CPR > MAX_NUM_COLUMNS) { - printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS); + if (arguments->columnCount > MAX_NUM_COLUMNS) { + printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS); prompt(); - arguments->num_of_CPR = MAX_NUM_COLUMNS; + arguments->columnCount = MAX_NUM_COLUMNS; } - for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) { - arguments->datatype[col] = "INT"; + for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) { + arguments->dataType[col] = "INT"; + arguments->data_type[col] = TSDB_DATA_TYPE_INT; } - for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) { - arguments->datatype[col] = NULL; + for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) { + arguments->dataType[col] = NULL; + arguments->data_type[col] = TSDB_DATA_TYPE_NULL; } } else if ((0 == strncmp(argv[i], "-b", strlen("-b"))) || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) { @@ -1523,7 +1553,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } dataType = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1543,8 +1573,32 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } - arguments->datatype[0] = dataType; - arguments->datatype[1] = NULL; + arguments->dataType[0] = dataType; + if (0 == strcasecmp(dataType, 
"INT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_INT; + } else if (0 == strcasecmp(dataType, "TINYINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strcasecmp(dataType, "SMALLINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strcasecmp(dataType, "BIGINT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strcasecmp(dataType, "FLOAT")) { + arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(dataType, "DOUBLE")) { + arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strcasecmp(dataType, "BINARY")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BINARY; + } else if (0 == strcasecmp(dataType, "NCHAR")) { + arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strcasecmp(dataType, "BOOL")) { + arguments->data_type[0] = TSDB_DATA_TYPE_BOOL; + } else if (0 == strcasecmp(dataType, "TIMESTAMP")) { + arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP; + } else { + arguments->data_type[0] = TSDB_DATA_TYPE_NULL; + } + arguments->dataType[1] = NULL; + arguments->data_type[1] = TSDB_DATA_TYPE_NULL; } else { // more than one col int index = 0; @@ -1567,11 +1621,37 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); } - arguments->datatype[index++] = token; + + if (0 == strcasecmp(token, "INT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_INT; + } else if (0 == strcasecmp(token, "FLOAT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(token, "SMALLINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strcasecmp(token, "BIGINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strcasecmp(token, "DOUBLE")) { + arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strcasecmp(token, "TINYINT")) { + arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strcasecmp(token, "BINARY")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BINARY; + } else if (0 == strcasecmp(token, "NCHAR")) { + arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strcasecmp(token, "BOOL")) { + arguments->data_type[index] = TSDB_DATA_TYPE_BOOL; + } else if (0 == strcasecmp(token, "TIMESTAMP")) { + arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP; + } else { + arguments->data_type[index] = TSDB_DATA_TYPE_NULL; + } + arguments->dataType[index] = token; + index ++; token = strsep(&running, ","); if (index >= MAX_NUM_COLUMNS) break; } - arguments->datatype[index] = NULL; + arguments->dataType[index] = NULL; + arguments->data_type[index] = TSDB_DATA_TYPE_NULL; } } else if ((0 == strncmp(argv[i], "-w", strlen("-w"))) || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) { @@ -1608,7 +1688,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->binwidth = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) @@ -1630,18 +1710,19 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->tb_prefix = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "-N") == 0) || (0 == strcmp(argv[i], "--normal-table"))) { + arguments->demo_mode = false; arguments->use_metric = false; } else if 
((strcmp(argv[i], "-M") == 0) || (0 == strcmp(argv[i], "--random"))) { arguments->demo_mode = false; } else if ((strcmp(argv[i], "-x") == 0) - || (0 == strcmp(argv[i], "--no-insert"))) { - arguments->insert_only = false; + || (0 == strcmp(argv[i], "--aggr-func"))) { + arguments->aggr_func = true; } else if ((strcmp(argv[i], "-y") == 0) || (0 == strcmp(argv[i], "--answer-yes"))) { arguments->answer_yes = true; @@ -1695,7 +1776,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->disorderRange = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) @@ -1733,7 +1814,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->disorderRatio = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1787,7 +1868,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->replica = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1799,7 +1880,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-D") == 0) { arguments->method_of_delete = atoi(argv[++i]); if (arguments->method_of_delete > 3) { - errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n"); + errorPrint("%s", "\n\t-D need a value (0~3) number following!\n"); exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "--version") == 0) @@ -1814,7 +1895,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ - [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUNNS] [-T THREADNUMBER]\n\ + [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ [--help] [--usage] [--version]\n"); @@ -1842,7 +1923,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { int columnCount; for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) { - if (g_args.datatype[columnCount] == NULL) { + if (g_args.dataType[columnCount] == NULL) { break; } } @@ -1850,7 +1931,56 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (0 == columnCount) { ERROR_EXIT("data type error!"); } - g_args.num_of_CPR = columnCount; + g_args.columnCount = columnCount; + + g_args.lenOfOneRow = 20; // timestamp + for (int c = 0; c < g_args.columnCount; c++) { + switch(g_args.data_type[c]) { + case TSDB_DATA_TYPE_BINARY: + g_args.lenOfOneRow += g_args.binwidth + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + g_args.lenOfOneRow += g_args.binwidth + 3; + break; + + case TSDB_DATA_TYPE_INT: + g_args.lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + g_args.lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + g_args.lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + g_args.lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + g_args.lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + g_args.lenOfOneRow += FLOAT_BUFF_LEN; + break; + + 
case TSDB_DATA_TYPE_DOUBLE: + g_args.lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + errorPrint2("get error data type : %s\n", g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } if (((arguments->debug_print) && (NULL != arguments->metaFile)) || arguments->verbose_print) { @@ -1863,11 +1993,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf("# Password: %s\n", arguments->password); printf("# Use metric: %s\n", arguments->use_metric ? "true" : "false"); - if (*(arguments->datatype)) { + if (*(arguments->dataType)) { printf("# Specified data type: "); for (int c = 0; c < MAX_NUM_COLUMNS; c++) - if (arguments->datatype[c]) - printf("%s,", arguments->datatype[c]); + if (arguments->dataType[c]) + printf("%s,", arguments->dataType[c]); else break; printf("\n"); @@ -1875,15 +2005,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf("# Insertion interval: %"PRIu64"\n", arguments->insert_interval); printf("# Number of records per req: %u\n", - arguments->num_of_RPR); + arguments->reqPerReq); printf("# Max SQL length: %"PRIu64"\n", arguments->max_sql_len); printf("# Length of Binary: %d\n", arguments->binwidth); - printf("# Number of Threads: %d\n", arguments->num_of_threads); + printf("# Number of Threads: %d\n", arguments->nthreads); printf("# Number of Tables: %"PRId64"\n", - arguments->num_of_tables); + arguments->ntables); printf("# Number of Data per Table: %"PRId64"\n", - arguments->num_of_DPT); + arguments->insertRows); printf("# Database name: %s\n", arguments->database); printf("# Table prefix: %s\n", arguments->tb_prefix); if (arguments->disorderRatio) { @@ -1909,31 +2039,20 @@ static void tmfclose(FILE *fp) { static void tmfree(char *buf) { if (NULL != buf) { free(buf); + buf = NULL; } } static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { - int i; - TAOS_RES *res = NULL; - int32_t code = -1; - for (i = 0; i < 5 /* retry */; i++) { - if (NULL != res) { - taos_free_result(res); - res = NULL; - } + verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - res = taos_query(taos, command); - code = taos_errno(res); - if (0 == code) { - break; - } - } + TAOS_RES *res = taos_query(taos, command); + int32_t code = taos_errno(res); - verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); if (code != 0) { if (!quiet) { - errorPrint2("Failed to execute %s, reason: %s\n", + errorPrint2("Failed to execute <%s>, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); @@ -2314,10 +2433,11 @@ static void init_rand_data() { static int printfInsertMeta() { SHOW_PARSE_RESULT_START(); - if (g_args.demo_mode) - printf("\ntaosdemo is simulating data generated by power equipments monitoring...\n\n"); - else + if (g_args.demo_mode) { + printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n"); + } else { printf("\ntaosdemo is simulating random data as you request..\n\n"); + } if (g_args.iface != INTERFACE_BUT) { // first time if no iface specified @@ -2338,7 +2458,7 @@ static int printfInsertMeta() { printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", g_args.insert_interval); printf("number of records per req: \033[33m%u\033[0m\n", - g_args.num_of_RPR); + g_args.reqPerReq); printf("max sql length: \033[33m%"PRIu64"\033[0m\n", g_args.max_sql_len); @@ -2349,9 +2469,9 @@ static int printfInsertMeta() { printf(" database[%d] name: 
\033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName); if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33mno\033[0m\n"); + printf(" drop: \033[33m no\033[0m\n"); } else { - printf(" drop: \033[33myes\033[0m\n"); + printf(" drop: \033[33m yes\033[0m\n"); } if (g_Dbs.db[i].dbCfg.blocks > 0) { @@ -2420,7 +2540,7 @@ static int printfInsertMeta() { printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].stbName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -2460,9 +2580,9 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].insertRows); /* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); + printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); }else { - printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); + printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); } */ printf(" interlaceRows: \033[33m%u\033[0m\n", @@ -2543,7 +2663,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl); - fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR); + fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq); fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); @@ -2610,7 +2730,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " super table[%d]:\n", j); fprintf(fp, " stbName: %s\n", - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].stbName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { fprintf(fp, " autoCreateTable: %s\n", "no"); @@ -2769,7 +2889,7 @@ static void printfQueryMeta() { printf("childTblCount: \033[33m%"PRId64"\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.sTblName); + g_queryInfo.superQueryInfo.stbName); printf("stb query times:\033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); @@ -2840,36 +2960,45 @@ static void xDumpFieldToFile(FILE* fp, const char* val, char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0)); + fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 
1 : 0)); break; + case TSDB_DATA_TYPE_TINYINT: fprintf(fp, "%d", *((int8_t *)val)); break; + case TSDB_DATA_TYPE_SMALLINT: fprintf(fp, "%d", *((int16_t *)val)); break; + case TSDB_DATA_TYPE_INT: fprintf(fp, "%d", *((int32_t *)val)); break; + case TSDB_DATA_TYPE_BIGINT: fprintf(fp, "%"PRId64"", *((int64_t *)val)); break; + case TSDB_DATA_TYPE_FLOAT: fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); break; + case TSDB_DATA_TYPE_DOUBLE: fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); break; + case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: memcpy(buf, val, length); buf[length] = 0; fprintf(fp, "\'%s\'", buf); break; + case TSDB_DATA_TYPE_TIMESTAMP: formatTimestamp(buf, *(int64_t*)val, precision); fprintf(fp, "'%s'", buf); break; + default: break; } @@ -3356,29 +3485,50 @@ static int calcRowLen(SSuperTable* superTbls) { for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { char* dataType = superTbls->columns[colIndex].dataType; - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfOneRow += INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfOneRow += BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfOneRow += SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfOneRow += TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfOneRow += BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfOneRow += FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfOneRow += DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - lenOfOneRow += TIMESTAMP_BUFF_LEN; - } else { - errorPrint2("get error data type : %s\n", dataType); - exit(EXIT_FAILURE); + switch(superTbls->columns[colIndex].data_type) { + case TSDB_DATA_TYPE_BINARY: + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_INT: + lenOfOneRow += INT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BIGINT: + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + errorPrint2("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -3418,9 +3568,8 @@ static int calcRowLen(SSuperTable* superTbls) { return 0; } - static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, - char* dbName, char* sTblName, char** childTblNameOfSuperTbl, + char* dbName, char* stbName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { char command[1024] = "\0"; @@ -3431,14 +3580,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* childTblName = *childTblNameOfSuperTbl; - if (offset >= 0) { - snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", - limit, offset); - } + snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", + 
limit, offset); //get all child table name use cmd: select tbname from superTblName; snprintf(command, 1024, "select tbname from %s.%s %s", - dbName, sTblName, limitBuf); + dbName, stbName, limitBuf); res = taos_query(taos, command); int32_t code = taos_errno(res); @@ -3489,7 +3636,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); + __func__, __LINE__, dbName, stbName); exit(EXIT_FAILURE); } } @@ -3504,10 +3651,10 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, - char* sTblName, char** childTblNameOfSuperTbl, + char* stbName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl) { - return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, + return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName, childTblNameOfSuperTbl, childTblCountOfSuperTbl, -1, 0); } @@ -3521,7 +3668,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int count = 0; //get schema use cmd: describe superTblName; - snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName); + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName); res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { @@ -3547,6 +3694,39 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], min(DATATYPE_BUFF_LEN, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "INT", strlen("INT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL; + } superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); tstrncpy(superTbls->tags[tagIndex].note, @@ -3558,16 +3738,51 @@ static int getSuperTableFromServer(TAOS * taos, char* 
dbName, tstrncpy(superTbls->columns[columnIndex].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + tstrncpy(superTbls->columns[columnIndex].dataType, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], min(DATATYPE_BUFF_LEN, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "INT", strlen("INT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL; + } superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); tstrncpy(superTbls->columns[columnIndex].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], min(NOTE_BUFF_LEN, fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); + columnIndex++; } count++; @@ -3589,7 +3804,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, return -1; } getAllChildNameOfSuperTable(taos, dbName, - superTbls->sTblName, + superTbls->stbName, &superTbls->childTblName, &superTbls->childTblCount); } @@ -3605,7 +3820,6 @@ static int createSuperTable( assert(command); char cols[COL_BUFFER_LEN] = "\0"; - int colIndex; int len = 0; int lenOfOneRow = 0; @@ -3617,67 +3831,87 @@ static int createSuperTable( return -1; } - for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - char* dataType = superTbl->columns[colIndex].dataType; + for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ",C%d %s(%d)", colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, - ",C%d %s(%d)", colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - } 
else if (strcasecmp(dataType, "INT") == 0) { - if ((g_args.demo_mode) && (colIndex == 1)) { + switch(superTbl->columns[colIndex].data_type) { + case TSDB_DATA_TYPE_BINARY: len += snprintf(cols + len, COL_BUFFER_LEN - len, - ", VOLTAGE INT"); - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); - } - lenOfOneRow += INT_BUFF_LEN; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "BIGINT"); - lenOfOneRow += BIGINT_BUFF_LEN; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "SMALLINT"); - lenOfOneRow += SMALLINT_BUFF_LEN; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); - lenOfOneRow += TINYINT_BUFF_LEN; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); - lenOfOneRow += BOOL_BUFF_LEN; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - if (g_args.demo_mode) { - if (colIndex == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); - } else if (colIndex == 2) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); + ",C%d %s(%d)", colIndex, "BINARY", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_NCHAR: + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "NCHAR", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + break; + + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (colIndex == 1)) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ", VOLTAGE INT"); + } else { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); } - } else { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); - } + lenOfOneRow += INT_BUFF_LEN; + break; - lenOfOneRow += FLOAT_BUFF_LEN; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "DOUBLE"); - lenOfOneRow += DOUBLE_BUFF_LEN; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", - colIndex, "TIMESTAMP"); - lenOfOneRow += TIMESTAMP_BUFF_LEN; - } else { - taos_close(taos); - free(command); - errorPrint2("%s() LN%d, config error data type : %s\n", - __func__, __LINE__, dataType); - exit(EXIT_FAILURE); + case TSDB_DATA_TYPE_BIGINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "BIGINT"); + lenOfOneRow += BIGINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_SMALLINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "SMALLINT"); + lenOfOneRow += SMALLINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TINYINT: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); + lenOfOneRow += TINYINT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_BOOL: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); + lenOfOneRow += BOOL_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (colIndex == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); + } else if (colIndex == 2) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); + } + } else { + len += 
snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); + } + + lenOfOneRow += FLOAT_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_DOUBLE: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "DOUBLE"); + lenOfOneRow += DOUBLE_BUFF_LEN; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "TIMESTAMP"); + lenOfOneRow += TIMESTAMP_BUFF_LEN; + break; + + default: + taos_close(taos); + free(command); + errorPrint2("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, superTbl->columns[colIndex].dataType); + exit(EXIT_FAILURE); } } @@ -3777,16 +4011,16 @@ static int createSuperTable( superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; snprintf(command, BUFFER_SIZE, - "create table if not exists %s.%s (ts timestamp%s) tags %s", - dbName, superTbl->sTblName, cols, tags); + "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s", + dbName, superTbl->stbName, cols, tags); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint2("create supertable %s failed!\n\n", - superTbl->sTblName); + superTbl->stbName); free(command); return -1; } - debugPrint("create supertable %s success!\n\n", superTbl->sTblName); + debugPrint("create supertable %s success!\n\n", superTbl->stbName); free(command); return 0; } @@ -3810,42 +4044,42 @@ int createDatabasesAndStables(char *command) { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", + BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s", g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", + BUFFER_SIZE - dataLen, " BLOCKS %d", g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", + BUFFER_SIZE - dataLen, " CACHE %d", g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", + BUFFER_SIZE - dataLen, " DAYS %d", g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", + BUFFER_SIZE - dataLen, " KEEP %d", g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", + BUFFER_SIZE - dataLen, " QUORUM %d", g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", + BUFFER_SIZE - dataLen, " REPLICA %d", g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", + BUFFER_SIZE - dataLen, " UPDATE %d", g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { @@ -3854,17 +4088,17 @@ int createDatabasesAndStables(char *command) { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", + BUFFER_SIZE - dataLen, " MINROWS %d", g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", + BUFFER_SIZE - dataLen, " MAXROWS %d", g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", + BUFFER_SIZE - dataLen, " COMP %d", g_Dbs.db[i].dbCfg.comp); } if 
(g_Dbs.db[i].dbCfg.walLevel > 0) { @@ -3874,12 +4108,12 @@ int createDatabasesAndStables(char *command) { } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", + BUFFER_SIZE - dataLen, " CACHELAST %d", g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " fsync %d", g_Dbs.db[i].dbCfg.fsync); + " FSYNC %d", g_Dbs.db[i].dbCfg.fsync); } if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, @@ -3906,7 +4140,7 @@ int createDatabasesAndStables(char *command) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].stbName); ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); if ((ret != 0) || (g_Dbs.db[i].drop)) { @@ -3923,7 +4157,7 @@ int createDatabasesAndStables(char *command) { &g_Dbs.db[i].superTbls[j]); if (0 != ret) { errorPrint2("\nget super table %s.%s info failed!\n\n", - g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName); continue; } @@ -3965,7 +4199,7 @@ static void* createTable(void *sarg) i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(pThreadInfo->buffer, buff_len, - "create table if not exists %s.%s%"PRIu64" %s;", + "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;", pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); @@ -3981,7 +4215,7 @@ static void* createTable(void *sarg) batchNum = 0; memset(pThreadInfo->buffer, 0, buff_len); len += snprintf(pThreadInfo->buffer + len, - buff_len - len, "create table "); + buff_len - len, "CREATE TABLE "); } char* tagsValBuf = NULL; @@ -4006,7 +4240,7 @@ static void* createTable(void *sarg) "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", pThreadInfo->db_name, stbInfo->childTblPrefix, i, pThreadInfo->db_name, - stbInfo->sTblName, tagsValBuf); + stbInfo->stbName, tagsValBuf); free(tagsValBuf); batchNum++; if ((batchNum < stbInfo->batchCreateTableNum) @@ -4151,15 +4385,15 @@ static void createChildTables() { } else { // normal table len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); - for (int j = 0; j < g_args.num_of_CPR; j++) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], + for (int j = 0; j < g_args.columnCount; j++) { + if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.dataType[j], "NCHAR", strlen("NCHAR")) == 0)) { snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s(%d)", j, g_args.datatype[j], g_args.binwidth); + ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth); } else { snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, - ",C%d %s", j, g_args.datatype[j]); + ",C%d %s", j, g_args.dataType[j]); } len = strlen(tblColsBuf); } @@ -4168,12 +4402,12 @@ static void createChildTables() { verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf); startMultiThreadCreateChildTable( tblColsBuf, g_Dbs.threadCountForCreateTbl, 0, - g_args.num_of_tables, + g_args.ntables, g_Dbs.db[i].dbName, NULL); } @@ -4251,7 +4485,7 @@ static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { /* Read 10000 lines at most. 
If more than 10000 lines, continue to read after using */ -static int readSampleFromCsvFileToMem( +static int generateSampleFromCsvForStb( SSuperTable* stbInfo) { size_t n = 0; ssize_t readLen = 0; @@ -4267,7 +4501,7 @@ static int readSampleFromCsvFileToMem( assert(stbInfo->sampleDataBuf); memset(stbInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow); + MAX_SAMPLES * stbInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { @@ -4298,7 +4532,7 @@ static int readSampleFromCsvFileToMem( line, readLen); getRows++; - if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { + if (getRows == MAX_SAMPLES) { break; } } @@ -4377,6 +4611,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); + superTbls->columns[index].dataLen = columnCase.dataLen; index++; } @@ -4390,6 +4625,42 @@ static bool getColumnAndTagTypeFromInsertJsonFile( superTbls->columnCount = index; + for (int c = 0; c < superTbls->columnCount; c++) { + if (0 == strncasecmp(superTbls->columns[c].dataType, + "INT", strlen("INT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "FLOAT", strlen("FLOAT"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->columns[c].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL; + } + } + count = 1; index = 0; // tags @@ -4459,6 +4730,42 @@ static bool getColumnAndTagTypeFromInsertJsonFile( superTbls->tagCount = index; + for (int t = 0; t < superTbls->tagCount; t++) { + if (0 == strncasecmp(superTbls->tags[t].dataType, + "INT", strlen("INT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "TINYINT", strlen("TINYINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "SMALLINT", strlen("SMALLINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BIGINT", strlen("BIGINT"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "FLOAT", strlen("FLOAT"))) { 
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "DOUBLE", strlen("DOUBLE"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BINARY", strlen("BINARY"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "NCHAR", strlen("NCHAR"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "BOOL", strlen("BOOL"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL; + } else if (0 == strncasecmp(superTbls->tags[t].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP; + } else { + superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL; + } + } + if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { errorPrint("columns + tags is more than allowed max columns count: %d\n", TSDB_MAX_COLUMNS); @@ -4553,15 +4860,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { if (interlaceRows->valueint < 0) { - errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); goto PARSE_OVER; } - g_args.interlace_rows = interlaceRows->valueint; + g_args.interlaceRows = interlaceRows->valueint; } else if (!interlaceRows) { - g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + g_args.interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); goto PARSE_OVER; } @@ -4595,9 +4902,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { prompt(); numRecPerReq->valueint = MAX_RECORDS_PER_REQ; } - g_args.num_of_RPR = numRecPerReq->valueint; + g_args.reqPerReq = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = MAX_RECORDS_PER_REQ; + g_args.reqPerReq = MAX_RECORDS_PER_REQ; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); @@ -4623,13 +4930,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } // rows per table need be less than insert batch - if (g_args.interlace_rows > g_args.num_of_RPR) { + if (g_args.interlaceRows > g_args.reqPerReq) { printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlace_rows, g_args.num_of_RPR); + g_args.interlaceRows, g_args.reqPerReq); printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", - g_args.num_of_RPR); + g_args.reqPerReq); prompt(); - g_args.interlace_rows = g_args.num_of_RPR; + g_args.interlaceRows = g_args.reqPerReq; } cJSON* dbs = cJSON_GetObjectItem(root, "databases"); @@ -4831,7 +5138,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - // super_talbes + // super_tables cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { errorPrint("%s", "failed to read json, super_tables not found\n"); @@ -4858,7 +5165,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { errorPrint("%s", "failed to read json, stb name not found\n"); goto PARSE_OVER; } - tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, + tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring, TSDB_TABLE_NAME_LEN); cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); @@ -5127,7 +5434,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows; } } else if (!stbInterlaceRows) { - g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + g_Dbs.db[i].superTbls[j].interlaceRows = g_args.interlaceRows; // 0 means progressive mode, > 0 mean interlace mode. 
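/*
 * Illustrative sketch only: the insert-JSON parsing above keeps repeating one cJSON
 * pattern -- look the key up, accept a non-negative number, fall back to a default when
 * the key is absent, reject everything else -- and then clamps interlaceRows to reqPerReq
 * ("rows per table need be less than insert batch").  readNonNegativeInt is a hypothetical
 * helper showing that pattern; the width of cJSON's valueint depends on the bundled cJSON
 * version, hence the explicit cast.
 */
#include <stdint.h>
#include "cJSON.h"   /* assumed to be the cJSON bundled with TDengine under deps/ */

static int readNonNegativeInt(cJSON *root, const char *key,
                              int64_t defaultVal, int64_t *out) {
    cJSON *item = cJSON_GetObjectItem(root, key);
    if (item == NULL) {            /* key absent: use the default */
        *out = defaultVal;
        return 0;
    }
    if (item->type != cJSON_Number || item->valueint < 0) {
        return -1;                 /* present but malformed: caller goes to PARSE_OVER */
    }
    *out = (int64_t)item->valueint;
    return 0;
}

/* usage mirroring the hunk above:
 *   readNonNegativeInt(root, "interlace_rows", 0, &interlaceRows);
 *   if (interlaceRows > reqPerReq) interlaceRows = reqPerReq;
 */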
max value is less or equ num_of_records_per_req } else { errorPrint( "%s", "failed to read json, interlace rows input mistake\n"); @@ -5168,7 +5475,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n", + verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { @@ -5512,7 +5819,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring, TSDB_TABLE_NAME_LEN); } else { errorPrint("%s", "failed to read json, super table name input error\n"); @@ -5734,23 +6041,37 @@ static int prepareSampleData() { static void postFreeResource() { tmfclose(g_fpOfInsertResult); + for (int i = 0; i < g_Dbs.dbCount; i++) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; } if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - free(g_Dbs.db[i].superTbls[j].sampleDataBuf); + tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } +#if STMT_BIND_PARAM_BATCH == 1 + for (int c = 0; + c < g_Dbs.db[i].superTbls[j].columnCount; c ++) { + + if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) { + + tmfree((char *)((uintptr_t)*(uintptr_t*)( + g_Dbs.db[i].superTbls[j].sampleBindBatchArray + + sizeof(char*) * c))); + } + } + tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray); +#endif if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - free(g_Dbs.db[i].superTbls[j].tagDataBuf); + tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf); g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; } if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - free(g_Dbs.db[i].superTbls[j].childTblName); + tmfree(g_Dbs.db[i].superTbls[j].childTblName); g_Dbs.db[i].superTbls[j].childTblName = NULL; } } @@ -5766,13 +6087,26 @@ static void postFreeResource() { tmfree(g_rand_current_buff); tmfree(g_rand_phase_buff); + tmfree(g_sampleDataBuf); + +#if STMT_BIND_PARAM_BATCH == 1 + for (int l = 0; + l < g_args.columnCount; l ++) { + if (g_sampleBindBatchArray) { + tmfree((char *)((uintptr_t)*(uintptr_t*)( + g_sampleBindBatchArray + + sizeof(char*) * l))); + } + } + tmfree(g_sampleBindBatchArray); +#endif } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, SSuperTable* stbInfo, int64_t* sampleUsePos) { - if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + if ((*sampleUsePos) == MAX_SAMPLES) { *sampleUsePos = 0; } @@ -5803,13 +6137,14 @@ static int64_t generateStbRowData( int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "(%" PRId64 ",", timestamp); + "(%" PRId64 "", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", 6)) - || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", 5))) { + tstrncpy(pstr + dataLen, ",", 2); + dataLen += 1; + + if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY) + || 
(stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint2("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); @@ -5827,80 +6162,91 @@ static int64_t generateStbRowData( return -1; } rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf); tmfree(buf); } else { - char *tmp; + char *tmp = NULL; + switch(stbInfo->columns[i].data_type) { + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (i == 1)) { + tmp = demo_voltage_int_str(); + } else { + tmp = rand_int_str(); + } + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); + break; - if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", 3)) { - if ((g_args.demo_mode) && (i == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", 6)) { - tmp = rand_bigint_str(); - tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", 5)) { - if (g_args.demo_mode) { - if (i == 0) { - tmp = demo_current_float_str(); + case TSDB_DATA_TYPE_BIGINT: + tmp = rand_bigint_str(); + tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (i == 0) { + tmp = demo_current_float_str(); + } else { + tmp = demo_phase_float_str(); + } } else { - tmp = demo_phase_float_str(); + tmp = rand_float_str(); } - } else { - tmp = rand_float_str(); - } - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", 6)) { - tmp = rand_double_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", 8)) { - tmp = rand_smallint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, - min(tmpLen + 1, SMALLINT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", 7)) { - tmp = rand_tinyint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", 4)) { - tmp = rand_bool_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", 9)) { - tmp = rand_bigint_str(); - tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); - } else { - errorPrint2("Not support data type: %s\n", - stbInfo->columns[i].dataType); - return -1; + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmp = rand_double_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + tmp = rand_smallint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, + min(tmpLen + 1, SMALLINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_TINYINT: + tmp = rand_tinyint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_BOOL: + 
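/*
 * Illustrative sketch: the rewritten generateStbRowData/generateData above emit the
 * timestamp first and then one comma-prefixed value per column, quoting BINARY/NCHAR
 * values, before closing the tuple with ")".  buildDemoRow shows that layout for a fixed
 * three-column row; the function and parameter names are made up and the caller sizes buf.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int buildDemoRow(char *buf, size_t cap,
                        int64_t ts, int32_t voltage, const char *location) {
    /* "(ts,voltage,'location')" -- same shape as one VALUES tuple built above */
    int len = snprintf(buf, cap, "(%" PRId64 ",%d,'%s')", ts, voltage, location);
    if (len < 0 || (size_t)len >= cap) {
        return -1;   /* row does not fit; the real code flushes the buffer instead */
    }
    return len;
}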
tmp = rand_bool_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmp = rand_bigint_str(); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("Not support data type: %s\n", + stbInfo->columns[i].dataType); + exit(EXIT_FAILURE); } - dataLen += strlen(tmp); - tstrncpy(pstr + dataLen, ",", 2); - dataLen += 1; + if (tmp) { + dataLen += strlen(tmp); + } } if (dataLen > (remainderBufLen - (128))) return 0; } - tstrncpy(pstr + dataLen - 1, ")", 2); + tstrncpy(pstr + dataLen, ")", 2); verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen); verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); @@ -5908,53 +6254,83 @@ static int64_t generateStbRowData( return strlen(recBuf); } -static int64_t generateData(char *recBuf, char **data_type, +static int64_t generateData(char *recBuf, char *data_type, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; pstr += sprintf(pstr, "(%"PRId64"", timestamp); - int columnCount = g_args.num_of_CPR; + int columnCount = g_args.columnCount; + bool b; + char *s; for (int i = 0; i < columnCount; i++) { - if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) { - pstr += sprintf(pstr, ",%d", rand_tinyint() ); - } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) { - pstr += sprintf(pstr, ",%d", rand_smallint()); - } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { - pstr += sprintf(pstr, ",%d", rand_int()); - } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { - pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); - } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { - pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); - } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { - pstr += sprintf(pstr, ",%10.4f", rand_float()); - } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { - double t = rand_double(); - pstr += sprintf(pstr, ",%20.8f", t); - } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) { - bool b = rand_bool() & 1; - pstr += sprintf(pstr, ",%s", b ? 
"true" : "false"); - } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { - char *s = malloc(lenOfBinary + 1); - if (s == NULL) { - errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", - __func__, __LINE__, lenOfBinary + 1); - exit(EXIT_FAILURE); - } - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { - char *s = malloc(lenOfBinary + 1); - if (s == NULL) { - errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", - __func__, __LINE__, lenOfBinary + 1); + switch (data_type[i]) { + case TSDB_DATA_TYPE_TINYINT: + pstr += sprintf(pstr, ",%d", rand_tinyint() ); + break; + + case TSDB_DATA_TYPE_SMALLINT: + pstr += sprintf(pstr, ",%d", rand_smallint()); + break; + + case TSDB_DATA_TYPE_INT: + pstr += sprintf(pstr, ",%d", rand_int()); + break; + + case TSDB_DATA_TYPE_BIGINT: + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); + break; + + case TSDB_DATA_TYPE_FLOAT: + pstr += sprintf(pstr, ",%10.4f", rand_float()); + break; + + case TSDB_DATA_TYPE_DOUBLE: + pstr += sprintf(pstr, ",%20.8f", rand_double()); + break; + + case TSDB_DATA_TYPE_BOOL: + b = rand_bool() & 1; + pstr += sprintf(pstr, ",%s", b ? "true" : "false"); + break; + + case TSDB_DATA_TYPE_BINARY: + s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(EXIT_FAILURE); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + break; + + case TSDB_DATA_TYPE_NCHAR: + s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(EXIT_FAILURE); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("%s() LN%d, Unknown data type %d\n", + __func__, __LINE__, + data_type[i]); exit(EXIT_FAILURE); - } - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); } if (strlen(recBuf) > MAX_DATA_SIZE) { @@ -5969,97 +6345,160 @@ static int64_t generateData(char *recBuf, char **data_type, return (int32_t)strlen(recBuf); } -static int generateSampleMemoryFromRand(SSuperTable *stbInfo) +static int generateSampleFromRand( + char *sampleDataBuf, + uint64_t lenOfOneRow, + int columnCount, + StrColumn *columns + ) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); - char *buff = malloc(stbInfo->lenOfOneRow); + char *buff = malloc(lenOfOneRow); if (NULL == buff) { - errorPrint2("%s() LN%d, memory allocation %"PRId64" bytes failed\n", - __func__, __LINE__, stbInfo->lenOfOneRow); + errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n", + __func__, __LINE__, lenOfOneRow); exit(EXIT_FAILURE); } - for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { + for (int i=0; i < MAX_SAMPLES; i++) { uint64_t pos = 0; - memset(buff, 0, stbInfo->lenOfOneRow); + memset(buff, 0, lenOfOneRow); - for (int c = 0; c < stbInfo->columnCount; c++) { - char *tmp; - if (0 == strncasecmp(stbInfo->columns[c].dataType, - "BINARY", strlen("BINARY"))) { - rand_string(data, stbInfo->columns[c].dataLen); - pos += sprintf(buff + pos, "%s,", data); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "NCHAR", strlen("NCHAR"))) { - rand_string(data, stbInfo->columns[c].dataLen); - pos += 
sprintf(buff + pos, "%s,", data); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "INT", strlen("INT"))) { - if ((g_args.demo_mode) && (c == 1)) { - tmp = demo_voltage_int_str(); - } else { - tmp = rand_int_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "BIGINT", strlen("BIGINT"))) { - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "FLOAT", strlen("FLOAT"))) { - if (g_args.demo_mode) { - if (c == 0) { - tmp = demo_current_float_str(); - } else { - tmp = demo_phase_float_str(); - } - } else { - tmp = rand_float_str(); - } - pos += sprintf(buff + pos, "%s,", tmp); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "DOUBLE", strlen("DOUBLE"))) { - pos += sprintf(buff + pos, "%s,", rand_double_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "SMALLINT", strlen("SMALLINT"))) { - pos += sprintf(buff + pos, "%s,", rand_smallint_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "TINYINT", strlen("TINYINT"))) { - pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "BOOL", strlen("BOOL"))) { - pos += sprintf(buff + pos, "%s,", rand_bool_str()); - } else if (0 == strncasecmp(stbInfo->columns[c].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + for (int c = 0; c < columnCount; c++) { + char *tmp = NULL; + + uint32_t dataLen; + char data_type = (columns)?(columns[c].data_type):g_args.data_type[c]; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + dataLen = (columns)?columns[c].dataLen:g_args.binwidth; + rand_string(data, dataLen); + pos += sprintf(buff + pos, "%s,", data); + break; + + case TSDB_DATA_TYPE_NCHAR: + dataLen = (columns)?columns[c].dataLen:g_args.binwidth; + rand_string(data, dataLen); + pos += sprintf(buff + pos, "%s,", data); + break; + + case TSDB_DATA_TYPE_INT: + if ((g_args.demo_mode) && (c == 1)) { + tmp = demo_voltage_int_str(); + } else { + tmp = rand_int_str(); + } + pos += sprintf(buff + pos, "%s,", tmp); + break; + + case TSDB_DATA_TYPE_BIGINT: + pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + break; + + case TSDB_DATA_TYPE_FLOAT: + if (g_args.demo_mode) { + if (c == 0) { + tmp = demo_current_float_str(); + } else { + tmp = demo_phase_float_str(); + } + } else { + tmp = rand_float_str(); + } + pos += sprintf(buff + pos, "%s,", tmp); + break; + + case TSDB_DATA_TYPE_DOUBLE: + pos += sprintf(buff + pos, "%s,", rand_double_str()); + break; + + case TSDB_DATA_TYPE_SMALLINT: + pos += sprintf(buff + pos, "%s,", rand_smallint_str()); + break; + + case TSDB_DATA_TYPE_TINYINT: + pos += sprintf(buff + pos, "%s,", rand_tinyint_str()); + break; + + case TSDB_DATA_TYPE_BOOL: + pos += sprintf(buff + pos, "%s,", rand_bool_str()); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + pos += sprintf(buff + pos, "%s,", rand_bigint_str()); + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("%s() LN%d, Unknown data type %s\n", + __func__, __LINE__, + (columns)?(columns[c].dataType):g_args.dataType[c]); + exit(EXIT_FAILURE); } } + *(buff + pos - 1) = 0; - memcpy(stbInfo->sampleDataBuf + i * stbInfo->lenOfOneRow, buff, pos); + memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos); } free(buff); return 0; } -static int prepareSampleDataForSTable(SSuperTable *stbInfo) { +static int generateSampleFromRandForNtb() +{ + return generateSampleFromRand( + 
g_sampleDataBuf, + g_args.lenOfOneRow, + g_args.columnCount, + NULL); +} + +static int generateSampleFromRandForStb(SSuperTable *stbInfo) +{ + return generateSampleFromRand( + stbInfo->sampleDataBuf, + stbInfo->lenOfOneRow, + stbInfo->columnCount, + stbInfo->columns); +} + +static int prepareSampleForNtb() { + g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1); + if (NULL == g_sampleDataBuf) { + errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, + g_args.lenOfOneRow * MAX_SAMPLES, + strerror(errno)); + return -1; + } + + return generateSampleFromRandForNtb(); +} + +static int prepareSampleForStb(SSuperTable *stbInfo) { stbInfo->sampleDataBuf = calloc( - stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + stbInfo->lenOfOneRow * MAX_SAMPLES, 1); if (NULL == stbInfo->sampleDataBuf) { errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, - stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + stbInfo->lenOfOneRow * MAX_SAMPLES, strerror(errno)); return -1; } int ret; - if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) - ret = readSampleFromCsvFileToMem(stbInfo); - else - ret = generateSampleMemoryFromRand(stbInfo); + if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) { + ret = generateSampleFromCsvForStb(stbInfo); + } else { + ret = generateSampleFromRandForStb(stbInfo); + } if (0 != ret) { errorPrint2("%s() LN%d, read sample from csv file failed.\n", @@ -6184,7 +6623,7 @@ static int32_t generateDataTailWithoutStb( int64_t retLen = 0; - char **data_type = g_args.datatype; + char *data_type = g_args.data_type; int lenOfBinary = g_args.binwidth; if (g_args.disorderRatio) { @@ -6370,7 +6809,7 @@ static int generateStbSQLHead( dbName, tableName, dbName, - stbInfo->sTblName, + stbInfo->stbName, tagsValBuf); tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { @@ -6502,202 +6941,224 @@ static int64_t generateInterlaceDataWithoutStb( static int32_t prepareStmtBindArrayByType( TAOS_BIND *bind, - char *dataType, int32_t dataLen, + char data_type, int32_t dataLen, int32_t timePrec, char *value) { - if (0 == strncasecmp(dataType, - "BINARY", strlen("BINARY"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary; + int32_t *bind_int; + int64_t *bind_bigint; + float *bind_float; + double *bind_double; + int8_t *bind_bool; + int64_t *bind_ts2; + int16_t *bind_smallint; + int8_t *bind_tinyint; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - bind_binary = calloc(1, strlen(value) + 1); - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - bind_binary = calloc(1, dataLen + 1); - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; - 
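/*
 * Illustrative sketch of the sample-buffer layout used by prepareSampleForNtb and
 * prepareSampleForStb above: MAX_SAMPLES pre-generated rows sit back to back in one flat
 * allocation of lenOfOneRow bytes each, and consumers walk it with a position that wraps
 * at MAX_SAMPLES (see getRowDataFromSample).  allocSampleBuf/sampleRowAt are illustrative
 * names and MAX_SAMPLES_SKETCH is only a stand-in for the real MAX_SAMPLES constant.
 */
#include <stdint.h>
#include <stdlib.h>

#define MAX_SAMPLES_SKETCH 10000

static char *allocSampleBuf(uint64_t lenOfOneRow) {
    /* row i lives at offset i * lenOfOneRow inside this single block */
    return calloc(lenOfOneRow * MAX_SAMPLES_SKETCH, 1);
}

static const char *sampleRowAt(const char *sampleBuf, uint64_t lenOfOneRow, int64_t *pos) {
    const char *row = sampleBuf + (uint64_t)(*pos) * lenOfOneRow;
    if (++(*pos) == MAX_SAMPLES_SKETCH) {
        *pos = 0;   /* wrap around, exactly like the *pSamplePos handling above */
    }
    return row;
}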
} else if (0 == strncasecmp(dataType, - "NCHAR", strlen("NCHAR"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar; + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + break; - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - bind_nchar = calloc(1, strlen(value) + 1); - strncpy(bind_nchar, value, strlen(value)); - } else { - bind_nchar = calloc(1, dataLen + 1); - rand_string(bind_nchar, dataLen); - } + case TSDB_DATA_TYPE_NCHAR: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "INT", strlen("INT"))) { - int32_t *bind_int = malloc(sizeof(int32_t)); - assert(bind_int); + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "BIGINT", strlen("BIGINT"))) { - int64_t *bind_bigint = malloc(sizeof(int64_t)); - assert(bind_bigint); + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "FLOAT", strlen("FLOAT"))) { - float *bind_float = malloc(sizeof(float)); - assert(bind_float); + case TSDB_DATA_TYPE_INT: + bind_int = malloc(sizeof(int32_t)); + assert(bind_int); - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = malloc(sizeof(double)); - assert(bind_double); + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "SMALLINT", strlen("SMALLINT"))) { - int16_t *bind_smallint = malloc(sizeof(int16_t)); - assert(bind_smallint); + case TSDB_DATA_TYPE_BIGINT: + bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); - if (value) { - *bind_smallint = 
(int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = malloc(sizeof(int8_t)); - assert(bind_tinyint); + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else if (0 == strncasecmp(dataType, - "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = malloc(sizeof(int8_t)); - assert(bind_bool); - - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; + case TSDB_DATA_TYPE_FLOAT: + bind_float = malloc(sizeof(float)); + assert(bind_float); + + if (value) { + *bind_float = (float)atof(value); } else { - *bind_bool = false; + *bind_float = rand_float(); } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_DOUBLE: + bind_double = malloc(sizeof(double)); + assert(bind_double); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_SMALLINT: + bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_TINYINT: + bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_BOOL: + bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; - } else if (0 == strncasecmp(dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = malloc(sizeof(int64_t)); - assert(bind_ts2); - - if (value) { - if 
(strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while(value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; + case TSDB_DATA_TYPE_TIMESTAMP: + bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; } - i++; - } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != taosParseTime( - value, &tmpEpoch, strlen(value), - timePrec, 0)) { - free(bind_ts2); - errorPrint2("Input %s, time format error!\n", value); - return -1; + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + free(bind_ts2); + errorPrint2("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); } - *bind_ts2 = tmpEpoch; } else { - *bind_ts2 = atoll(value); + *bind_ts2 = rand_bigint(); } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else { - errorPrint2("Not support data type: %s\n", dataType); - return -1; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + break; + + case TSDB_DATA_TYPE_NULL: + break; + + default: + errorPrint2("Not support data type: %d\n", data_type); + exit(EXIT_FAILURE); } return 0; @@ -6705,209 +7166,230 @@ static int32_t prepareStmtBindArrayByType( static int32_t prepareStmtBindArrayByTypeForRand( TAOS_BIND *bind, - char *dataType, int32_t dataLen, + char data_type, int32_t dataLen, int32_t timePrec, char **ptr, char *value) { - if (0 == strncasecmp(dataType, - "BINARY", strlen("BINARY"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("binary length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_binary = (char *)*ptr; + int32_t *bind_int; + int64_t *bind_bigint; + float *bind_float; + double *bind_double; + int16_t *bind_smallint; + int8_t *bind_tinyint; + int8_t *bind_bool; + int64_t *bind_ts2; + + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: - bind->buffer_type = TSDB_DATA_TYPE_BINARY; - if (value) { - strncpy(bind_binary, value, strlen(value)); - bind->buffer_length = strlen(bind_binary); - } else { - rand_string(bind_binary, dataLen); - bind->buffer_length = dataLen; - } + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary = (char *)*ptr; - bind->length = &bind->buffer_length; - bind->buffer = bind_binary; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "NCHAR", strlen("NCHAR"))) { - if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint2("nchar length overflow, max size: %u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } - char *bind_nchar = (char *)*ptr; + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; - bind->buffer_type = TSDB_DATA_TYPE_NCHAR; - if (value) { - 
strncpy(bind_nchar, value, strlen(value)); - } else { - rand_string(bind_nchar, dataLen); - } + *ptr += bind->buffer_length; + break; - bind->buffer_length = strlen(bind_nchar); - bind->buffer = bind_nchar; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + case TSDB_DATA_TYPE_NCHAR: + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint2("nchar length overflow, max size: %u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar = (char *)*ptr; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "INT", strlen("INT"))) { - int32_t *bind_int = (int32_t *)*ptr; + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + strncpy(bind_nchar, value, strlen(value)); + } else { + rand_string(bind_nchar, dataLen); + } - if (value) { - *bind_int = atoi(value); - } else { - *bind_int = rand_int(); - } - bind->buffer_type = TSDB_DATA_TYPE_INT; - bind->buffer_length = sizeof(int32_t); - bind->buffer = bind_int; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "BIGINT", strlen("BIGINT"))) { - int64_t *bind_bigint = (int64_t *)*ptr; + *ptr += bind->buffer_length; + break; - if (value) { - *bind_bigint = atoll(value); - } else { - *bind_bigint = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_BIGINT; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_bigint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + case TSDB_DATA_TYPE_INT: + bind_int = (int32_t *)*ptr; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "FLOAT", strlen("FLOAT"))) { - float *bind_float = (float *)*ptr; + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - if (value) { - *bind_float = (float)atof(value); - } else { - *bind_float = rand_float(); - } - bind->buffer_type = TSDB_DATA_TYPE_FLOAT; - bind->buffer_length = sizeof(float); - bind->buffer = bind_float; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + *ptr += bind->buffer_length; + break; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = (double *)*ptr; + case TSDB_DATA_TYPE_BIGINT: + bind_bigint = (int64_t *)*ptr; - if (value) { - *bind_double = atof(value); - } else { - *bind_double = rand_double(); - } - bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; - bind->buffer_length = sizeof(double); - bind->buffer = bind_double; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "SMALLINT", strlen("SMALLINT"))) { - int16_t *bind_smallint = (int16_t *)*ptr; + *ptr += bind->buffer_length; + break; - if (value) { - *bind_smallint = (int16_t)atoi(value); - } else { - *bind_smallint = rand_smallint(); - } - bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; - bind->buffer_length = sizeof(int16_t); - bind->buffer = bind_smallint; - 
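/*
 * Illustrative sketch: every branch of prepareStmtBindArrayByType(ForRand) above fills
 * the same five TAOS_BIND fields -- buffer_type, buffer, buffer_length, length and
 * is_null.  bindOneInt shows that shape for a single INT value; the helper name is made
 * up, TAOS_BIND and TSDB_DATA_TYPE_INT are assumed to come from taos.h, and `slot` must
 * stay alive until taos_stmt_bind_param() has consumed the bind array.
 */
#include <stdint.h>
#include <stddef.h>
#include "taos.h"

static void bindOneInt(TAOS_BIND *bind, int32_t *slot, int32_t value) {
    *slot               = value;
    bind->buffer_type   = TSDB_DATA_TYPE_INT;
    bind->buffer        = slot;
    bind->buffer_length = sizeof(int32_t);
    bind->length        = &bind->buffer_length;  /* fixed-width type: length == buffer_length */
    bind->is_null       = NULL;                  /* the generator never produces NULLs */
}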
bind->length = &bind->buffer_length; - bind->is_null = NULL; + case TSDB_DATA_TYPE_FLOAT: + bind_float = (float *)*ptr; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = (int8_t *)*ptr; + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - if (value) { - *bind_tinyint = (int8_t)atoi(value); - } else { - *bind_tinyint = rand_tinyint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TINYINT; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_tinyint; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + *ptr += bind->buffer_length; + break; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = (int8_t *)*ptr; + case TSDB_DATA_TYPE_DOUBLE: + bind_double = (double *)*ptr; - if (value) { - if (strncasecmp(value, "true", 4)) { - *bind_bool = true; + if (value) { + *bind_double = atof(value); } else { - *bind_bool = false; + *bind_double = rand_double(); } - } else { - *bind_bool = rand_bool(); - } - bind->buffer_type = TSDB_DATA_TYPE_BOOL; - bind->buffer_length = sizeof(int8_t); - bind->buffer = bind_bool; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else if (0 == strncasecmp(dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = (int64_t *)*ptr; - - if (value) { - if (strchr(value, ':') && strchr(value, '-')) { - int i = 0; - while(value[i] != '\0') { - if (value[i] == '\"' || value[i] == '\'') { - value[i] = ' '; - } - i++; + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_SMALLINT: + bind_smallint = (int16_t *)*ptr; + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_TINYINT: + bind_tinyint = (int8_t *)*ptr; + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_BOOL: + bind_bool = (int8_t *)*ptr; + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; } - int64_t tmpEpoch; - if (TSDB_CODE_SUCCESS != taosParseTime( - value, &tmpEpoch, strlen(value), - timePrec, 0)) { - errorPrint2("Input %s, time format error!\n", value); - return -1; + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + bind_ts2 = (int64_t *)*ptr; + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) 
{ + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint2("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); } - *bind_ts2 = tmpEpoch; } else { - *bind_ts2 = atoll(value); + *bind_ts2 = rand_bigint(); } - } else { - *bind_ts2 = rand_bigint(); - } - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts2; - bind->length = &bind->buffer_length; - bind->is_null = NULL; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - *ptr += bind->buffer_length; - } else { - errorPrint2("No support data type: %s\n", dataType); - return -1; + *ptr += bind->buffer_length; + break; + + default: + errorPrint2("No support data type: %d\n", data_type); + return -1; } return 0; @@ -6929,12 +7411,12 @@ static int32_t prepareStmtWithoutStb( return ret; } - char **data_type = g_args.datatype; + char *data_type = g_args.data_type; - char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1)); + char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1)); if (bindArray == NULL) { errorPrint2("Failed to allocate %d bind params\n", - (g_args.num_of_CPR + 1)); + (g_args.columnCount + 1)); return -1; } @@ -6961,7 +7443,7 @@ static int32_t prepareStmtWithoutStb( bind->length = &bind->buffer_length; bind->is_null = NULL; - for (int i = 0; i < g_args.num_of_CPR; i ++) { + for (int i = 0; i < g_args.columnCount; i ++) { bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); if ( -1 == prepareStmtBindArrayByType( @@ -6970,6 +7452,7 @@ static int32_t prepareStmtWithoutStb( g_args.binwidth, pThreadInfo->time_precision, NULL)) { + free(bindArray); return -1; } } @@ -7001,29 +7484,20 @@ static int32_t prepareStbStmtBindTag( char *tagsVal, int32_t timePrec) { - char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.binwidth); - if (bindBuffer == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, DOUBLE_BUFF_LEN); - return -1; - } - TAOS_BIND *tag; for (int t = 0; t < stbInfo->tagCount; t ++) { tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); if ( -1 == prepareStmtBindArrayByType( tag, - stbInfo->tags[t].dataType, + stbInfo->tags[t].data_type, stbInfo->tags[t].dataLen, timePrec, NULL)) { - free(bindBuffer); return -1; } } - free(bindBuffer); return 0; } @@ -7033,13 +7507,6 @@ static int32_t prepareStbStmtBindRand( int64_t startTime, int32_t recSeq, int32_t timePrec) { - char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.binwidth); - if (bindBuffer == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, DOUBLE_BUFF_LEN); - return -1; - } - char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); char *ptr = data; @@ -7069,51 +7536,15 @@ static int32_t prepareStbStmtBindRand( ptr += bind->buffer_length; } else if ( -1 == prepareStmtBindArrayByTypeForRand( bind, - stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].data_type, stbInfo->columns[i-1].dataLen, timePrec, &ptr, NULL)) { - tmfree(bindBuffer); return -1; } } - tmfree(bindBuffer); - return 0; -} - -static int32_t prepareStbStmtBindStartTime( - char *tableName, - int64_t *ts, - char 
*bindArray, SSuperTable *stbInfo, - int64_t startTime, int32_t recSeq, - int32_t timePrec) -{ - TAOS_BIND *bind; - - bind = (TAOS_BIND *)bindArray; - - int64_t *bind_ts = ts; - - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - - verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", - __func__, __LINE__, tableName, *bind_ts); - - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - return 0; } @@ -7229,7 +7660,8 @@ UNUSED_FUNC static int32_t prepareStbStmtRand( return k; } -static int32_t prepareStbStmtWithSample( +#if STMT_BIND_PARAM_BATCH == 1 +static int execBindParamBatch( threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, @@ -7240,94 +7672,182 @@ static int32_t prepareStbStmtWithSample( int64_t *pSamplePos) { int ret; - SSuperTable *stbInfo = pThreadInfo->stbInfo; TAOS_STMT *stmt = pThreadInfo->stmt; - if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { - char* tagsValBuf = NULL; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + uint32_t columnCount = (stbInfo)?pThreadInfo->stbInfo->columnCount:g_args.columnCount; + + uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos); + + if (thisBatch > batch) { + thisBatch = batch; + } + verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n", + __func__, __LINE__, batch, *pSamplePos, thisBatch); + + memset(pThreadInfo->bindParams, 0, + (sizeof(TAOS_MULTI_BIND) * (columnCount + 1))); + memset(pThreadInfo->is_null, 0, thisBatch); + + for (int c = 0; c < columnCount + 1; c ++) { + TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); + + char data_type; + + if (c == 0) { + data_type = TSDB_DATA_TYPE_TIMESTAMP; + param->buffer_length = sizeof(int64_t); + param->buffer = pThreadInfo->bind_ts_array; - if (0 == stbInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { - tagsValBuf = getTagValueFromTagSample( - stbInfo, - tableSeq % stbInfo->tagSampleCount); - } + data_type = (stbInfo)?stbInfo->columns[c-1].data_type:g_args.data_type[c-1]; - if (NULL == tagsValBuf) { - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } + char *tmpP; - char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); - if (NULL == tagsArray) { - tmfree(tagsValBuf); - errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } + switch(data_type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + param->buffer_length = + ((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth); - if (-1 == prepareStbStmtBindTag( - tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision - /* is tag */)) { - tmfree(tagsValBuf); - tmfree(tagsArray); - return -1; - } + tmpP = + (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1))); - ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%d position=%"PRId64"\n", + __func__, __LINE__, tmpP, *pSamplePos, + (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)), + (*pSamplePos) * + (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth))); - tmfree(tagsValBuf); - tmfree(tagsArray); + param->buffer = (void *)(tmpP + 
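/*
 * Illustrative sketch: sampleBindBatchArray above is an array of per-column base pointers
 * stored as uintptr_t values, each base pointing at MAX_SAMPLES pre-parsed values of that
 * column's width.  The address arithmetic used in execBindParamBatch,
 *   (uintptr_t)*(uintptr_t*)(array + sizeof(char*) * c) + width * pos,
 * is equivalent to the two helpers below; columnBase/columnValueAt are illustrative names.
 */
#include <stdint.h>
#include <stddef.h>

static void *columnBase(char *sampleBindBatchArray, int c) {
    /* slot c of the array holds the base pointer of column c */
    return (void *)(uintptr_t)*(uintptr_t *)(sampleBindBatchArray + sizeof(char *) * c);
}

static void *columnValueAt(char *sampleBindBatchArray, int c,
                           size_t valueWidth, int64_t pos) {
    /* value number `pos` of column c starts valueWidth * pos bytes past the base */
    return (char *)columnBase(sampleBindBatchArray, c) + valueWidth * pos;
}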
*pSamplePos * + (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)) + ); + break; - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; + case TSDB_DATA_TYPE_INT: + param->buffer_length = sizeof(int32_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int32_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_TINYINT: + param->buffer_length = sizeof(int8_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)( + stbInfo->sampleBindBatchArray + +sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen*(*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)( + g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int8_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_SMALLINT: + param->buffer_length = sizeof(int16_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int16_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_BIGINT: + param->buffer_length = sizeof(int64_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int64_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_BOOL: + param->buffer_length = sizeof(int8_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int8_t)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_FLOAT: + param->buffer_length = sizeof(float); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(float)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_DOUBLE: + param->buffer_length = sizeof(double); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(double)*(*pSamplePos)); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + param->buffer_length = sizeof(int64_t); + param->buffer = (stbInfo)? + (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1)) + + stbInfo->columns[c-1].dataLen * (*pSamplePos)): + (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1)) + + sizeof(int64_t)*(*pSamplePos)); + break; + + default: + errorPrint("%s() LN%d, wrong data type: %d\n", + __func__, + __LINE__, + data_type); + exit(EXIT_FAILURE); + + } } - } else { - ret = taos_stmt_set_tbname(stmt, tableName); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; + + param->buffer_type = data_type; + param->length = malloc(sizeof(int32_t) * thisBatch); + assert(param->length); + + for (int b = 0; b < thisBatch; b++) { + if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) { + param->length[b] = strlen( + (char *)param->buffer + b * + ((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth) + ); + } else { + param->length[b] = param->buffer_length; + } } + param->is_null = pThreadInfo->is_null; + param->num = thisBatch; } uint32_t k; - for (k = 0; k < batch;) { - char *bindArray = (char *)(*((uintptr_t *) - (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + for (k = 0; k < thisBatch;) { /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBindStartTime( - tableName, - pThreadInfo->bind_ts, - bindArray, stbInfo, - startTime, k, - pThreadInfo->time_precision - /* is column */)) { - return -1; - } - ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; - } - // if msg > 3MB, break - ret = taos_stmt_add_batch(stmt); - if (0 != ret) { - errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", - __func__, __LINE__, taos_stmt_errstr(stmt)); - return -1; + if (stbInfo->disorderRatio) { + *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail( + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k; } + debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n", + __func__, __LINE__, + k, *(pThreadInfo->bind_ts_array +k)); k++; recordFrom ++; (*pSamplePos) ++; - if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + if ((*pSamplePos) == MAX_SAMPLES) { *pSamplePos = 0; } @@ -7336,115 +7856,1074 @@ static int32_t prepareStbStmtWithSample( } } + ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + for (int c = 0; c < stbInfo->columnCount + 1; c ++) { + TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c); + free(param->length); + } + + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } return k; } -static int32_t generateStbProgressiveData( - SSuperTable *stbInfo, - char *tableName, - int64_t tableSeq, - char *dbName, char *buffer, - int64_t insertRows, - uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, - int64_t *pRemainderBufLen) +static int parseSamplefileToStmtBatch( + SSuperTable* stbInfo) { - assert(buffer != NULL); - char *pstr = buffer; + // char *sampleDataBuf = (stbInfo)? 
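/*
 * Illustrative sketch of the batch-bind path implemented by execBindParamBatch above:
 * one TAOS_MULTI_BIND per column whose buffer points at `num` consecutive values, bound
 * with a single taos_stmt_bind_param_batch() call and queued with taos_stmt_add_batch().
 * Only the timestamp column and one INT column are shown, the disorder handling via
 * getTSRandTail is left out, and the helper name bindTsAndIntBatch is made up.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"

static int bindTsAndIntBatch(TAOS_STMT *stmt,
                             int64_t *tsArray, int32_t *intArray, char *isNull,
                             int32_t num, int64_t startTime, int64_t step) {
    TAOS_MULTI_BIND params[2];
    memset(params, 0, sizeof(params));
    memset(isNull, 0, num);                           /* generated rows contain no NULLs */

    int32_t *tsLen  = malloc(sizeof(int32_t) * num);  /* per-row lengths, as in the hunk */
    int32_t *intLen = malloc(sizeof(int32_t) * num);
    if (tsLen == NULL || intLen == NULL) {
        free(tsLen); free(intLen);
        return -1;
    }

    for (int32_t k = 0; k < num; k++) {
        tsArray[k] = startTime + step * k;            /* progressive timestamps */
        tsLen[k]   = sizeof(int64_t);
        intLen[k]  = sizeof(int32_t);
    }

    params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
    params[0].buffer        = tsArray;
    params[0].buffer_length = sizeof(int64_t);
    params[0].length        = tsLen;
    params[0].is_null       = isNull;
    params[0].num           = num;

    params[1].buffer_type   = TSDB_DATA_TYPE_INT;
    params[1].buffer        = intArray;
    params[1].buffer_length = sizeof(int32_t);
    params[1].length        = intLen;
    params[1].is_null       = isNull;
    params[1].num           = num;

    int ret = taos_stmt_bind_param_batch(stmt, params);
    free(tsLen);                                      /* lengths freed right after bind, */
    free(intLen);                                     /* mirroring the code above        */
    if (ret != 0) {
        return -1;
    }
    return taos_stmt_add_batch(stmt);                 /* executed later by the caller */
}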
+ // stbInfo->sampleDataBuf:g_sampleDataBuf; + int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + char *sampleBindBatchArray = NULL; - memset(pstr, 0, *pRemainderBufLen); - - int64_t headLen = generateStbSQLHead( - stbInfo, - tableName, tableSeq, dbName, - buffer, *pRemainderBufLen); - - if (headLen <= 0) { - return 0; + if (stbInfo) { + stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); + sampleBindBatchArray = stbInfo->sampleBindBatchArray; + } else { + g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount); + sampleBindBatchArray = g_sampleBindBatchArray; } - pstr += headLen; - *pRemainderBufLen -= headLen; + assert(sampleBindBatchArray); - int64_t dataLen; + for (int c = 0; c < columnCount; c++) { + char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c]; - return generateStbDataTail(stbInfo, - g_args.num_of_RPR, pstr, *pRemainderBufLen, - insertRows, recordFrom, - startTime, - pSamplePos, &dataLen); -} + char *tmpP = NULL; -static int32_t generateProgressiveDataWithoutStb( - char *tableName, - /* int64_t tableSeq, */ - threadInfo *pThreadInfo, char *buffer, - int64_t insertRows, - uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ - int64_t *pRemainderBufLen) -{ - assert(buffer != NULL); - char *pstr = buffer; + switch(data_type) { + case TSDB_DATA_TYPE_INT: + tmpP = calloc(1, sizeof(int) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; - memset(buffer, 0, *pRemainderBufLen); + case TSDB_DATA_TYPE_TINYINT: + tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_SMALLINT: + tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BIGINT: + tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BOOL: + tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_FLOAT: + tmpP = calloc(1, sizeof(float) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmpP = calloc(1, sizeof(double) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = calloc(1, MAX_SAMPLES * + (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES); + assert(tmpP); + *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP; + break; + + default: + errorPrint("Unknown data type: %s\n", + (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]); + exit(EXIT_FAILURE); + } + } + + char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; + int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; + + for (int i=0; i < MAX_SAMPLES; i++) { + int cursor = 0; + + for (int c = 0; c < columnCount; c++) { + char 
data_type = (stbInfo)? + stbInfo->columns[c].data_type: + g_args.data_type[c]; + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *tmpStr = calloc(1, index + 1); + if (NULL == tmpStr) { + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + + strncpy(tmpStr, restStr, index); + cursor += index + 1; // skip ',' too + char *tmpP; + + switch(data_type) { + case TSDB_DATA_TYPE_INT: + *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int32_t)*i)) = + atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_FLOAT: + *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(float)*i)) = + (float)atof(tmpStr); + break; + + case TSDB_DATA_TYPE_DOUBLE: + *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(double)*i)) = + atof(tmpStr); + break; + + case TSDB_DATA_TYPE_TINYINT: + *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int8_t)*i)) = + (int8_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int16_t)*i)) = + (int16_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_BIGINT: + *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int64_t)*i)) = + (int64_t)atol(tmpStr); + break; + + case TSDB_DATA_TYPE_BOOL: + *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int8_t)*i)) = + (int8_t)atoi(tmpStr); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)+sizeof(int64_t)*i)) = + (int64_t)atol(tmpStr); + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray + +sizeof(char*)*c)); + strcpy(tmpP + i* + (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)) + , tmpStr); + break; + + default: + break; + } + + free(tmpStr); + } + } + + return 0; +} + +static int parseSampleToStmtBatchForThread( + threadInfo *pThreadInfo, SSuperTable *stbInfo, + uint32_t timePrec, + uint32_t batch) +{ + uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + + pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch); + assert(pThreadInfo->bind_ts_array); + + pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1)); + assert(pThreadInfo->bindParams); + + pThreadInfo->is_null = malloc(batch); + assert(pThreadInfo->is_null); + + return 0; +} + +static int parseStbSampleToStmtBatchForThread( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, + uint32_t timePrec, + uint32_t batch) +{ + return parseSampleToStmtBatchForThread( + pThreadInfo, stbInfo, timePrec, batch); +} + +static int parseNtbSampleToStmtBatchForThread( + threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch) +{ + return parseSampleToStmtBatchForThread( + pThreadInfo, NULL, timePrec, batch); +} + +#else +static int parseSampleToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) +{ + pThreadInfo->sampleBindArray = + (char *)calloc(1, sizeof(char *) * MAX_SAMPLES); + if (pThreadInfo->sampleBindArray == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, + (uint64_t)sizeof(char *) * 
MAX_SAMPLES); + return -1; + } + + int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount; + char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf; + int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow; + + for (int i=0; i < MAX_SAMPLES; i++) { + char *bindArray = + calloc(1, sizeof(TAOS_BIND) * (columnCount + 1)); + if (bindArray == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (columnCount + 1)); + return -1; + } + + TAOS_BIND *bind; + int cursor = 0; + + for (int c = 0; c < columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char data_type = (stbInfo)? + stbInfo->columns[c-1].data_type: + g_args.data_type[c-1]; + int32_t dataLen = (stbInfo)? + stbInfo->columns[c-1].dataLen: + g_args.binwidth; + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + data_type, + dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + free(bindArray); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) = + (uintptr_t)bindArray; + } + + return 0; +} + +static int parseStbSampleToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) +{ + return parseSampleToStmt( + pThreadInfo, + stbInfo, timePrec); +} + +static int parseNtbSampleToStmt( + threadInfo *pThreadInfo, + uint32_t timePrec) +{ + return parseSampleToStmt( + pThreadInfo, + NULL, + timePrec); +} + +static int32_t prepareStbStmtBindStartTime( + char *tableName, + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + + verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", + __func__, __LINE__, tableName, *bind_ts); + + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static uint32_t execBindParam( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindStartTime( + tableName, + pThreadInfo->bind_ts, + 
bindArray, stbInfo, + startTime, k + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} +#endif + +static int32_t prepareStbStmt( + threadInfo *pThreadInfo, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; + + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint2("%s() LN%d, stmt_set_tbname() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + +#if STMT_BIND_PARAM_BATCH == 1 + return execBindParamBatch( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, + recordFrom, + startTime, + pSamplePos); +#else + return execBindParam( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, + recordFrom, + startTime, + pSamplePos); +#endif +} + +static int32_t generateStbProgressiveData( + SSuperTable *stbInfo, + char *tableName, + int64_t tableSeq, + char *dbName, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(pstr, 0, *pRemainderBufLen); + + int64_t headLen = generateStbSQLHead( + stbInfo, + tableName, tableSeq, dbName, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen; + + return generateStbDataTail(stbInfo, + g_args.reqPerReq, pstr, *pRemainderBufLen, + insertRows, recordFrom, + startTime, + pSamplePos, &dataLen); +} + +static int32_t generateProgressiveDataWithoutStb( + char *tableName, + /* int64_t tableSeq, */ + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(buffer, 0, *pRemainderBufLen); int64_t headLen = generateSQLHeadWithoutStb( tableName, pThreadInfo->db_name, buffer, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen; + + return generateDataTailWithoutStb( + g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom, + startTime, + /*pSamplePos, */&dataLen); +} + +static void printStatPerThread(threadInfo *pThreadInfo) +{ + if (0 == pThreadInfo->totalDelay) + pThreadInfo->totalDelay = 1; + + fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". 
%.2f records/second====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows, + (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)) + ); +} + +#if STMT_BIND_PARAM_BATCH == 1 +// stmt sync write interlace data +static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint64_t timesInterlace = (insertRows / interlaceRows) + 1; + uint32_t precalcBatch = interlaceRows; + + if (precalcBatch > g_args.reqPerReq) + precalcBatch = g_args.reqPerReq; + + if (precalcBatch > MAX_SAMPLES) + precalcBatch = MAX_SAMPLES; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime; + + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + pThreadInfo->samplePos = 0; + + for (int64_t interlace = 0; + interlace < timesInterlace; interlace ++) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + int64_t generated = 0; + int64_t samplePos; + + for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; tableSeq ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + samplePos = pThreadInfo->samplePos; + startTime = pThreadInfo->start_time + + interlace * interlaceRows * timeStampStep; + uint64_t remainRecPerTbl = + insertRows - interlaceRows * interlace; + uint64_t recPerTbl = 0; + + uint64_t remainPerInterlace; + if (remainRecPerTbl > interlaceRows) { + remainPerInterlace = interlaceRows; + } else { + remainPerInterlace = remainRecPerTbl; + } + + while(remainPerInterlace > 0) { + + uint32_t batch; + if (remainPerInterlace > precalcBatch) { + batch = precalcBatch; + } else { + batch = remainPerInterlace; + } + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batch, startTime); + + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + batch, + insertRows, 0, + startTime, + &samplePos); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batch, + insertRows, + interlaceRows * interlace + recPerTbl, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { 
+ errorPrint2("[%d] %s() LN%d, generated records is %"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + recPerTbl += generated; + remainPerInterlace -= generated; + pThreadInfo->totalInsertRows += generated; + + verbosePrint("[%d] %s() LN%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + int64_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (generated != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert() insert %"PRId64", affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + generated, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + startTime += (generated * timeStampStep); + } + } + pThreadInfo->samplePos = samplePos; + + if (tableSeq == pThreadInfo->start_table_from + + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + + flagSleep = true; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; +} +#else +// stmt sync write interlace data +static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + uint64_t maxSqlLen; + int64_t timeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + maxSqlLen = stbInfo->maxSqlLen; + timeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + maxSqlLen = g_args.max_sql_len; + timeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + 
pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.reqPerReq / interlaceRows; + } else { + batchPerTblTimes = 1; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + uint32_t recOfBatch = 0; + + int32_t generated; + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batchPerTbl, + insertRows, i, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint2("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; - int64_t dataLen; + startTime = pThreadInfo->start_time + + generatedRecPerTbl * timeStampStep; - return generateDataTailWithoutStb( - g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, - startTime, - /*pSamplePos, */&dataLen); -} + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; -static void printStatPerThread(threadInfo *pThreadInfo) -{ - fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows, - (pThreadInfo->totalDelay)? 
- (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)): - FLT_MAX); + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + if (recOfBatch == 0) { + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less than %"PRId64"\n", + batchPerTbl, maxSqlLen / batchPerTbl); + } + goto free_of_interlace_stmt; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; } +#endif + // sync write interlace data -static void* syncWriteInterlace(threadInfo *pThreadInfo) { +static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, __func__, __LINE__); int64_t insertRows; - uint32_t interlaceRows; uint64_t maxSqlLen; - int64_t nTimeStampStep; + int64_t timeStampStep; uint64_t insert_interval; SSuperTable* stbInfo = pThreadInfo->stbInfo; if (stbInfo) { insertRows = stbInfo->insertRows;
- - if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { - interlaceRows = stbInfo->interlaceRows; - } maxSqlLen = stbInfo->maxSqlLen; - nTimeStampStep = stbInfo->timeStampStep; + timeStampStep = stbInfo->timeStampStep; insert_interval = stbInfo->insertInterval; } else { - insertRows = g_args.num_of_DPT; - interlaceRows = g_args.interlace_rows; + insertRows = g_args.insertRows; maxSqlLen = g_args.max_sql_len; - nTimeStampStep = g_args.timestamp_step; + timeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; } @@ -7452,23 +8931,35 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - - if (interlaceRows > insertRows) - interlaceRows = insertRows; - - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; +#if 1 + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; uint32_t batchPerTbl = interlaceRows; uint32_t batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = - g_args.num_of_RPR / interlaceRows; + g_args.reqPerReq / interlaceRows; } else { batchPerTblTimes = 1; } +#else + uint32_t batchPerTbl; + if (interlaceRows > g_args.reqPerReq) + batchPerTbl = g_args.reqPerReq; + else + batchPerTbl = interlaceRows; + + uint32_t batchPerTblTimes; + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + interlaceRows / batchPerTbl; + } else { + batchPerTblTimes = 1; + } +#endif pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", @@ -7501,6 +8992,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { st = taosGetTimestampMs(); flagSleep = false; } + // generate data memset(pThreadInfo->buffer, 0, maxSqlLen); uint64_t remainderBufLen = maxSqlLen; @@ -7514,6 +9006,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint32_t recOfBatch = 0; + int32_t generated; for (uint64_t i = 0; i < batchPerTblTimes; i ++) { char tableName[TSDB_TABLE_NAME_LEN]; @@ -7527,49 +9020,24 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint64_t oldRemainderLen = remainderBufLen; - int32_t generated; if (stbInfo) { - if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmtWithSample( - pThreadInfo, - tableName, - tableSeq, - batchPerTbl, - insertRows, 0, - startTime, - &(pThreadInfo->samplePos)); - } else { - generated = generateStbInterlaceData( - pThreadInfo, - tableName, batchPerTbl, i, - batchPerTblTimes, - tableSeq, - pstr, - insertRows, - startTime, - &remainderBufLen); - } + generated = generateStbInterlaceData( + pThreadInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pstr, + insertRows, + startTime, + &remainderBufLen); } else { - if (g_args.iface == STMT_IFACE) { - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batchPerTbl, startTime); - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - batchPerTbl, - insertRows, i, - startTime); - } else { - generated = generateInterlaceDataWithoutStb( - tableName, batchPerTbl, - tableSeq, - pThreadInfo->db_name, pstr, - insertRows, - startTime, - &remainderBufLen); - } + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + 
startTime, + &remainderBufLen); } debugPrint("[%d] %s() LN%d, generated records is %d\n", @@ -7598,7 +9066,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { generatedRecPerTbl += batchPerTbl; startTime = pThreadInfo->start_time - + generatedRecPerTbl * nTimeStampStep; + + generatedRecPerTbl * timeStampStep; flagSleep = true; if (generatedRecPerTbl >= insertRows) @@ -7608,7 +9076,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if ((remainRows > 0) && (batchPerTbl > remainRows)) batchPerTbl = remainRows; - if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) break; } @@ -7616,7 +9084,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, generatedRecPerTbl, insertRows); - if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) break; } @@ -7636,69 +9104,207 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", batchPerTbl, maxSqlLen / batchPerTbl); } - errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", - maxSqlLen, batchPerTbl); - goto free_of_interlace; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", + maxSqlLen, batchPerTbl); + goto free_of_interlace; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows, pThreadInfo->buffer); + goto free_of_interlace; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace: + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; +} + +static void* 
syncWriteProgressiveStmt(threadInfo *pThreadInfo) { + debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__); + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + int64_t timeStampStep = + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; + int64_t insertRows = + (stbInfo)?stbInfo->insertRows:g_args.insertRows; + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", + __func__, __LINE__, insertRows); + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + pThreadInfo->samplePos = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { + int64_t start_time = pThreadInfo->start_time; + + for (uint64_t i = 0; i < insertRows;) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } - endTs = taosGetTimestampUs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", - __func__, __LINE__, delay / 1000.0); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); + // measure prepare + insert + startTs = taosGetTimestampUs(); - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; + int32_t generated; + if (stbInfo) { + generated = prepareStbStmt( + pThreadInfo, + tableName, + tableSeq, + (g_args.reqPerReq>stbInfo->insertRows)? 
+ stbInfo->insertRows: + g_args.reqPerReq, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } else { + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + g_args.reqPerReq, + insertRows, i, + start_time); + } - if (recOfBatch != affectedRows) { - errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows, pThreadInfo->buffer); - goto free_of_interlace; - } + verbosePrint("[%d] %s() LN%d generated=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, generated); - pThreadInfo->totalAffectedRows += affectedRows; + if (generated > 0) + i += generated; + else + goto free_of_stmt_progressive; - int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; - if (currentPercent > percentComplete ) { - printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); - percentComplete = currentPercent; - } - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + start_time += generated * timeStampStep; + pThreadInfo->totalInsertRows += generated; + + // only measure insert + // startTs = taosGetTimestampUs(); + + int32_t affectedRows = execInsert(pThreadInfo, generated); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.f ms\n", + __func__, __LINE__, delay/1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } + __func__, __LINE__, affectedRows); - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; - if (insert_interval > (et - st) ) { - uint64_t sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep(sleepTime); // ms - sleepTimeTotal += insert_interval; + if (affectedRows < 0) { + errorPrint2("%s() LN%d, affected rows: %d\n", + __func__, __LINE__, affectedRows); + goto free_of_stmt_progressive; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; } + + if (i >= insertRows) + break; + } // insertRows + + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) + && (0 == strncasecmp( + stbInfo->dataSource, + "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", + __func__, __LINE__, pThreadInfo->samplePos); } - } - if (percentComplete < 100) + } // tableSeq + + if (percentComplete < 100) { printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + } -free_of_interlace: +free_of_stmt_progressive: 
tmfree(pThreadInfo->buffer); printStatPerThread(pThreadInfo); return NULL; } - // sync insertion progressive data static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); @@ -7708,7 +9314,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int64_t timeStampStep = stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; int64_t insertRows = - (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT; + (stbInfo)?stbInfo->insertRows:g_args.insertRows; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); @@ -7765,11 +9371,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int32_t generated; if (stbInfo) { if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmtWithSample( + generated = prepareStbStmt( pThreadInfo, tableName, tableSeq, - g_args.num_of_RPR, + (g_args.reqPerReq>stbInfo->insertRows)? + stbInfo->insertRows: + g_args.reqPerReq, insertRows, i, start_time, &(pThreadInfo->samplePos)); } else { @@ -7786,7 +9394,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { generated = prepareStmtWithoutStb( pThreadInfo, tableName, - g_args.num_of_RPR, + g_args.reqPerReq, insertRows, i, start_time); } else { @@ -7854,7 +9462,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (i >= insertRows) break; - } // num_of_DPT + } // insertRows if ((g_args.verbose_print) && (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) @@ -7865,8 +9473,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->samplePos); } } // tableSeq - if (percentComplete < 100) + + if (percentComplete < 100) { printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + } free_of_progressive: tmfree(pThreadInfo->buffer); @@ -7881,26 +9491,40 @@ static void* syncWrite(void *sarg) { setThreadName("syncWrite"); - uint32_t interlaceRows; + uint32_t interlaceRows = 0; if (stbInfo) { - if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { + if (stbInfo->interlaceRows < stbInfo->insertRows) interlaceRows = stbInfo->interlaceRows; - } } else { - interlaceRows = g_args.interlace_rows; + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; } if (interlaceRows > 0) { // interlace mode - return syncWriteInterlace(pThreadInfo); + if (stbInfo) { + if (STMT_IFACE == stbInfo->iface) { +#if STMT_BIND_PARAM_BATCH == 1 + return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); +#else + return syncWriteInterlaceStmt(pThreadInfo, interlaceRows); +#endif + } else { + return syncWriteInterlace(pThreadInfo, interlaceRows); + } + } } else { - // progressive mode - return syncWriteProgressive(pThreadInfo); + // progressive mode + if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) + || (STMT_IFACE == g_args.iface)) { + return syncWriteProgressiveStmt(pThreadInfo); + } else { + return syncWriteProgressive(pThreadInfo); + } } + + return NULL; } static void callBack(void *param, TAOS_RES *res, int code) { @@ -7919,11 +9543,11 @@ static void callBack(void *param, TAOS_RES *res, int code) { char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", + pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES", pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) 
{ - if (pThreadInfo->counter >= g_args.num_of_RPR) { + if (pThreadInfo->counter >= g_args.reqPerReq) { pThreadInfo->start_table_from++; pThreadInfo->counter = 0; } @@ -7934,7 +9558,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { return; } - for (int i = 0; i < g_args.num_of_RPR; i++) { + for (int i = 0; i < g_args.reqPerReq; i++) { int rand_num = taosRandom() % 100; if (0 != pThreadInfo->stbInfo->disorderRatio && rand_num < pThreadInfo->stbInfo->disorderRatio) { @@ -8014,81 +9638,6 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } -static int parseSampleFileToStmt( - threadInfo *pThreadInfo, - SSuperTable *stbInfo, uint32_t timePrec) -{ - pThreadInfo->sampleBindArray = - calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); - if (pThreadInfo->sampleBindArray == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", - __func__, __LINE__, - (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); - return -1; - } - - for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { - char *bindArray = - calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); - if (bindArray == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", - __func__, __LINE__, (stbInfo->columnCount + 1)); - return -1; - } - - TAOS_BIND *bind; - int cursor = 0; - - for (int c = 0; c < stbInfo->columnCount + 1; c++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); - - if (c == 0) { - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - bind->buffer_length = sizeof(int64_t); - bind->buffer = NULL; //bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - } else { - char *restStr = stbInfo->sampleDataBuf - + stbInfo->lenOfOneRow * i + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - } - } - - char *bindBuffer = calloc(1, index + 1); - if (bindBuffer == NULL) { - errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, DOUBLE_BUFF_LEN); - return -1; - } - - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if (-1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[c-1].dataType, - stbInfo->columns[c-1].dataLen, - timePrec, - bindBuffer)) { - free(bindBuffer); - return -1; - } - free(bindBuffer); - } - } - *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) = - (uintptr_t)bindArray; - } - - return 0; -} - static void startMultiThreadInsertData(int threads, char* db_name, char* precision, SSuperTable* stbInfo) { @@ -8106,32 +9655,37 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } - int64_t start_time; + int64_t startTime; if (stbInfo) { if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { - start_time = taosGetTimestamp(timePrec); + startTime = taosGetTimestamp(timePrec); } else { if (TSDB_CODE_SUCCESS != taosParseTime( stbInfo->startTimestamp, - &start_time, + &startTime, strlen(stbInfo->startTimestamp), timePrec, 0)) { ERROR_EXIT("failed to parse time!\n"); } } } else { - start_time = DEFAULT_START_TIME; + startTime = DEFAULT_START_TIME; } - debugPrint("%s() LN%d, start_time= %"PRId64"\n", - __func__, __LINE__, start_time); + debugPrint("%s() LN%d, startTime= %"PRId64"\n", + __func__, __LINE__, startTime); // read sample data from file first + int ret; if (stbInfo) { - if (0 != prepareSampleDataForSTable(stbInfo)) { - errorPrint2("%s() LN%d, prepare sample data for 
stable failed!\n", - __func__, __LINE__); - exit(EXIT_FAILURE); - } + ret = prepareSampleForStb(stbInfo); + } else { + ret = prepareSampleForNtb(); + } + + if (0 != ret) { + errorPrint2("%s() LN%d, prepare sample data for stable failed!\n", + __func__, __LINE__); + exit(EXIT_FAILURE); } TAOS* taos0 = taos_connect( @@ -8162,6 +9716,12 @@ || ((stbInfo->childTblOffset + stbInfo->childTblLimit) > (stbInfo->childTblCount))) { + + if (stbInfo->childTblCount < stbInfo->childTblOffset) { + printf("WARNING: offset will not be used since the child table count is less than the offset!\n"); + + stbInfo->childTblOffset = 0; + } stbInfo->childTblLimit = stbInfo->childTblCount - stbInfo->childTblOffset; } @@ -8200,12 +9760,13 @@ int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos0, - db_name, stbInfo->sTblName, + db_name, stbInfo->stbName, &stbInfo->childTblName, &childTblCount, limit, offset); + ntables = childTblCount; // CBD } else { - ntables = g_args.num_of_tables; + ntables = g_args.ntables; tableFrom = 0; } @@ -8231,16 +9792,34 @@ } pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - assert(pids != NULL); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + assert(pids != NULL); assert(infos != NULL); - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - char *stmtBuffer = calloc(1, BUFFER_SIZE); assert(stmtBuffer); + +#if STMT_BIND_PARAM_BATCH == 1 + uint32_t interlaceRows = 0; + uint32_t batch; + + if (stbInfo) { + if (stbInfo->interlaceRows < stbInfo->insertRows) + interlaceRows = stbInfo->interlaceRows; + } else { + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; + } + + if (interlaceRows > 0) { + batch = interlaceRows; + } else { + batch = (g_args.reqPerReq>g_args.insertRows)? + g_args.insertRows:g_args.reqPerReq; + } + +#endif + if ((g_args.iface == STMT_IFACE) || ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { @@ -8250,7 +9829,7 @@ && (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable)) { pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - stbInfo->sTblName); + stbInfo->stbName); for (int tag = 0; tag < (stbInfo->tagCount - 1); tag ++ ) { pstr += sprintf(pstr, ",?"); @@ -8260,12 +9839,9 @@ pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); } - int columnCount; - if (stbInfo) { - columnCount = stbInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } + int columnCount = (stbInfo)?
+ stbInfo->columnCount: + g_args.columnCount; for (int col = 0; col < columnCount; col ++) { pstr += sprintf(pstr, ",?"); @@ -8273,6 +9849,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, pstr += sprintf(pstr, ")"); debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); +#if STMT_BIND_PARAM_BATCH == 1 + parseSamplefileToStmtBatch(stbInfo); +#endif } for (int i = 0; i < threads; i++) { @@ -8283,7 +9862,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, pThreadInfo->time_precision = timePrec; pThreadInfo->stbInfo = stbInfo; - pThreadInfo->start_time = start_time; + pThreadInfo->start_time = startTime; pThreadInfo->minDelay = UINT64_MAX; if ((NULL == stbInfo) || @@ -8316,8 +9895,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(EXIT_FAILURE); } - int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); - if (ret != 0) { + if (0 != taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0)) { free(pids); free(infos); free(stmtBuffer); @@ -8328,7 +9906,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, pThreadInfo->bind_ts = malloc(sizeof(int64_t)); if (stbInfo) { - parseSampleFileToStmt(pThreadInfo, stbInfo, timePrec); +#if STMT_BIND_PARAM_BATCH == 1 + parseStbSampleToStmtBatchForThread( + pThreadInfo, stbInfo, timePrec, batch); +#else + parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec); +#endif + } else { +#if STMT_BIND_PARAM_BATCH == 1 + parseNtbSampleToStmtBatchForThread( + pThreadInfo, timePrec, batch); +#else + parseNtbSampleToStmt(pThreadInfo, timePrec); +#endif } } } else { @@ -8373,19 +9963,28 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; + tsem_destroy(&(pThreadInfo->lock_sem)); + taos_close(pThreadInfo->taos); + if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); - tmfree((char *)pThreadInfo->bind_ts); } - tsem_destroy(&(pThreadInfo->lock_sem)); - taos_close(pThreadInfo->taos); + tmfree((char *)pThreadInfo->bind_ts); +#if STMT_BIND_PARAM_BATCH == 1 + tmfree((char *)pThreadInfo->bind_ts_array); + tmfree(pThreadInfo->bindParams); + tmfree(pThreadInfo->is_null); +#else if (pThreadInfo->sampleBindArray) { - for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + for (int k = 0; k < MAX_SAMPLES; k++) { uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( pThreadInfo->sampleBindArray + sizeof(uintptr_t *) * k)); - for (int c = 1; c < pThreadInfo->stbInfo->columnCount + 1; c++) { + int columnCount = (pThreadInfo->stbInfo)? 
+ pThreadInfo->stbInfo->columnCount: + g_args.columnCount; + for (int c = 1; c < columnCount + 1; c++) { TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); if (bind) tmfree(bind->buffer); @@ -8394,6 +9993,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } tmfree(pThreadInfo->sampleBindArray); } +#endif debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", __func__, __LINE__, @@ -8412,7 +10012,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; } - cntDelay -= 1; if (cntDelay == 0) cntDelay = 1; avgDelay = (double)totalDelay / cntDelay; @@ -8427,7 +10026,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->sTblName, + threads, db_name, stbInfo->stbName, (double)(stbInfo->totalInsertRows/tInMs)); if (g_fpOfInsertResult) { @@ -8435,7 +10034,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, - threads, db_name, stbInfo->sTblName, + threads, db_name, stbInfo->stbName, (double)(stbInfo->totalInsertRows/tInMs)); } } else { @@ -8471,15 +10070,14 @@ static void startMultiThreadInsertData(int threads, char* db_name, free(infos); } -static void *readTable(void *sarg) { -#if 1 +static void *queryNtableAggrFunc(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; - setThreadName("readTable"); + setThreadName("queryNtableAggrFunc"); char *command = calloc(1, BUFFER_SIZE); assert(command); - uint64_t sTime = pThreadInfo->start_time; + uint64_t startTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { @@ -8488,20 +10086,30 @@ static void *readTable(void *sarg) { return NULL; } - int64_t num_of_DPT; + int64_t insertRows; /* if (pThreadInfo->stbInfo) { - num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; + insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; } else { */ - num_of_DPT = g_args.num_of_DPT; + insertRows = g_args.insertRows; // } - int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; - int64_t totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; + int64_t totalData = insertRows * ntables; + bool aggr_func = g_Dbs.aggr_func; + + char **aggreFunc; + int n; - int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - if (!do_aggreFunc) { + if (g_args.demo_mode) { + aggreFunc = g_aggreFuncDemo; + n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; + } else { + aggreFunc = g_aggreFunc; + n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; + } + + if (!aggr_func) { printf("\nThe first field is either Binary or Bool. 
Aggregation functions are not supported.\n"); } printf("%"PRId64" records:\n", totalData); @@ -8510,11 +10118,13 @@ static void *readTable(void *sarg) { for (int j = 0; j < n; j++) { double totalT = 0; uint64_t count = 0; - for (int64_t i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64, - g_aggreFunc[j], tb_prefix, i, sTime); + for (int64_t i = 0; i < ntables; i++) { + sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64, + aggreFunc[j], tb_prefix, i, startTime); - double t = taosGetTimestampMs(); + double t = taosGetTimestampUs(); + debugPrint("%s() LN%d, sql command: %s\n", + __func__, __LINE__, command); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -8531,29 +10141,27 @@ static void *readTable(void *sarg) { count++; } - t = taosGetTimestampMs() - t; + t = taosGetTimestampUs() - t; totalT += t; taos_free_result(pSql); } fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", - g_aggreFunc[j][0] == '*' ? " * " : g_aggreFunc[j], totalData, - (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); - printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000); + aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, + (double)(ntables * insertRows) / totalT, totalT / 1000000); + printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000); } fprintf(fp, "\n"); fclose(fp); free(command); -#endif return NULL; } -static void *readMetric(void *sarg) { -#if 1 +static void *queryStableAggrFunc(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; - setThreadName("readMetric"); + setThreadName("queryStableAggrFunc"); char *command = calloc(1, BUFFER_SIZE); assert(command); @@ -8564,15 +10172,26 @@ static void *readMetric(void *sarg) { return NULL; } - int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows; - int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; - int64_t totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; + int64_t insertRows = pThreadInfo->stbInfo->insertRows; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; + int64_t totalData = insertRows * ntables; + bool aggr_func = g_Dbs.aggr_func; + + char **aggreFunc; + int n; + + if (g_args.demo_mode) { + aggreFunc = g_aggreFuncDemo; + n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2; + } else { + aggreFunc = g_aggreFunc; + n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; + } - int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2; - if (!do_aggreFunc) { + if (!aggr_func) { printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); } + printf("%"PRId64" records:\n", totalData); fprintf(fp, "Querying On %"PRId64" records:\n", totalData); @@ -8580,22 +10199,33 @@ static void *readMetric(void *sarg) { char condition[COND_BUF_LEN] = "\0"; char tempS[64] = "\0"; - int64_t m = 10 < num_of_tables ? 10 : num_of_tables; + int64_t m = 10 < ntables ? 
10 : ntables; for (int64_t i = 1; i <= m; i++) { if (i == 1) { - sprintf(tempS, "t1 = %"PRId64"", i); + if (g_args.demo_mode) { + sprintf(tempS, "groupid = %"PRId64"", i); + } else { + sprintf(tempS, "t0 = %"PRId64"", i); + } } else { - sprintf(tempS, " or t1 = %"PRId64" ", i); + if (g_args.demo_mode) { + sprintf(tempS, " or groupid = %"PRId64" ", i); + } else { + sprintf(tempS, " or t0 = %"PRId64" ", i); + } } strncat(condition, tempS, COND_BUF_LEN - 1); - sprintf(command, "select %s from meters where %s", g_aggreFunc[j], condition); + sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition); printf("Where condition: %s\n", condition); + + debugPrint("%s() LN%d, sql command: %s\n", + __func__, __LINE__, command); fprintf(fp, "%s\n", command); - double t = taosGetTimestampMs(); + double t = taosGetTimestampUs(); TAOS_RES *pSql = taos_query(taos, command); int32_t code = taos_errno(pSql); @@ -8612,11 +10242,11 @@ static void *readMetric(void *sarg) { while(taos_fetch_row(pSql) != NULL) { count++; } - t = taosGetTimestampMs() - t; + t = taosGetTimestampUs() - t; fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - num_of_tables * num_of_DPT / (t * 1000.0), t); - printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0); + ntables * insertRows / (t / 1000), t); + printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000); taos_free_result(pSql); } @@ -8624,7 +10254,7 @@ static void *readMetric(void *sarg) { } fclose(fp); free(command); -#endif + return NULL; } @@ -8671,7 +10301,7 @@ static int insertTestProcess() { } free(cmdBuffer); - // pretreatement + // pretreatment if (prepareSampleData() != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); @@ -8948,7 +10578,7 @@ static int queryTestProcess() { if (0 != g_queryInfo.superQueryInfo.sqlCount) { getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.sTblName, + g_queryInfo.superQueryInfo.stbName, &g_queryInfo.superQueryInfo.childTblName, &g_queryInfo.superQueryInfo.childTblCount); } @@ -9004,7 +10634,7 @@ static int queryTestProcess() { } } - pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL;// workaround to use separate taos connection; pthread_create(pids + seq, NULL, specifiedTableQuery, pThreadInfo); @@ -9054,7 +10684,7 @@ static int queryTestProcess() { pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); } @@ -9081,7 +10711,7 @@ static int queryTestProcess() { tmfree((char*)pidsOfSub); tmfree((char*)infosOfSub); - // taos_close(taos);// TODO: workaround to use separate taos connection; + // taos_close(taos);// workaround to use separate taos connection; uint64_t endTs = taosGetTimestampMs(); uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + @@ -9103,7 +10733,7 @@ static void stable_sub_callback( if (param) fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. + // tao_unsubscribe() will free result. } static void specified_sub_callback( @@ -9116,7 +10746,7 @@ static void specified_sub_callback( if (param) fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. + // tao_unsubscribe() will free result. 
} static TAOS_SUB* subscribeImpl( @@ -9441,12 +11071,12 @@ static int subscribeTestProcess() { if (0 != g_queryInfo.superQueryInfo.sqlCount) { getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.sTblName, + g_queryInfo.superQueryInfo.stbName, &g_queryInfo.superQueryInfo.childTblName, &g_queryInfo.superQueryInfo.childTblCount); } - taos_close(taos); // TODO: workaround to use separate taos connection; + taos_close(taos); // workaround to use separate taos connection; pthread_t *pids = NULL; threadInfo *infos = NULL; @@ -9456,12 +11086,12 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", + debugPrint("%s() LN%d, specified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint2("%s() LN%d, sepcified query sqlCount %d.\n", + errorPrint2("%s() LN%d, specified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(EXIT_FAILURE); @@ -9488,7 +11118,7 @@ static int subscribeTestProcess() { threadInfo *pThreadInfo = infos + seq; pThreadInfo->threadID = seq; pThreadInfo->querySeq = i; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo); } } @@ -9545,7 +11175,7 @@ static int subscribeTestProcess() { pThreadInfo->ntables = j < b ? a + 1 : a; pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1; tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pThreadInfo->taos = NULL; // workaround to use separate taos connection; pthread_create(pidsOfStable + seq, NULL, superSubscribe, pThreadInfo); } @@ -9618,8 +11248,8 @@ static void setParaFromArg() { g_Dbs.port = g_args.port; } - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountForCreateTbl = g_args.num_of_threads; + g_Dbs.threadCount = g_args.nthreads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.dbCount = 1; g_Dbs.db[0].drop = true; @@ -9631,27 +11261,27 @@ static void setParaFromArg() { tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); g_Dbs.use_metric = g_args.use_metric; - g_Dbs.insert_only = g_args.insert_only; - g_Dbs.do_aggreFunc = true; + g_Dbs.aggr_func = g_args.aggr_func; char dataString[TSDB_MAX_BYTES_PER_ROW]; - char **data_type = g_args.datatype; + char *data_type = g_args.data_type; + char **dataType = g_args.dataType; memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); - if (strcasecmp(data_type[0], "BINARY") == 0 - || strcasecmp(data_type[0], "BOOL") == 0 - || strcasecmp(data_type[0], "NCHAR") == 0 ) { - g_Dbs.do_aggreFunc = false; + if ((data_type[0] == TSDB_DATA_TYPE_BINARY) + || (data_type[0] == TSDB_DATA_TYPE_BOOL) + || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) { + g_Dbs.aggr_func = false; } if (g_args.use_metric) { g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountForCreateTbl = g_args.num_of_threads; + tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN); + g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables; + g_Dbs.threadCount = g_args.nthreads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.asyncMode = 
g_args.async_mode; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; @@ -9671,26 +11301,28 @@ static void setParaFromArg() { "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; - g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; + g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows; g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; g_Dbs.db[0].superTbls[0].columnCount = 0; for (int i = 0; i < MAX_NUM_COLUMNS; i++) { - if (data_type[i] == NULL) { + if (data_type[i] == TSDB_DATA_TYPE_NULL) { break; } + g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i]; tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1)); + dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1)); g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth; g_Dbs.db[0].superTbls[0].columnCount++; } - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; + if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) { + g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount; } else { for (int i = g_Dbs.db[0].superTbls[0].columnCount; - i < g_args.num_of_CPR; i++) { + i < g_args.columnCount; i++) { + g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT; tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; @@ -9707,7 +11339,7 @@ static void setParaFromArg() { g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth; g_Dbs.db[0].superTbls[0].tagCount = 2; } else { - g_Dbs.threadCountForCreateTbl = g_args.num_of_threads; + g_Dbs.threadCountForCreateTbl = g_args.nthreads; g_Dbs.db[0].superTbls[0].tagCount = 0; } } @@ -9823,7 +11455,7 @@ static void testMetaFile() { } } -static void queryResult() { +static void queryAggrFunc() { // query data pthread_t read_id; @@ -9832,7 +11464,6 @@ static void queryResult() { pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000 pThreadInfo->start_table_from = 0; - //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc; if (g_args.use_metric) { pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; @@ -9840,8 +11471,8 @@ static void queryResult() { tstrncpy(pThreadInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); } else { - pThreadInfo->ntables = g_args.num_of_tables; - pThreadInfo->end_table_to = g_args.num_of_tables -1; + pThreadInfo->ntables = g_args.ntables; + pThreadInfo->end_table_to = g_args.ntables -1; tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); } @@ -9861,9 +11492,9 @@ static void queryResult() { tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, readTable, pThreadInfo); + pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo); } else { - pthread_create(&read_id, NULL, readMetric, pThreadInfo); + pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo); } pthread_join(read_id, NULL); taos_close(pThreadInfo->taos); @@ -9885,8 +11516,9 @@ static void testCmdLine() { g_args.test_mode = INSERT_TEST; insertTestProcess(); - if (false == g_Dbs.insert_only) - queryResult(); + if (g_Dbs.aggr_func) { + queryAggrFunc(); + } } int main(int argc, char *argv[]) { diff --git 
a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index 51f4748eab462c8e883e83cd5923f38dd7fb9b5a..c3c914e96fc096f59aa701d3496455c754356aa8 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -9,9 +9,9 @@ AUX_SOURCE_DIRECTORY(. SRC) IF (TD_LINUX) ADD_EXECUTABLE(taosdump ${SRC}) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdump taos_static) + TARGET_LINK_LIBRARIES(taosdump taos_static cJson) ELSE () - TARGET_LINK_LIBRARIES(taosdump taos) + TARGET_LINK_LIBRARIES(taosdump taos cJson) ENDIF () ENDIF () @@ -19,8 +19,8 @@ IF (TD_DARWIN) # missing for macosx # ADD_EXECUTABLE(taosdump ${SRC}) # IF (TD_SOMODE_STATIC) - # TARGET_LINK_LIBRARIES(taosdump taos_static) + # TARGET_LINK_LIBRARIES(taosdump taos_static cJson) # ELSE () - # TARGET_LINK_LIBRARIES(taosdump taos) + # TARGET_LINK_LIBRARIES(taosdump taos cJson) # ENDIF () ENDIF () diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index ae2193a82eb447f0e948abc1757c21cab46ccf34..ef9e584978f12e636bccca28689062388ffd595c 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -181,6 +181,7 @@ typedef struct { int32_t threadIndex; int32_t totalThreads; char dbName[TSDB_DB_NAME_LEN]; + int precision; void *taosCon; int64_t rowsOfDumpOut; int64_t tablesOfDumpOut; @@ -246,11 +247,6 @@ static struct argp_option options[] = { {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, -#if TSDB_SUPPORT_NANOSECOND == 1 - {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6}, -#else - {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6}, -#endif {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, @@ -281,8 +277,11 @@ typedef struct arguments { bool with_property; bool avro; int64_t start_time; + char humanStartTime[28]; int64_t end_time; + char humanEndTime[28]; char precision[8]; + int32_t data_batch; int32_t max_sql_len; int32_t table_batch; // num of table which will be dump into one output file. @@ -296,6 +295,8 @@ typedef struct arguments { bool debug_print; bool verbose_print; bool performance_print; + + int dbCount; } SArguments; /* Our argp parser. 
*/ @@ -318,13 +319,17 @@ static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); static int32_t taosDumpTable(char *tbName, char *metric, - FILE *fp, TAOS* taosCon, char* dbName); + FILE *fp, TAOS* taosCon, char* dbName, int precision); static int taosDumpTableData(FILE *fp, char *tbName, TAOS* taosCon, char* dbName, + int precision, char *jsonAvroSchema); static int taosCheckParam(struct arguments *arguments); static void taosFreeDbInfos(); -static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName); +static void taosStartDumpOutWorkThreads( + int32_t numOfThread, + char *dbName, + int precision); struct arguments g_args = { // connection option @@ -349,8 +354,10 @@ struct arguments g_args = { false, // schemeonly true, // with_property false, // avro format - -INT64_MAX, // start_time + -INT64_MAX + 1, // start_time + {0}, // humanStartTime INT64_MAX, // end_time + {0}, // humanEndTime "ms", // precision 1, // data_batch TSDB_MAX_SQL_LEN, // max_sql_len @@ -364,7 +371,8 @@ struct arguments g_args = { false, // isDumpIn false, // debug_print false, // verbose_print - false // performance_print + false, // performance_print + 0, // dbCount }; static void errorPrintReqArg2(char *program, char *wrong_arg) @@ -472,12 +480,8 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { break; case 'S': // parse time here. - g_args.start_time = atol(arg); break; case 'E': - g_args.end_time = atol(arg); - break; - case 'C': break; case 'B': g_args.data_batch = atoi(arg); @@ -550,7 +554,7 @@ static int queryDbImpl(TAOS *taos, char *command) { return 0; } -static void parse_precision_first( +UNUSED_FUNC static void parse_precision_first( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-C") == 0) { @@ -616,6 +620,73 @@ static void parse_args( } } +static void copyHumanTimeToArg(char *timeStr, bool isStartTime) +{ + if (isStartTime) + strcpy(g_args.humanStartTime, timeStr); + else + strcpy(g_args.humanEndTime, timeStr); +} + +static void copyTimestampToArg(char *timeStr, bool isStartTime) +{ + if (isStartTime) + g_args.start_time = atol(timeStr); + else + g_args.end_time = atol(timeStr); +} + +static void parse_timestamp( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + char *tmp; + bool isStartTime = false; + bool isEndTime = false; + + if (strcmp(argv[i], "-S") == 0) { + isStartTime = true; + } else if (strcmp(argv[i], "-E") == 0) { + isEndTime = true; + } + + if (isStartTime || isEndTime) { + if (NULL == argv[i+1]) { + errorPrint("%s need a valid value following!\n", argv[i]); + exit(-1); + } + tmp = strdup(argv[i+1]); + + if (strchr(tmp, ':') && strchr(tmp, '-')) { + copyHumanTimeToArg(tmp, isStartTime); + } else { + copyTimestampToArg(tmp, isStartTime); + } + } + } +} + +static int getPrecisionByString(char *precision) +{ + if (0 == strncasecmp(precision, + "ms", 2)) { + return TSDB_TIME_PRECISION_MILLI; + } else if (0 == strncasecmp(precision, + "us", 2)) { + return TSDB_TIME_PRECISION_MICRO; +#if TSDB_SUPPORT_NANOSECOND == 1 + } else if (0 == strncasecmp(precision, + "ns", 2)) { + return TSDB_TIME_PRECISION_NANO; +#endif + } else { + errorPrint("Invalid time precision: %s", + precision); + } + + return -1; +} + +/* static void parse_timestamp( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -634,6 +705,7 
@@ static void parse_timestamp( int64_t tmpEpoch; if (strchr(tmp, ':') && strchr(tmp, '-')) { + strcpy(g_args.humanStartTime, tmp) int32_t timePrec; if (0 == strncasecmp(arguments->precision, "ms", strlen("ms"))) { @@ -672,6 +744,7 @@ static void parse_timestamp( } } } +*/ int main(int argc, char *argv[]) { static char verType[32] = {0}; @@ -682,7 +755,7 @@ int main(int argc, char *argv[]) { /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. */ if (argc > 1) { - parse_precision_first(argc, argv, &g_args); +// parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); parse_args(argc, argv, &g_args); } @@ -714,7 +787,9 @@ int main(int argc, char *argv[]) { printf("with_property: %s\n", g_args.with_property?"true":"false"); printf("avro format: %s\n", g_args.avro?"true":"false"); printf("start_time: %" PRId64 "\n", g_args.start_time); + printf("human readable start time: %s \n", g_args.humanStartTime); printf("end_time: %" PRId64 "\n", g_args.end_time); + printf("human readable end time: %s \n", g_args.humanEndTime); printf("precision: %s\n", g_args.precision); printf("data_batch: %d\n", g_args.data_batch); printf("max_sql_len: %d\n", g_args.max_sql_len); @@ -759,7 +834,9 @@ int main(int argc, char *argv[]) { fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); + fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime); fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); + fprintf(g_fpOfResult, "human readable end time: %s \n", g_args.humanEndTime); fprintf(g_fpOfResult, "precision: %s\n", g_args.precision); fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); @@ -816,7 +893,8 @@ int main(int argc, char *argv[]) { static void taosFreeDbInfos() { if (g_dbInfos == NULL) return; - for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]); + for (int i = 0; i < g_args.dbCount; i++) + tfree(g_dbInfos[i]); tfree(g_dbInfos); } @@ -1046,6 +1124,88 @@ static int32_t taosSaveTableOfMetricToTempFile( return 0; } +static int getDbCount() +{ + int count = 0; + + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; + TAOS_ROW row; + + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { + errorPrint("%s() LN%d, failed to allocate command buffer\n", __func__, __LINE__); + return 0; + } + + /* Connect to server */ + taos = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (NULL == taos) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + free(command); + return 0; + } + + sprintf(command, "show databases"); + result = taos_query(taos, command); + int32_t code = taos_errno(result); + + if (0 != code) { + errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n", + __func__, __LINE__, command, taos_errstr(result)); + free(command); + return 0; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + // skip the system database 'log' unless dumping system databases is allowed (g_args.allow_sys) + if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + && (!g_args.allow_sys)) { + continue; + } + + if (g_args.databases) { // input multi dbs + for (int i = 0; g_args.arg_list[i]; i++) { + if (strncasecmp(g_args.arg_list[i], 
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + } + continue; + } else if (!g_args.all_databases) { // only input one db + if (strncasecmp(g_args.arg_list[0], + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + else + continue; + } + +_dump_db_point: + + count++; + + if (g_args.databases) { + if (count > g_args.arg_list_len) break; + + } else if (!g_args.all_databases) { + if (count >= 1) break; + } + } + + if (count == 0) { + errorPrint("%d databases valid to dump\n", count); + } + + free(command); + return count; +} + static int taosDumpOut() { TAOS *taos = NULL; TAOS_RES *result = NULL; @@ -1070,7 +1230,14 @@ static int taosDumpOut() { return -1; } - g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); + g_args.dbCount = getDbCount(); + + if (0 == g_args.dbCount) { + errorPrint("%d databases valid to dump\n", g_args.dbCount); + return -1; + } + + g_dbInfos = (SDbInfo **)calloc(g_args.dbCount, sizeof(SDbInfo *)); if (g_dbInfos == NULL) { errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); @@ -1165,9 +1332,9 @@ _dump_db_point: g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes + 1)); - //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + tstrncpy(g_dbInfos[count]->precision, + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + DB_PRECISION_LEN); g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } count++; @@ -1209,14 +1376,14 @@ _dump_db_point: fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name); - int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int32_t totalNumOfThread = 1; // 0: all normal table into .tables.tmp.0 int normalTblFd = -1; int32_t retCode; int superTblCnt = 0 ; for (int i = 1; g_args.arg_list[i]; i++) { if (taosGetTableRecordInfo(g_args.arg_list[i], &tableRecordInfo, taos) < 0) { - errorPrint("input the invalide table %s\n", + errorPrint("input the invalid table %s\n", g_args.arg_list[i]); continue; } @@ -1263,8 +1430,10 @@ _dump_db_point: } // start multi threads to dumpout + taosStartDumpOutWorkThreads(totalNumOfThread, - g_dbInfos[0]->name); + g_dbInfos[0]->name, + getPrecisionByString(g_dbInfos[0]->precision)); char tmpFileName[MAX_FILE_NAME_LEN]; _clean_tmp_file: @@ -1341,11 +1510,10 @@ static int taosGetTableDes( return count; } - // if chidl-table have tag, using select tagName from table to get tagValue + // if child-table have tag, using select tagName from table to get tagValue for (int i = 0 ; i < count; i++) { if (strcmp(stableDes->cols[i].note, "TAG") != 0) continue; - sprintf(sqlstr, "select %s from %s.%s", stableDes->cols[i].field, dbName, table); @@ -1454,7 +1622,7 @@ static int convertSchemaToAvroSchema(STableDef *stableDes, char **avroSchema) static int32_t taosDumpTable( char *tbName, char *metric, - FILE *fp, TAOS* taosCon, char* dbName) { + FILE *fp, TAOS* taosCon, char* dbName, int precision) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) @@ -1505,7 +1673,7 @@ static int32_t taosDumpTable( int32_t ret = 0; if (!g_args.schemaonly) { - ret = taosDumpTableData(fp, tbName, taosCon, dbName, + ret = taosDumpTableData(fp, tbName, taosCon, dbName, precision, 
jsonAvroSchema); } @@ -1596,7 +1764,8 @@ static void* taosDumpOutWorkThreadFp(void *arg) int ret = taosDumpTable( tableRecord.name, tableRecord.metric, - fp, pThread->taosCon, pThread->dbName); + fp, pThread->taosCon, pThread->dbName, + pThread->precision); if (ret >= 0) { // TODO: sum table count and table rows by self pThread->tablesOfDumpOut++; @@ -1645,7 +1814,7 @@ static void* taosDumpOutWorkThreadFp(void *arg) return NULL; } -static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) +static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName, int precision) { pthread_attr_t thattr; SThreadParaObj *threadObj = @@ -1664,6 +1833,7 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) pThread->threadIndex = t; pThread->totalThreads = numOfThread; tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); + pThread->precision = precision; pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port); if (pThread->taosCon == NULL) { @@ -1913,7 +2083,8 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) { } // start multi threads to dumpout - taosStartDumpOutWorkThreads(numOfThread, dbInfo->name); + taosStartDumpOutWorkThreads(numOfThread, dbInfo->name, + getPrecisionByString(dbInfo->precision)); for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); (void)remove(tmpBuf); @@ -2191,14 +2362,38 @@ static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbN } static int taosDumpTableData(FILE *fp, char *tbName, - TAOS* taosCon, char* dbName, + TAOS* taosCon, char* dbName, int precision, char *jsonAvroSchema) { int64_t totalRows = 0; char sqlstr[1024] = {0}; + + int64_t start_time, end_time; + if (strlen(g_args.humanStartTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", g_args.humanStartTime); + return -1; + } + } else { + start_time = g_args.start_time; + } + + if (strlen(g_args.humanEndTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", g_args.humanEndTime); + return -1; + } + } else { + end_time = g_args.end_time; + } + sprintf(sqlstr, "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", - dbName, tbName, g_args.start_time, g_args.end_time); + dbName, tbName, start_time, end_time); TAOS_RES* res = taos_query(taosCon, sqlstr); int32_t code = taos_errno(res); @@ -2443,7 +2638,7 @@ static int taosGetFilesNum(const char *directoryName, } if (fileNum <= 0) { - errorPrint("directory:%s is empry\n", directoryName); + errorPrint("directory:%s is empty\n", directoryName); exit(-1); } @@ -2620,9 +2815,9 @@ static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, memcpy(cmd + cmd_len, line, read_len); cmd[read_len + cmd_len]= '\0'; if (queryDbImpl(taos, cmd)) { - errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n", + errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", __func__, __LINE__, lineNo, fileName); - fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName); } memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 
68529ab8a240c2313ae9417bef9f4112759b0c9f..a6158906a7cc77b57244594fe51881e5df0b68c8 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code), pStable->numOfTags); - + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1321,6 +1326,9 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1376,6 +1384,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1444,6 +1455,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1489,6 +1503,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } diff --git a/src/os/tests/CMakeLists.txt b/src/os/tests/CMakeLists.txt index 3c477641899994bf34237e93122c3d83f0365fad..9ec5076b7201b2d5ed9b2b6eb682eea7d6a83827 100644 --- a/src/os/tests/CMakeLists.txt +++ b/src/os/tests/CMakeLists.txt @@ -17,5 +17,5 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(osTest ${SOURCE_LIST}) - TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread) + TARGET_LINK_LIBRARIES(osTest taos os cJson tutil common gtest pthread) ENDIF() diff --git a/src/plugins/http/inc/httpUtil.h b/src/plugins/http/inc/httpUtil.h index 54c95b6980f8241c3ea6c8e563e0e42c7c737286..21690ebca96d35423e126a9e747d8ce6bb5a43a0 100644 --- a/src/plugins/http/inc/httpUtil.h +++ b/src/plugins/http/inc/httpUtil.h @@ -17,6 +17,7 @@ #define TDENGINE_HTTP_UTIL_H bool httpCheckUsedbSql(char *sql); +bool httpCheckAlterSql(char *sql); void httpTimeToString(int32_t t, char *buf, int32_t buflen); bool httpUrlMatch(HttpContext *pContext, int32_t pos, char *cmp); diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c 
index d51c774ff269d5790868727941a632d133dd6733..9719d93824b50064ec1cf23677c641428434592c 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -35,6 +35,7 @@ bool httpProcessData(HttpContext* pContext) { if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_HANDLING)) { httpTrace("context:%p, fd:%d, state:%s not in ready state, stop process request", pContext, pContext->fd, httpContextStateStr(pContext->state)); + pContext->error = true; httpCloseContextByApp(pContext); return false; } diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index 62b1737f6fe7128ee132727b2870fca5f62b737a..7066f19769754e78dffeed6a40b672584c0310f1 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -1157,10 +1157,6 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { httpOnError(parser, HTTP_CODE_INTERNAL_SERVER_ERROR, TSDB_CODE_HTTP_PARSE_ERROR_STATE); } - if (ok != 0) { - pContext->error = true; - } - return ok; } diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index 79e728dd456fb8a340e50f9d7e9cbd3c409614db..1d05b455cb5c66e4f492140e1f337210da04caef 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -147,6 +147,8 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) { httpCode = pContext->parser->httpCode; } + pContext->error = true; + char *httpCodeStr = httpGetStatusDesc(httpCode); httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo & 0XFFFF, tstrerror(errNo)); } diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index 47f2d4ff5bcc513aafb8ea8f4e2a85db5a35b12a..13596b0e8a4ea4d183cc4bf75917fd08a9dd7290 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tglobal.h" +#include "tsclient.h" #include "httpLog.h" #include "httpJson.h" #include "httpRestHandle.h" @@ -62,13 +63,21 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) httpJsonItemToken(jsonBuf); httpJsonToken(jsonBuf, JsonArrStt); + SSqlObj *pObj = (SSqlObj *) result; + bool isAlterSql = (pObj->sqlstr == NULL) ? 
false : httpCheckAlterSql(pObj->sqlstr); + if (num_fields == 0) { httpJsonItemToken(jsonBuf); httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); } else { - for (int32_t i = 0; i < num_fields; ++i) { + if (isAlterSql == true) { httpJsonItemToken(jsonBuf); - httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + for (int32_t i = 0; i < num_fields; ++i) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + } } } @@ -99,8 +108,14 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) httpJsonItemToken(jsonBuf); httpJsonToken(jsonBuf, JsonArrStt); - httpJsonItemToken(jsonBuf); - httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + if (isAlterSql == true) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + } + httpJsonItemToken(jsonBuf); httpJsonInt(jsonBuf, fields[i].type); httpJsonItemToken(jsonBuf); diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index f02859f165499b0c69b095599dd47890e644c604..13a0835c3960333c6d12aa443025de5fb95d565e 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -191,8 +191,6 @@ static void httpProcessHttpData(void *param) { if (httpReadData(pContext)) { (*(pThread->processData))(pContext); atomic_fetch_add_32(&pServer->requestNum, 1); - } else { - httpReleaseContext(pContext/*, false*/); } } } @@ -402,13 +400,17 @@ static bool httpReadData(HttpContext *pContext) { } else if (nread < 0) { if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno); - return false; // later again + continue; // later again } else { httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno); + taosCloseSocket(pContext->fd); + httpReleaseContext(pContext/*, false */); return false; } } else { httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread); + taosCloseSocket(pContext->fd); + httpReleaseContext(pContext/*, false */); return false; } } diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 0dd451f72dbd78233ac8f73d552b6815e3a3fab8..602767a6563b3ca3430501c0dbcee65333f1d44b 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -405,7 +405,6 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) { if (pContext->session == NULL) { httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL); - httpCloseContextByApp(pContext); } else { httpExecCmd(pContext); } diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index ade50bdad6bf6b0a7a2d43bb354851d90686be49..f30ac7326eef20f4abf5558b288f16f6ee313b42 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -21,6 +21,7 @@ #include "httpResp.h" #include "httpSql.h" #include "httpUtil.h" +#include "ttoken.h" bool httpCheckUsedbSql(char *sql) { if (strstr(sql, "use ") != NULL) { @@ -29,6 +30,17 @@ bool httpCheckUsedbSql(char *sql) { return false; } +bool httpCheckAlterSql(char *sql) { + int32_t index = 0; + + do { + SStrToken t0 = tStrGetToken(sql, 
&index, false); + if (t0.type != TK_LP) { + return t0.type == TK_ALTER; + } + } while (1); +} + void httpTimeToString(int32_t t, char *buf, int32_t buflen) { memset(buf, 0, (size_t)buflen); char ts[32] = {0}; diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index d4116fbfb2daec9b47c4a891c3c886728e6ca515..4f7821708c3e9b3c3d0eb975125e1ad12c5f82a4 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -70,14 +70,14 @@ extern "C" { #define TSDB_FUNC_DERIVATIVE 32 #define TSDB_FUNC_BLKINFO 33 - -#define TSDB_FUNC_HISTOGRAM 34 -#define TSDB_FUNC_HLL 35 -#define TSDB_FUNC_MODE 36 -#define TSDB_FUNC_SAMPLE 37 -#define TSDB_FUNC_CEIL 38 -#define TSDB_FUNC_FLOOR 39 -#define TSDB_FUNC_ROUND 40 +#define TSDB_FUNC_CEIL 34 +#define TSDB_FUNC_FLOOR 35 +#define TSDB_FUNC_ROUND 36 + +#define TSDB_FUNC_HISTOGRAM 37 +#define TSDB_FUNC_HLL 38 +#define TSDB_FUNC_MODE 39 +#define TSDB_FUNC_SAMPLE 40 #define TSDB_FUNC_MAVG 41 #define TSDB_FUNC_CSUM 42 @@ -88,6 +88,7 @@ extern "C" { #define TSDB_FUNCSTATE_OF 0x10u // outer forward #define TSDB_FUNCSTATE_NEED_TS 0x20u // timestamp is required during query processing #define TSDB_FUNCSTATE_SELECTIVITY 0x40u // selectivity functions, can exists along with tag columns +#define TSDB_FUNCSTATE_SCALAR 0x80u #define TSDB_BASE_FUNC_SO TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF #define TSDB_BASE_FUNC_MO TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 31db6492f69c35904970cc5f48cc4a10c9fecd39..82f4f34a57c7d6d10a021fb2e426ff83cb3604e6 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -86,11 +86,18 @@ typedef struct SResultRow { char *key; // start key of current result row } SResultRow; +typedef struct SResultRowCell { + uint64_t groupId; + SResultRow *pRow; +} SResultRowCell; + typedef struct SGroupResInfo { int32_t totalGroup; int32_t currentGroup; int32_t index; SArray* pRows; // SArray + bool ordered; + int32_t position; } SGroupResInfo; /** @@ -257,7 +264,7 @@ typedef struct SQueryAttr { SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query. SSingleColumnFilterInfo* pFilterInfo; - SFilterInfo *pFilters; + void *pFilters; void* tsdb; SMemRef memRef; @@ -284,8 +291,9 @@ typedef struct SQueryRuntimeEnv { SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file SHashObj* pResultRowHashTable; // quick locate the window object for each result SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not + SArray* pResultRowArrayList; // The array list that contains the Result rows char* keyBuf; // window key buffer - SResultRowPool* pool; // window result object pool + SResultRowPool* pool; // The window result objects pool, all the resultRow Objects are allocated and managed by this object. 
char** prevRow; SArray* prevResult; // intermediate result, SArray @@ -391,7 +399,6 @@ typedef struct SQueryParam { char *sql; char *tagCond; char *colCond; - char *tbnameCond; char *prevResult; SArray *pTableIdList; SSqlExpr **pExpr; @@ -399,7 +406,7 @@ typedef struct SQueryParam { SExprInfo *pExprs; SExprInfo *pSecExprs; - SFilterInfo *pFilters; + void *pFilters; SColIndex *pGroupColIndex; SColumnInfo *pTagColumnInfo; @@ -409,6 +416,11 @@ typedef struct SQueryParam { SUdfInfo *pUdfInfo; } SQueryParam; +typedef struct SColumnDataParam{ + int32_t numOfCols; + SArray* pDataBlock; +} SColumnDataParam; + typedef struct STableScanInfo { void *pQueryHandle; int32_t numOfBlocks; @@ -632,11 +644,11 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlExpr **pExpr, SExprInfo *prevExpr, SUdfInfo *pUdfInfo); -int32_t createQueryFilter(char *data, uint16_t len, SFilterInfo** pFilters); +int32_t createQueryFilter(char *data, uint16_t len, void** pFilters); SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code); SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, - SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, SFilterInfo* pFilters, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo); + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, void* pFilters, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo); int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start, int32_t prevResultLen, void* merger); @@ -676,5 +688,6 @@ void freeQueryAttr(SQueryAttr *pQuery); int32_t getMaximumIdleDurationSec(); void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t type); +int32_t getColumnDataFromId(void *param, int32_t id, void **data); #endif // TDENGINE_QEXECUTOR_H diff --git a/src/query/inc/qFilter.h b/src/query/inc/qFilter.h index af45b816f9e6725579403069843295895cf57cc8..c34a56cc1cd6e135947eee897f87d060880f15c7 100644 --- a/src/query/inc/qFilter.h +++ b/src/query/inc/qFilter.h @@ -83,6 +83,12 @@ enum { RANGE_TYPE_MR_CTX = 3, }; +enum { + FI_ACTION_NO_NEED = 1, + FI_ACTION_CONTINUE, + FI_ACTION_STOP, +}; + typedef struct OptrStr { uint16_t optr; char *str; @@ -106,6 +112,7 @@ typedef struct SFilterColRange { typedef bool (*rangeCompFunc) (const void *, const void *, const void *, const void *, __compar_fn_t); typedef int32_t(*filter_desc_compare_func)(const void *, const void *); typedef bool(*filter_exec_func)(void *, int32_t, int8_t**, SDataStatis *, int16_t); +typedef int32_t (*filer_get_col_from_id)(void *, int32_t, void **); typedef struct SFilterRangeCompare { int64_t s; @@ -323,14 +330,16 @@ typedef struct SFilterInfo { #define FILTER_EMPTY_RES(i) FILTER_GET_FLAG((i)->status, FI_STATUS_EMPTY) -extern int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t options); +extern int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options); extern bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols); -extern int32_t filterSetColFieldData(SFilterInfo *info, int32_t numOfCols, SArray* pDataBlock); +extern int32_t filterSetColFieldData(SFilterInfo *info, void *param, filer_get_col_from_id fp); extern int32_t 
filterGetTimeRange(SFilterInfo *info, STimeWindow *win); extern int32_t filterConverNcharColumns(SFilterInfo* pFilterInfo, int32_t rows, bool *gotNchar); extern int32_t filterFreeNcharColumns(SFilterInfo* pFilterInfo); extern void filterFreeInfo(SFilterInfo *info); extern bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t numOfCols, int32_t numOfRows); +extern int32_t filterIsIndexedColumnQuery(SFilterInfo* info, int32_t idxId, bool *res); +extern int32_t filterGetIndexedColumnInfo(SFilterInfo* info, char** val, int32_t *order, int32_t *flag); #ifdef __cplusplus } diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h index 746c5f8569ac98c465e8283a2401e27c18cadcc4..948a1ae91e01331c4f566ac5089485f717fc5632 100644 --- a/src/query/inc/qTableMeta.h +++ b/src/query/inc/qTableMeta.h @@ -38,12 +38,6 @@ typedef struct SJoinInfo { } SJoinInfo; typedef struct STagCond { - // relation between tbname list and query condition, including : TK_AND or TK_OR - int16_t relType; - - // tbname query condition, only support tbname query condition on one table - SCond tbnameCond; - // join condition, only support two tables join currently SJoinInfo joinInfo; @@ -93,6 +87,7 @@ typedef struct STableMetaInfo { SName name; char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql SArray *tagColList; // SArray, involved tag columns + int32_t joinTagNum; } STableMetaInfo; struct SQInfo; // global merge operator @@ -100,7 +95,7 @@ struct SQueryAttr; // query object typedef struct STableFilter { uint64_t uid; - SFilterInfo info; + void *info; } STableFilter; typedef struct SQueryInfo { diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 27e5415133c8f5285aa31efd47c89415d550e21c..a047a0e48b5dc96d4e7e7d528d48dd6223e65cc4 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -11,7 +11,7 @@ %left OR. %left AND. %right NOT. -%left EQ NE ISNULL NOTNULL IS LIKE MATCH GLOB BETWEEN IN. +%left EQ NE ISNULL NOTNULL IS LIKE MATCH NMATCH GLOB BETWEEN IN. %left GT GE LT LE. %left BITAND BITOR LSHIFT RSHIFT. %left PLUS MINUS. @@ -754,6 +754,7 @@ expr(A) ::= expr(X) LIKE expr(Y). {A = tSqlExprCreate(X, Y, TK_LIKE); } // match expression expr(A) ::= expr(X) MATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_MATCH); } +expr(A) ::= expr(X) NMATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_NMATCH); } // question expression expr(A) ::= expr(X) QUESTION expr(Y). {A = tSqlExprCreate(X, Y, TK_QUESTION); } @@ -926,5 +927,5 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); s %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD - LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL + LIKE MATCH NMATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES. 
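Note: the sql.y hunk above adds an NMATCH token alongside MATCH, and the qAggMain.c hunks below register ceil/floor/round as scalar functions. The following standalone C-connector sketch is illustrative only and is not part of this patch: the database, table, and column names (test.meters, current, location) are assumptions borrowed from the taosdemo demo schema, and NMATCH is assumed to behave as the negated counterpart of the regular-expression MATCH operator.

#include <stdio.h>
#include <taos.h>

/* Minimal sketch: run one query that exercises the new CEIL/FLOOR/ROUND scalar
 * functions and the NMATCH (negated regex match) operator. Assumes a running
 * server and a demo super table `test.meters` with a numeric column `current`
 * and a binary tag `location`. */
static int query_new_features(TAOS *taos) {
    const char *sql =
        "SELECT ts, CEIL(current), FLOOR(current), ROUND(current) "
        "FROM test.meters WHERE location NMATCH '^beijing.*' LIMIT 10";

    TAOS_RES *res = taos_query(taos, sql);
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
        taos_free_result(res);
        return -1;
    }

    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
        /* each row carries the timestamp plus the rounded values of `current` */
    }
    taos_free_result(res);
    return 0;
}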
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index c0c6d7a1404dbef6bdb00bd676a30fcfc908671a..1fd682aebd6ac7899ca0a88f6a4744cd4ebbb006 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -179,7 +179,9 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ || - functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) { + functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_CEIL || + functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND) + { *type = (int16_t)dataType; *bytes = (int16_t)dataBytes; @@ -405,7 +407,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI // TODO use hash table int32_t isValidFunction(const char* name, int32_t len) { - for(int32_t i = 0; i <= TSDB_FUNC_BLKINFO; ++i) { + for(int32_t i = 0; i <= TSDB_FUNC_ROUND; ++i) { int32_t nameLen = (int32_t) strlen(aAggs[i].name); if (len != nameLen) { continue; @@ -4256,6 +4258,231 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) { doFinalizer(pCtx); } +#define CFR_SET_VAL(type, data, pCtx, func, i, step, notNullElems) \ + do { \ + type *pData = (type *) data; \ + type *pOutput = (type *) pCtx->pOutput; \ + \ + for (; i < pCtx->size && i >= 0; i += step) { \ + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { \ + continue; \ + } \ + \ + *pOutput++ = (type) func((double) pData[i]); \ + \ + notNullElems++; \ + } \ + } while (0) + +#define CFR_SET_VAL_DOUBLE(data, pCtx, func, i, step, notNullElems) \ + do { \ + double *pData = (double *) data; \ + double *pOutput = (double *) pCtx->pOutput; \ + \ + for (; i < pCtx->size && i >= 0; i += step) { \ + if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { \ + continue; \ + } \ + \ + SET_DOUBLE_VAL(pOutput, func(pData[i])); \ + pOutput++; \ + \ + notNullElems++; \ + } \ + } while (0) + +static void ceil_function(SQLFunctionCtx *pCtx) { + void *data = GET_INPUT_DATA_LIST(pCtx); + + int32_t notNullElems = 0; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 
0 : pCtx->size - 1; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + CFR_SET_VAL(int32_t, data, pCtx, ceil, i, step, notNullElems); + break; + }; + case TSDB_DATA_TYPE_UINT: { + CFR_SET_VAL(uint32_t, data, pCtx, ceil, i, step, notNullElems); + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + CFR_SET_VAL(int64_t, data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + CFR_SET_VAL(uint64_t, data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + CFR_SET_VAL_DOUBLE(data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + CFR_SET_VAL(float, data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + CFR_SET_VAL(int16_t, data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + CFR_SET_VAL(uint16_t, data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + CFR_SET_VAL(int8_t, data, pCtx, ceil, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + CFR_SET_VAL(uint8_t, data, pCtx, ceil, i, step, notNullElems); + break; + } + default: + qError("error input type"); + } + + if (notNullElems <= 0) { + /* + * current block may be null value + */ + assert(pCtx->hasNull); + } else { + GET_RES_INFO(pCtx)->numOfRes += notNullElems; + } +} + +static void floor_function(SQLFunctionCtx *pCtx) { + void *data = GET_INPUT_DATA_LIST(pCtx); + + int32_t notNullElems = 0; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + CFR_SET_VAL(int32_t, data, pCtx, floor, i, step, notNullElems); + break; + }; + case TSDB_DATA_TYPE_UINT: { + CFR_SET_VAL(uint32_t, data, pCtx, floor, i, step, notNullElems); + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + CFR_SET_VAL(int64_t, data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + CFR_SET_VAL(uint64_t, data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + CFR_SET_VAL_DOUBLE(data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + CFR_SET_VAL(float, data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + CFR_SET_VAL(int16_t, data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + CFR_SET_VAL(uint16_t, data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + CFR_SET_VAL(int8_t, data, pCtx, floor, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + CFR_SET_VAL(uint8_t, data, pCtx, floor, i, step, notNullElems); + break; + } + default: + qError("error input type"); + } + + if (notNullElems <= 0) { + /* + * current block may be null value + */ + assert(pCtx->hasNull); + } else { + GET_RES_INFO(pCtx)->numOfRes += notNullElems; + } +} + +static void round_function(SQLFunctionCtx *pCtx) { + void *data = GET_INPUT_DATA_LIST(pCtx); + + int32_t notNullElems = 0; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 
0 : pCtx->size - 1; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + CFR_SET_VAL(int32_t, data, pCtx, round, i, step, notNullElems); + break; + }; + case TSDB_DATA_TYPE_UINT: { + CFR_SET_VAL(uint32_t, data, pCtx, round, i, step, notNullElems); + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + CFR_SET_VAL(int64_t, data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_UBIGINT: { + CFR_SET_VAL(uint64_t, data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + CFR_SET_VAL_DOUBLE(data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + CFR_SET_VAL(float, data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + CFR_SET_VAL(int16_t, data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_USMALLINT: { + CFR_SET_VAL(uint16_t, data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + CFR_SET_VAL(int8_t, data, pCtx, round, i, step, notNullElems); + break; + } + case TSDB_DATA_TYPE_UTINYINT: { + CFR_SET_VAL(uint8_t, data, pCtx, round, i, step, notNullElems); + break; + } + default: + qError("error input type"); + } + + if (notNullElems <= 0) { + /* + * current block may be null value + */ + assert(pCtx->hasNull); + } else { + GET_RES_INFO(pCtx)->numOfRes += notNullElems; + } +} + +#undef CFR_SET_VAL +#undef CFR_SET_VAL_DOUBLE + ///////////////////////////////////////////////////////////////////////////////////////////// /* * function compatible list. @@ -4274,8 +4501,8 @@ int32_t functionCompatList[] = { 4, -1, -1, 1, 1, 1, 1, 1, 1, -1, // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, - // tid_tag, derivative, blk_info - 6, 8, 7, + // tid_tag, derivative, blk_info,ceil, floor, round + 6, 8, 7, 1, 1, 1 }; SAggFunctionInfo aAggs[] = {{ @@ -4678,7 +4905,7 @@ SAggFunctionInfo aAggs[] = {{ dataBlockRequired, }, { - // 33 + // 33 "_block_dist", // return table id and the corresponding tags for join match and subscribe TSDB_FUNC_BLKINFO, TSDB_FUNC_BLKINFO, @@ -4688,4 +4915,40 @@ SAggFunctionInfo aAggs[] = {{ blockinfo_func_finalizer, block_func_merge, dataBlockRequired, + }, + { + // 34 + "ceil", + TSDB_FUNC_CEIL, + TSDB_FUNC_CEIL, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR, + function_setup, + ceil_function, + doFinalizer, + noop1, + dataBlockRequired + }, + { + // 35 + "floor", + TSDB_FUNC_FLOOR, + TSDB_FUNC_FLOOR, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR, + function_setup, + floor_function, + doFinalizer, + noop1, + dataBlockRequired + }, + { + // 36 + "round", + TSDB_FUNC_ROUND, + TSDB_FUNC_ROUND, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR, + function_setup, + round_function, + doFinalizer, + noop1, + dataBlockRequired }}; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 8fefed51c8c8e80d6a6f05d0da74dc1c6075d1bd..982996d70d6e8c05c45425e737b57a08daf331c9 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -405,6 +405,25 @@ static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput return (numOfSelectivity > 0 && hasTags); } +static bool isScalarWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput) { + bool hasTags = false; + int32_t numOfScalar = 0; + + for (int32_t i = 0; i < numOfOutput; ++i) { + int32_t functId = 
pCtx[i].functionId; + if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { + hasTags = true; + continue; + } + + if ((aAggs[functId].status & TSDB_FUNCSTATE_SCALAR) != 0) { + numOfScalar++; + } + } + + return (numOfScalar > 0 && hasTags); +} + static bool isProjQuery(SQueryAttr *pQueryAttr) { for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { int32_t functId = pQueryAttr->pExpr1[i].base.functionId; @@ -544,6 +563,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult // add a new result set for a new group taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pResult, POINTER_BYTES); + SResultRowCell cell = {.groupId = tableGroupId, .pRow = pResult}; + taosArrayPush(pRuntimeEnv->pResultRowArrayList, &cell); } else { pResult = *p1; } @@ -1937,7 +1958,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde // set the output buffer for the selectivity + tag query static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) { - if (!isSelectivityWithTagsQuery(pCtx, numOfOutput)) { + if (!isSelectivityWithTagsQuery(pCtx, numOfOutput) && !isScalarWithTagsQuery(pCtx, numOfOutput)) { return TSDB_CODE_SUCCESS; } @@ -1956,7 +1977,7 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) { if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pCtx[i].outputBytes; pTagCtx[num++] = &pCtx[i]; - } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) { + } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0 || (aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0) { p = &pCtx[i]; } else if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) { // tag function may be the group by tag column @@ -2107,9 +2128,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables * 10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); + pRuntimeEnv->pResultRowArrayList = taosArrayInit(numOfTables, sizeof(SResultRowCell)); pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize); pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen); @@ -2384,6 +2406,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); + taosArrayDestroy(pRuntimeEnv->pResultRowArrayList); pRuntimeEnv->prevResult = NULL; } @@ -2398,11 +2421,11 @@ bool isQueryKilled(SQInfo *pQInfo) { // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived // abort current query execution. 
- if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs) > getMaximumIdleDurationSec()) && + if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs/1000) > getMaximumIdleDurationSec()) && (!needBuildResAfterQueryComplete(pQInfo))) { assert(pQInfo->startExecTs != 0); - qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d sec, abort current query execution, start:%" PRId64 + qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d ms, abort current query execution, start:%" PRId64 ", current:%d", pQInfo->qId, 1, pQInfo->startExecTs, taosGetTimestampSec()); return true; } @@ -2946,6 +2969,10 @@ void filterRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSingleColumnFilterInf } if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) { + if (i < (numOfRows - 1)) { + all = false; + } + break; } } @@ -2987,11 +3014,15 @@ void filterColRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSDataBlock* pBlock p[offset] = true; } - if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) { + if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) { + if (i < (numOfRows - 1)) { + all = false; + } + break; } } - + // save the cursor status pRuntimeEnv->current->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); } else { @@ -3053,6 +3084,22 @@ void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFi } } +FORCE_INLINE int32_t getColumnDataFromId(void *param, int32_t id, void **data) { + int32_t numOfCols = ((SColumnDataParam *)param)->numOfCols; + SArray* pDataBlock = ((SColumnDataParam *)param)->pDataBlock; + + for (int32_t j = 0; j < numOfCols; ++j) { + SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, j); + if (id == pColInfo->info.colId) { + *data = pColInfo->pData; + break; + } + } + + return TSDB_CODE_SUCCESS; +} + + int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, uint32_t* status) { *status = BLK_DATA_NO_NEEDED; @@ -3207,7 +3254,8 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa } if (pQueryAttr->pFilters != NULL) { - filterSetColFieldData(pQueryAttr->pFilters, pBlock->info.numOfCols, pBlock->pDataBlock); + SColumnDataParam param = {.numOfCols = pBlock->info.numOfCols, .pDataBlock = pBlock->pDataBlock}; + filterSetColFieldData(pQueryAttr->pFilters, ¶m, getColumnDataFromId); } if (pQueryAttr->pFilters != NULL || pRuntimeEnv->pTsBuf != NULL) { @@ -4808,7 +4856,6 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; pQueryAttr->tsdb = tsdb; - if (tsdb != NULL) { int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery); if (code != TSDB_CODE_SUCCESS) { @@ -6379,6 +6426,7 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { if (!pRuntimeEnv->pQueryAttr->stableQuery) { sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); } + toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) { @@ -7494,7 +7542,6 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pQueryMsg->order = htons(pQueryMsg->order); pQueryMsg->orderColId = htons(pQueryMsg->orderColId); pQueryMsg->queryType = htonl(pQueryMsg->queryType); - pQueryMsg->tagNameRelType = htons(pQueryMsg->tagNameRelType); pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols); pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput); @@ -7509,7 +7556,6 @@ int32_t 
convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pQueryMsg->tsBuf.tsOrder = htonl(pQueryMsg->tsBuf.tsOrder); pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags); - pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen); pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput); pQueryMsg->sqlstrLen = htonl(pQueryMsg->sqlstrLen); pQueryMsg->prevResultLen = htonl(pQueryMsg->prevResultLen); @@ -7600,8 +7646,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += sizeof(SSqlExpr); for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { - pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType); - pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen); + pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType); + pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen); if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) { pExprMsg->param[j].pz = pMsg; @@ -7648,8 +7694,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += sizeof(SSqlExpr); for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { - pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType); - pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen); + pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType); + pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen); if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) { pExprMsg->param[j].pz = pMsg; @@ -7753,17 +7799,6 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += pQueryMsg->prevResultLen; } - if (pQueryMsg->tbnameCondLen > 0) { - param->tbnameCond = calloc(1, pQueryMsg->tbnameCondLen + 1); - if (param->tbnameCond == NULL) { - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _cleanup; - } - - strncpy(param->tbnameCond, pMsg, pQueryMsg->tbnameCondLen); - pMsg += pQueryMsg->tbnameCondLen; - } - //skip ts buf if ((pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen) > 0) { pMsg = (char *)pQueryMsg + pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen; @@ -8144,7 +8179,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp return TSDB_CODE_SUCCESS; } -int32_t createQueryFilter(char *data, uint16_t len, SFilterInfo** pFilters) { +int32_t createQueryFilter(char *data, uint16_t len, void** pFilters) { tExprNode* expr = NULL; TRY(TSDB_MAX_TAG_CONDITIONS) { @@ -8398,7 +8433,7 @@ FORCE_INLINE bool checkQIdEqual(void *qHandle, uint64_t qId) { } SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, SExprInfo* pExprs, - SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, SFilterInfo* pFilters, int32_t vgId, + SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, void* pFilters, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -8409,6 +8444,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S } pQInfo->qId = qId; + pQInfo->startExecTs = 0; pQInfo->runtimeEnv.pUdfInfo = pUdfInfo; @@ -8647,7 +8683,6 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* SArray* prevResult = NULL; if (prevResultLen > 0) { prevResult = interResFromBinary(param->prevResult, prevResultLen); - pRuntimeEnv->prevResult = prevResult; } diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index a6988d7adc403cd518e6fce91899a515305ab5c0..c7a7ea963d5635c76030d2199ac99a60924d99a7 100644 --- a/src/query/src/qFilter.c +++ 
b/src/query/src/qFilter.c @@ -28,13 +28,14 @@ OptrStr gOptrStr[] = { {TSDB_RELATION_GREATER_EQUAL, ">="}, {TSDB_RELATION_NOT_EQUAL, "!="}, {TSDB_RELATION_LIKE, "like"}, - {TSDB_RELATION_MATCH, "match"}, {TSDB_RELATION_ISNULL, "is null"}, {TSDB_RELATION_NOTNULL, "not null"}, {TSDB_RELATION_IN, "in"}, {TSDB_RELATION_AND, "and"}, {TSDB_RELATION_OR, "or"}, - {TSDB_RELATION_NOT, "not"} + {TSDB_RELATION_NOT, "not"}, + {TSDB_RELATION_MATCH, "match"}, + {TSDB_RELATION_NMATCH, "nmatch"}, }; static FORCE_INLINE int32_t filterFieldColDescCompare(const void *desc1, const void *desc2) { @@ -157,7 +158,7 @@ int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val, compareInt64Val, compareFloatVal, compareDoubleVal, compareLenPrefixedStr, compareStrPatternComp, compareFindItemInSet, compareWStrPatternComp, compareLenPrefixedWStr, compareUint8Val, compareUint16Val, compareUint32Val, compareUint64Val, - setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexComp, + setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexCompMatch, compareStrRegexCompNMatch }; int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { @@ -198,6 +199,8 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_BINARY: { if (optr == TSDB_RELATION_MATCH) { comparFn = 19; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = 20; } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ comparFn = 7; } else if (optr == TSDB_RELATION_IN) { @@ -212,6 +215,8 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_NCHAR: { if (optr == TSDB_RELATION_MATCH) { comparFn = 19; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = 20; } else if (optr == TSDB_RELATION_LIKE) { comparFn = 9; } else if (optr == TSDB_RELATION_IN) { @@ -932,7 +937,7 @@ int32_t filterAddUnitToGroup(SFilterGroup *group, uint16_t unitIdx) { return TSDB_CODE_SUCCESS; } -int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType) { +int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType, bool tolower) { SBufferReader br = tbufInitReader(buf, len, false); uint32_t sType = tbufReadUint32(&br); SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(tType), true, false); @@ -1108,6 +1113,10 @@ int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint3 } t = varDataLen(tmp); pvar = varDataVal(tmp); + + if (tolower) { + strntolower_s(pvar, pvar, (int32_t)t); + } break; } case TSDB_DATA_TYPE_NCHAR: { @@ -1152,7 +1161,7 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g if (tree->_node.optr == TSDB_RELATION_IN && (!IS_VAR_DATA_TYPE(type))) { void *data = NULL; - filterConvertSetFromBinary((void **)&data, var->pz, var->nLen, type); + filterConvertSetFromBinary((void **)&data, var->pz, var->nLen, type, false); CHK_LRET(data == NULL, TSDB_CODE_QRY_APP_ERROR, "failed to convert in param"); if (taosHashGetSize((SHashObj *)data) <= 0) { @@ -1479,19 +1488,6 @@ _return: return code; } -#if 0 -int32_t filterInitUnitFunc(SFilterInfo *info) { - for (uint16_t i = 0; i < info->unitNum; ++i) { - SFilterUnit* unit = &info->units[i]; - - info->cunits[i].func = getComparFunc(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); - } - - return TSDB_CODE_SUCCESS; -} -#endif - - void filterDumpInfoToString(SFilterInfo *info, const char *msg, 
int32_t options) { if (qDebugFlag & DEBUG_DEBUG) { CHK_LRETV(info == NULL, "%s - FilterInfo: EMPTY", msg); @@ -1806,7 +1802,10 @@ int32_t filterInitValFieldData(SFilterInfo *info) { } if (unit->compare.optr == TSDB_RELATION_IN) { - filterConvertSetFromBinary((void **)&fi->data, var->pz, var->nLen, type); + SSchema *sch = FILTER_UNIT_COL_DESC(info, unit); + bool tolower = (sch->colId == -1) ? true : false; + + filterConvertSetFromBinary((void **)&fi->data, var->pz, var->nLen, type, tolower); CHK_LRET(fi->data == NULL, TSDB_CODE_QRY_APP_ERROR, "failed to convert in param"); FILTER_SET_FLAG(fi->flag, FLD_DATA_IS_HASH); @@ -1879,6 +1878,9 @@ bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right) case TSDB_RELATION_MATCH: { return ret == 0; } + case TSDB_RELATION_NMATCH: { + return ret == 0; + } case TSDB_RELATION_IN: { return ret == 1; } @@ -2536,8 +2538,6 @@ int32_t filterPostProcessRange(SFilterInfo *info) { int32_t filterGenerateComInfo(SFilterInfo *info) { - uint16_t n = 0; - info->cunits = malloc(info->unitNum * sizeof(*info->cunits)); info->blkUnitRes = malloc(sizeof(*info->blkUnitRes) * info->unitNum); info->blkUnits = malloc(sizeof(*info->blkUnits) * (info->unitNum + 1) * info->groupNum); @@ -2565,24 +2565,6 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { info->cunits[i].dataSize = FILTER_UNIT_COL_SIZE(info, unit); info->cunits[i].dataType = FILTER_UNIT_DATA_TYPE(unit); } - - uint16_t cgroupNum = info->groupNum + 1; - - for (uint16_t i = 0; i < info->groupNum; ++i) { - cgroupNum += info->groups[i].unitNum; - } - - info->cgroups = malloc(cgroupNum * sizeof(*info->cgroups)); - - for (uint16_t i = 0; i < info->groupNum; ++i) { - info->cgroups[n++] = info->groups[i].unitNum; - - for (uint16_t m = 0; m < info->groups[i].unitNum; ++m) { - info->cgroups[n++] = info->groups[i].unitIdxs[m]; - } - } - - info->cgroups[n] = 0; return TSDB_CODE_SUCCESS; } @@ -2656,10 +2638,12 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t SDataStatis* pDataBlockst = &pDataStatis[index]; void *minVal, *maxVal; + float minv = 0; + float maxv = 0; if (cunit->dataType == TSDB_DATA_TYPE_FLOAT) { - float minv = (float)(*(double *)(&pDataBlockst->min)); - float maxv = (float)(*(double *)(&pDataBlockst->max)); + minv = (float)(*(double *)(&pDataBlockst->min)); + maxv = (float)(*(double *)(&pDataBlockst->max)); minVal = &minv; maxVal = &maxv; @@ -2778,8 +2762,10 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, bool all = true; uint16_t *unitIdx = NULL; - *p = calloc(numOfRows, sizeof(int8_t)); - + if (*p == NULL) { + *p = calloc(numOfRows, sizeof(int8_t)); + } + for (int32_t i = 0; i < numOfRows; ++i) { //FILTER_UNIT_CLR_F(info); @@ -2881,12 +2867,14 @@ static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, return all; } - *p = calloc(numOfRows, sizeof(int8_t)); + if (*p == NULL) { + *p = calloc(numOfRows, sizeof(int8_t)); + } for (int32_t i = 0; i < numOfRows; ++i) { uint16_t uidx = info->groups[0].unitIdxs[0]; void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i; - (*p)[i] = isNull(colData, info->cunits[uidx].dataType); + (*p)[i] = ((colData == NULL) || isNull(colData, info->cunits[uidx].dataType)); if ((*p)[i] == 0) { all = false; } @@ -2902,12 +2890,14 @@ static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows return all; } - *p = calloc(numOfRows, sizeof(int8_t)); - + if (*p == NULL) { + *p = calloc(numOfRows, sizeof(int8_t)); + } + for 
(int32_t i = 0; i < numOfRows; ++i) { uint16_t uidx = info->groups[0].unitIdxs[0]; void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i; - (*p)[i] = !isNull(colData, info->cunits[uidx].dataType); + (*p)[i] = ((colData != NULL) && !isNull(colData, info->cunits[uidx].dataType)); if ((*p)[i] == 0) { all = false; } @@ -2930,10 +2920,12 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SDataSta return all; } - *p = calloc(numOfRows, sizeof(int8_t)); + if (*p == NULL) { + *p = calloc(numOfRows, sizeof(int8_t)); + } for (int32_t i = 0; i < numOfRows; ++i) { - if (isNull(colData, info->cunits[0].dataType)) { + if (colData == NULL || isNull(colData, info->cunits[0].dataType)) { all = false; colData += dataSize; continue; @@ -2958,13 +2950,16 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) { return all; } - - *p = calloc(numOfRows, sizeof(int8_t)); + + if (*p == NULL) { + *p = calloc(numOfRows, sizeof(int8_t)); + } for (int32_t i = 0; i < numOfRows; ++i) { uint16_t uidx = info->groups[0].unitIdxs[0]; void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i; - if (isNull(colData, info->cunits[uidx].dataType)) { + if (colData == NULL || isNull(colData, info->cunits[uidx].dataType)) { + (*p)[i] = 0; all = false; continue; } @@ -2988,8 +2983,10 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis * return all; } - *p = calloc(numOfRows, sizeof(int8_t)); - + if (*p == NULL) { + *p = calloc(numOfRows, sizeof(int8_t)); + } + for (int32_t i = 0; i < numOfRows; ++i) { //FILTER_UNIT_CLR_F(info); @@ -3005,7 +3002,7 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis * //} else { uint8_t optr = cunit->optr; - if (isNull(colData, cunit->dataType)) { + if (colData == NULL || isNull(colData, cunit->dataType)) { (*p)[i] = optr == TSDB_RELATION_ISNULL ? 
true : false; } else { if (optr == TSDB_RELATION_NOTNULL) { @@ -3124,7 +3121,7 @@ _return: return TSDB_CODE_SUCCESS; } -int32_t filterSetColFieldData(SFilterInfo *info, int32_t numOfCols, SArray* pDataBlock) { +int32_t filterSetColFieldData(SFilterInfo *info, void *param, filer_get_col_from_id fp) { CHK_LRET(info == NULL, TSDB_CODE_QRY_APP_ERROR, "info NULL"); CHK_LRET(info->fields[FLD_TYPE_COLUMN].num <= 0, TSDB_CODE_QRY_APP_ERROR, "no column fileds"); @@ -3135,15 +3132,8 @@ int32_t filterSetColFieldData(SFilterInfo *info, int32_t numOfCols, SArray* pDat for (uint16_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) { SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i]; SSchema* sch = fi->desc; - - for (int32_t j = 0; j < numOfCols; ++j) { - SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, j); - if (sch->colId == pColInfo->info.colId) { - fi->data = pColInfo->pData; - - break; - } - } + + (*fp)(param, sch->colId, &fi->data); } filterUpdateComUnits(info); @@ -3152,7 +3142,7 @@ int32_t filterSetColFieldData(SFilterInfo *info, int32_t numOfCols, SArray* pDat } -int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t options) { +int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options) { int32_t code = TSDB_CODE_SUCCESS; SFilterInfo *info = NULL; @@ -3189,8 +3179,6 @@ int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t option taosArrayDestroy(group); return code; } - - //ERR_JRET(filterInitUnitFunc(info)); } info->unitRes = malloc(info->unitNum * sizeof(*info->unitRes)); @@ -3249,30 +3237,35 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num break; } - if ((pDataStatis[index].numOfNull <= 0) && (ctx->isnull && !ctx->notnull && !ctx->isrange)) { - ret = false; - break; - } - - // all data in current column are NULL, no need to check its boundary value - if (pDataStatis[index].numOfNull == numOfRows) { - - // if isNULL query exists, load the null data column - if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) { + if (pDataStatis[index].numOfNull <= 0) { + if (ctx->isnull && !ctx->notnull && !ctx->isrange) { ret = false; break; } + } else if (pDataStatis[index].numOfNull > 0) { + if (pDataStatis[index].numOfNull == numOfRows) { + if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) { + ret = false; + break; + } - continue; + continue; + } else { + if (ctx->isnull) { + continue; + } + } } SDataStatis* pDataBlockst = &pDataStatis[index]; SFilterRangeNode *r = ctx->rs; + float minv = 0; + float maxv = 0; if (ctx->type == TSDB_DATA_TYPE_FLOAT) { - float minv = (float)(*(double *)(&pDataBlockst->min)); - float maxv = (float)(*(double *)(&pDataBlockst->max)); + minv = (float)(*(double *)(&pDataBlockst->min)); + maxv = (float)(*(double *)(&pDataBlockst->max)); minVal = &minv; maxVal = &maxv; @@ -3433,6 +3426,52 @@ int32_t filterFreeNcharColumns(SFilterInfo* info) { return TSDB_CODE_SUCCESS; } +int32_t filterIsIndexedColumnQuery(SFilterInfo* info, int32_t idxId, bool *res) { + CHK_LRET(info == NULL, TSDB_CODE_QRY_APP_ERROR, "null parameter"); + + CHK_JMP(info->fields[FLD_TYPE_COLUMN].num > 1 || info->fields[FLD_TYPE_COLUMN].num <= 0); + + CHK_JMP(info->unitNum > 1 || info->unitNum <= 0); + + CHK_JMP(FILTER_GET_COL_FIELD_ID(FILTER_GET_COL_FIELD(info, 0)) != idxId); + + int32_t optr = FILTER_UNIT_OPTR(info->units); + + CHK_JMP(optr == TSDB_RELATION_LIKE || optr == TSDB_RELATION_IN || optr == TSDB_RELATION_MATCH + || optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL); + + *res = true; + 
+ return TSDB_CODE_SUCCESS; + +_return: + *res = false; + + return TSDB_CODE_SUCCESS; +} + + +int32_t filterGetIndexedColumnInfo(SFilterInfo* info, char** val, int32_t *order, int32_t *flag) { + SFilterComUnit *cunit = info->cunits; + uint8_t optr = cunit->optr; + + *val = cunit->valData; + *order = TSDB_ORDER_ASC; + + if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { + *order = TSDB_ORDER_DESC; + } + + if (optr == TSDB_RELATION_NOT_EQUAL) { + *order = TSDB_ORDER_ASC|TSDB_ORDER_DESC; + } + + if (cunit->valData2 == cunit->valData && optr != TSDB_RELATION_EQUAL) { + FILTER_SET_FLAG(*flag, FI_ACTION_NO_NEED); + } + + return TSDB_CODE_SUCCESS; +} diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index 1988fc9df7710f15770ca8a9994542d9f4bc8c66..abfa20714b333754478e5c48b9265f839b05a4b1 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -645,6 +645,12 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } else { op = OP_Project; taosArrayPush(plan, &op); + + if (pQueryAttr->pExpr2 != NULL) { + op = OP_Project; + taosArrayPush(plan, &op); + } + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 4caf351799adbf000265566fb22617067efb725d..bc27e094db3dcb85ffa73810e922d73cd42ab3a0 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -436,13 +436,13 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void * } STableQueryInfo** pList = supporter->pTableQueryInfo; - - SResultRowInfo *pWindowResInfo1 = &(pList[left]->resInfo); - SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos); + SResultRow* pWindowRes1 = pList[left]->resInfo.pResult[leftPos]; +// SResultRow * pWindowRes1 = getResultRow(&(pList[left]->resInfo), leftPos); TSKEY leftTimestamp = pWindowRes1->win.skey; - SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo); - SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); +// SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo); +// SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); + SResultRow* pWindowRes2 = pList[right]->resInfo.pResult[rightPos]; TSKEY rightTimestamp = pWindowRes2->win.skey; if (leftTimestamp == rightTimestamp) { @@ -456,7 +456,77 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void * } } -static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, +int32_t tsAscOrder(const void* p1, const void* p2) { + SResultRowCell* pc1 = (SResultRowCell*) p1; + SResultRowCell* pc2 = (SResultRowCell*) p2; + + if (pc1->groupId == pc2->groupId) { + if (pc1->pRow->win.skey == pc2->pRow->win.skey) { + return 0; + } else { + return (pc1->pRow->win.skey < pc2->pRow->win.skey)? -1:1; + } + } else { + return (pc1->groupId < pc2->groupId)? -1:1; + } +} + +int32_t tsDescOrder(const void* p1, const void* p2) { + SResultRowCell* pc1 = (SResultRowCell*) p1; + SResultRowCell* pc2 = (SResultRowCell*) p2; + + if (pc1->groupId == pc2->groupId) { + if (pc1->pRow->win.skey == pc2->pRow->win.skey) { + return 0; + } else { + return (pc1->pRow->win.skey < pc2->pRow->win.skey)? 1:-1; + } + } else { + return (pc1->groupId < pc2->groupId)? 
-1:1; + } +} + +void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { + __compar_fn_t fn = NULL; + if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) { + fn = tsAscOrder; + } else { + fn = tsDescOrder; + } + + taosArraySort(pRuntimeEnv->pResultRowArrayList, fn); +} + +static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, uint64_t groupId, int32_t* rowCellInfoOffset) { + if (!pGroupResInfo->ordered) { + orderTheResultRows(pRuntimeEnv); + pGroupResInfo->ordered = true; + } + + if (pGroupResInfo->pRows == NULL) { + pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES); + } + + size_t len = taosArrayGetSize(pRuntimeEnv->pResultRowArrayList); + for(; pGroupResInfo->position < len; ++pGroupResInfo->position) { + SResultRowCell* pResultRowCell = taosArrayGet(pRuntimeEnv->pResultRowArrayList, pGroupResInfo->position); + if (pResultRowCell->groupId != groupId) { + break; + } + + int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pResultRowCell->pRow, rowCellInfoOffset); + if (num <= 0) { + continue; + } + + taosArrayPush(pGroupResInfo->pRows, &pResultRowCell->pRow); + pResultRowCell->pRow->numOfRows = (uint32_t) num; + } + + return TSDB_CODE_SUCCESS; +} + +static UNUSED_FUNC int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, int32_t* rowCellInfoOffset) { bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr); @@ -562,12 +632,7 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu int64_t st = taosGetTimestampUs(); while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { - SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup); - - int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } + mergeIntoGroupResultImplRv(pRuntimeEnv, pGroupResInfo, pGroupResInfo->currentGroup, offset); // this group generates at least one result, return results if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { @@ -583,7 +648,6 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime); -// pQInfo->summary.firstStageMergeTime += elapsedTime; return TSDB_CODE_SUCCESS; } diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index d56c12ab8735d0683db146f7000429d4d554dda5..c6e6eddce7d8f56095d5d78f4d1f84ed1d4f3c97 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -35,7 +35,7 @@ typedef struct SQueryMgmt { bool closed; } SQueryMgmt; -static void queryMgmtKillQueryFn(void* handle) { +static void queryMgmtKillQueryFn(void* handle, void* param1) { void** fp = (void**)handle; qKillQuery(*fp); } @@ -53,7 +53,6 @@ static void freeqinfoFn(void *qhandle) { void freeParam(SQueryParam *param) { tfree(param->sql); tfree(param->tagCond); - tfree(param->tbnameCond); tfree(param->pTableIdList); taosArrayDestroy(param->pOperator); tfree(param->pExprs); @@ -140,7 +139,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi qDebug("qmsg:%p query stable, uid:%"PRIu64", tid:%d", pQueryMsg, id->uid, id->tid); code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen, - pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, 
numOfGroupByCols); + &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols); if (code != TSDB_CODE_SUCCESS) { qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code)); @@ -215,6 +214,51 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } +#ifdef TEST_IMPL +// wait moment +int waitMoment(SQInfo* pQInfo){ + if(pQInfo->sql) { + int ms = 0; + char* pcnt = strstr(pQInfo->sql, " count(*)"); + if(pcnt) return 0; + + char* pos = strstr(pQInfo->sql, " t_"); + if(pos){ + pos += 3; + ms = atoi(pos); + while(*pos >= '0' && *pos <= '9'){ + pos ++; + } + char unit_char = *pos; + if(unit_char == 'h'){ + ms *= 3600*1000; + } else if(unit_char == 'm'){ + ms *= 60*1000; + } else if(unit_char == 's'){ + ms *= 1000; + } + } + if(ms == 0) return 0; + printf("test wait sleep %dms. sql=%s ...\n", ms, pQInfo->sql); + + if(ms < 1000) { + taosMsleep(ms); + } else { + int used_ms = 0; + while(used_ms < ms) { + taosMsleep(1000); + used_ms += 1000; + if(isQueryKilled(pQInfo)){ + printf("test check query is canceled, sleep break.%s\n", pQInfo->sql); + break; + } + } + } + } + return 1; +} +#endif + bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo && pQInfo->signature == pQInfo); @@ -228,7 +272,8 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { } *qId = pQInfo->qId; - pQInfo->startExecTs = taosGetTimestampSec(); + if(pQInfo->startExecTs == 0) + pQInfo->startExecTs = taosGetTimestampMs(); if (isQueryKilled(pQInfo)) { qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId); @@ -259,7 +304,9 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { int64_t st = taosGetTimestampUs(); pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup); pQInfo->summary.elapsedTime += (taosGetTimestampUs() - st); - +#ifdef TEST_IMPL + waitMoment(pQInfo); +#endif publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC); pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv); @@ -479,7 +526,7 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) { pQueryMgmt->closed = true; pthread_mutex_unlock(&pQueryMgmt->lock); - taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); + taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn, NULL); } void qQueryMgmtReOpen(void *pQMgmt) { @@ -574,3 +621,148 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); return 0; } + +//kill by qid +int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount) { + int32_t error = TSDB_CODE_SUCCESS; + void** handle = qAcquireQInfo(pMgmt, qId); + if(handle == NULL) return terrno; + + SQInfo* pQInfo = (SQInfo*)(*handle); + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + qWarn("QId:0x%"PRIx64" be killed(no memory commit).", pQInfo->qId); + setQueryKilled(pQInfo); + + // wait query stop + int32_t loop = 0; + while (pQInfo->owner != 0) { + taosMsleep(waitMs); + if(loop++ > waitCount){ + error = TSDB_CODE_FAILED; + break; + } + } + + qReleaseQInfo(pMgmt, (void **)&handle, true); + return error; +} + +// local struct +typedef struct { + int64_t qId; + int64_t startExecTs; +} SLongQuery; + +// callbark for sort compare +static int compareLongQuery(const void* p1, const void* p2) { + // sort desc + SLongQuery* plq1 = *(SLongQuery**)p1; + SLongQuery* plq2 = *(SLongQuery**)p2; + if(plq1->startExecTs == plq2->startExecTs) { + return 0; + } else 
if(plq1->startExecTs > plq2->startExecTs) { + return 1; + } else { + return -1; + } +} + +// callback for taosCacheRefresh +static void cbFoundItem(void* handle, void* param1) { + SQInfo * qInfo = *(SQInfo**) handle; + if(qInfo == NULL) return ; + SArray* qids = (SArray*) param1; + if(qids == NULL) return ; + + bool usedMem = true; + bool usedIMem = true; + SMemTable* mem = qInfo->query.memRef.snapshot.omem; + SMemTable* imem = qInfo->query.memRef.snapshot.imem; + if(mem == NULL || T_REF_VAL_GET(mem) == 0) + usedMem = false; + if(imem == NULL || T_REF_VAL_GET(mem) == 0) + usedIMem = false ; + + if(!usedMem && !usedIMem) + return ; + + // push to qids + SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery)); + plq->qId = qInfo->qId; + plq->startExecTs = qInfo->startExecTs; + taosArrayPush(qids, &plq); +} + +// longquery +void* qObtainLongQuery(void* param){ + SQueryMgmt* qMgmt = (SQueryMgmt*)param; + if(qMgmt == NULL || qMgmt->qinfoPool == NULL) + return NULL; + SArray* qids = taosArrayInit(4, sizeof(int64_t*)); + if(qids == NULL) return NULL; + // Get each item + taosCacheRefresh(qMgmt->qinfoPool, cbFoundItem, qids); + + size_t cnt = taosArrayGetSize(qids); + if(cnt == 0) { + taosArrayDestroy(qids); + return NULL; + } + if(cnt > 1) + taosArraySort(qids, compareLongQuery); + + return qids; +} + +//solve tsdb no block to commit +bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { + SQueryMgmt *pQueryMgmt = pMgmt; + bool fixed = false; + + // qid top list + SArray *qids = (SArray*)qObtainLongQuery(pQueryMgmt); + if(qids == NULL) return false; + + // kill Query + int64_t now = taosGetTimestampMs(); + size_t cnt = taosArrayGetSize(qids); + size_t i; + SLongQuery* plq; + for(i=0; i < cnt; i++) { + plq = (SLongQuery* )taosArrayGetP(qids, i); + if(plq->startExecTs > now) continue; + if(now - plq->startExecTs >= longQueryMs) { + qKillQueryByQId(pMgmt, plq->qId, 500, 10); // wait 50*100 ms + if(tsdbNoProblem(pRepo)) { + fixed = true; + qWarn("QId:0x%"PRIx64" fixed problem after kill this query.", plq->qId); + break; + } + } + } + + // free qids + for(i=0; i < cnt; i++) { + free(taosArrayGetP(qids, i)); + } + taosArrayDestroy(qids); + return fixed; +} + +//solve tsdb no block to commit +bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { + qWarn("pRepo=%p start solve problem.", pRepo); + if(qFixedNoBlock(pRepo, pMgmt, 10*60*1000)) { + return true; + } + if(qFixedNoBlock(pRepo, pMgmt, 2*60*1000)){ + return true; + } + if(qFixedNoBlock(pRepo, pMgmt, 30*1000)){ + return true; + } + qWarn("pRepo=%p solve problem failed.", pRepo); + return false; +} diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 6e6a7d4f59131ba746772310b90f6fe3b044cfb9..0dd9165d5480a62cf27e76acfa1796d731235421 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -66,181 +66,182 @@ #define TK_IS 21 #define TK_LIKE 22 #define TK_MATCH 23 -#define TK_GLOB 24 -#define TK_BETWEEN 25 -#define TK_IN 26 -#define TK_GT 27 -#define TK_GE 28 -#define TK_LT 29 -#define TK_LE 30 -#define TK_BITAND 31 -#define TK_BITOR 32 -#define TK_LSHIFT 33 -#define TK_RSHIFT 34 -#define TK_PLUS 35 -#define TK_MINUS 36 -#define TK_DIVIDE 37 -#define TK_TIMES 38 -#define TK_STAR 39 -#define TK_SLASH 40 -#define TK_REM 41 -#define TK_CONCAT 42 -#define TK_UMINUS 43 -#define TK_UPLUS 44 -#define TK_BITNOT 45 -#define TK_QUESTION 46 -#define TK_ARROW 47 -#define TK_SHOW 48 -#define TK_DATABASES 49 -#define TK_TOPICS 50 -#define TK_FUNCTIONS 51 -#define TK_MNODES 52 -#define TK_DNODES 53 -#define TK_ACCOUNTS 54 -#define TK_USERS 
55 -#define TK_MODULES 56 -#define TK_QUERIES 57 -#define TK_CONNECTIONS 58 -#define TK_STREAMS 59 -#define TK_VARIABLES 60 -#define TK_SCORES 61 -#define TK_GRANTS 62 -#define TK_VNODES 63 -#define TK_DOT 64 -#define TK_CREATE 65 -#define TK_TABLE 66 -#define TK_STABLE 67 -#define TK_DATABASE 68 -#define TK_TABLES 69 -#define TK_STABLES 70 -#define TK_VGROUPS 71 -#define TK_DROP 72 -#define TK_TOPIC 73 -#define TK_FUNCTION 74 -#define TK_DNODE 75 -#define TK_USER 76 -#define TK_ACCOUNT 77 -#define TK_USE 78 -#define TK_DESCRIBE 79 -#define TK_DESC 80 -#define TK_ALTER 81 -#define TK_PASS 82 -#define TK_PRIVILEGE 83 -#define TK_LOCAL 84 -#define TK_COMPACT 85 -#define TK_LP 86 -#define TK_RP 87 -#define TK_IF 88 -#define TK_EXISTS 89 -#define TK_AS 90 -#define TK_OUTPUTTYPE 91 -#define TK_AGGREGATE 92 -#define TK_BUFSIZE 93 -#define TK_PPS 94 -#define TK_TSERIES 95 -#define TK_DBS 96 -#define TK_STORAGE 97 -#define TK_QTIME 98 -#define TK_CONNS 99 -#define TK_STATE 100 -#define TK_COMMA 101 -#define TK_KEEP 102 -#define TK_CACHE 103 -#define TK_REPLICA 104 -#define TK_QUORUM 105 -#define TK_DAYS 106 -#define TK_MINROWS 107 -#define TK_MAXROWS 108 -#define TK_BLOCKS 109 -#define TK_CTIME 110 -#define TK_WAL 111 -#define TK_FSYNC 112 -#define TK_COMP 113 -#define TK_PRECISION 114 -#define TK_UPDATE 115 -#define TK_CACHELAST 116 -#define TK_PARTITIONS 117 -#define TK_UNSIGNED 118 -#define TK_TAGS 119 -#define TK_USING 120 -#define TK_NULL 121 -#define TK_NOW 122 -#define TK_SELECT 123 -#define TK_UNION 124 -#define TK_ALL 125 -#define TK_DISTINCT 126 -#define TK_FROM 127 -#define TK_VARIABLE 128 -#define TK_INTERVAL 129 -#define TK_EVERY 130 -#define TK_SESSION 131 -#define TK_STATE_WINDOW 132 -#define TK_FILL 133 -#define TK_SLIDING 134 -#define TK_ORDER 135 -#define TK_BY 136 -#define TK_ASC 137 -#define TK_GROUP 138 -#define TK_HAVING 139 -#define TK_LIMIT 140 -#define TK_OFFSET 141 -#define TK_SLIMIT 142 -#define TK_SOFFSET 143 -#define TK_WHERE 144 -#define TK_RESET 145 -#define TK_QUERY 146 -#define TK_SYNCDB 147 -#define TK_ADD 148 -#define TK_COLUMN 149 -#define TK_MODIFY 150 -#define TK_TAG 151 -#define TK_CHANGE 152 -#define TK_SET 153 -#define TK_KILL 154 -#define TK_CONNECTION 155 -#define TK_STREAM 156 -#define TK_COLON 157 -#define TK_ABORT 158 -#define TK_AFTER 159 -#define TK_ATTACH 160 -#define TK_BEFORE 161 -#define TK_BEGIN 162 -#define TK_CASCADE 163 -#define TK_CLUSTER 164 -#define TK_CONFLICT 165 -#define TK_COPY 166 -#define TK_DEFERRED 167 -#define TK_DELIMITERS 168 -#define TK_DETACH 169 -#define TK_EACH 170 -#define TK_END 171 -#define TK_EXPLAIN 172 -#define TK_FAIL 173 -#define TK_FOR 174 -#define TK_IGNORE 175 -#define TK_IMMEDIATE 176 -#define TK_INITIALLY 177 -#define TK_INSTEAD 178 -#define TK_KEY 179 -#define TK_OF 180 -#define TK_RAISE 181 -#define TK_REPLACE 182 -#define TK_RESTRICT 183 -#define TK_ROW 184 -#define TK_STATEMENT 185 -#define TK_TRIGGER 186 -#define TK_VIEW 187 -#define TK_IPTOKEN 188 -#define TK_SEMI 189 -#define TK_NONE 190 -#define TK_PREV 191 -#define TK_LINEAR 192 -#define TK_IMPORT 193 -#define TK_TBNAME 194 -#define TK_JOIN 195 -#define TK_INSERT 196 -#define TK_INTO 197 -#define TK_VALUES 198 +#define TK_NMATCH 24 +#define TK_GLOB 25 +#define TK_BETWEEN 26 +#define TK_IN 27 +#define TK_GT 28 +#define TK_GE 29 +#define TK_LT 30 +#define TK_LE 31 +#define TK_BITAND 32 +#define TK_BITOR 33 +#define TK_LSHIFT 34 +#define TK_RSHIFT 35 +#define TK_PLUS 36 +#define TK_MINUS 37 +#define TK_DIVIDE 38 +#define TK_TIMES 39 +#define TK_STAR 40 
+#define TK_SLASH 41 +#define TK_REM 42 +#define TK_CONCAT 43 +#define TK_UMINUS 44 +#define TK_UPLUS 45 +#define TK_BITNOT 46 +#define TK_QUESTION 47 +#define TK_ARROW 48 +#define TK_SHOW 49 +#define TK_DATABASES 50 +#define TK_TOPICS 51 +#define TK_FUNCTIONS 52 +#define TK_MNODES 53 +#define TK_DNODES 54 +#define TK_ACCOUNTS 55 +#define TK_USERS 56 +#define TK_MODULES 57 +#define TK_QUERIES 58 +#define TK_CONNECTIONS 59 +#define TK_STREAMS 60 +#define TK_VARIABLES 61 +#define TK_SCORES 62 +#define TK_GRANTS 63 +#define TK_VNODES 64 +#define TK_DOT 65 +#define TK_CREATE 66 +#define TK_TABLE 67 +#define TK_STABLE 68 +#define TK_DATABASE 69 +#define TK_TABLES 70 +#define TK_STABLES 71 +#define TK_VGROUPS 72 +#define TK_DROP 73 +#define TK_TOPIC 74 +#define TK_FUNCTION 75 +#define TK_DNODE 76 +#define TK_USER 77 +#define TK_ACCOUNT 78 +#define TK_USE 79 +#define TK_DESCRIBE 80 +#define TK_DESC 81 +#define TK_ALTER 82 +#define TK_PASS 83 +#define TK_PRIVILEGE 84 +#define TK_LOCAL 85 +#define TK_COMPACT 86 +#define TK_LP 87 +#define TK_RP 88 +#define TK_IF 89 +#define TK_EXISTS 90 +#define TK_AS 91 +#define TK_OUTPUTTYPE 92 +#define TK_AGGREGATE 93 +#define TK_BUFSIZE 94 +#define TK_PPS 95 +#define TK_TSERIES 96 +#define TK_DBS 97 +#define TK_STORAGE 98 +#define TK_QTIME 99 +#define TK_CONNS 100 +#define TK_STATE 101 +#define TK_COMMA 102 +#define TK_KEEP 103 +#define TK_CACHE 104 +#define TK_REPLICA 105 +#define TK_QUORUM 106 +#define TK_DAYS 107 +#define TK_MINROWS 108 +#define TK_MAXROWS 109 +#define TK_BLOCKS 110 +#define TK_CTIME 111 +#define TK_WAL 112 +#define TK_FSYNC 113 +#define TK_COMP 114 +#define TK_PRECISION 115 +#define TK_UPDATE 116 +#define TK_CACHELAST 117 +#define TK_PARTITIONS 118 +#define TK_UNSIGNED 119 +#define TK_TAGS 120 +#define TK_USING 121 +#define TK_NULL 122 +#define TK_NOW 123 +#define TK_SELECT 124 +#define TK_UNION 125 +#define TK_ALL 126 +#define TK_DISTINCT 127 +#define TK_FROM 128 +#define TK_VARIABLE 129 +#define TK_INTERVAL 130 +#define TK_EVERY 131 +#define TK_SESSION 132 +#define TK_STATE_WINDOW 133 +#define TK_FILL 134 +#define TK_SLIDING 135 +#define TK_ORDER 136 +#define TK_BY 137 +#define TK_ASC 138 +#define TK_GROUP 139 +#define TK_HAVING 140 +#define TK_LIMIT 141 +#define TK_OFFSET 142 +#define TK_SLIMIT 143 +#define TK_SOFFSET 144 +#define TK_WHERE 145 +#define TK_RESET 146 +#define TK_QUERY 147 +#define TK_SYNCDB 148 +#define TK_ADD 149 +#define TK_COLUMN 150 +#define TK_MODIFY 151 +#define TK_TAG 152 +#define TK_CHANGE 153 +#define TK_SET 154 +#define TK_KILL 155 +#define TK_CONNECTION 156 +#define TK_STREAM 157 +#define TK_COLON 158 +#define TK_ABORT 159 +#define TK_AFTER 160 +#define TK_ATTACH 161 +#define TK_BEFORE 162 +#define TK_BEGIN 163 +#define TK_CASCADE 164 +#define TK_CLUSTER 165 +#define TK_CONFLICT 166 +#define TK_COPY 167 +#define TK_DEFERRED 168 +#define TK_DELIMITERS 169 +#define TK_DETACH 170 +#define TK_EACH 171 +#define TK_END 172 +#define TK_EXPLAIN 173 +#define TK_FAIL 174 +#define TK_FOR 175 +#define TK_IGNORE 176 +#define TK_IMMEDIATE 177 +#define TK_INITIALLY 178 +#define TK_INSTEAD 179 +#define TK_KEY 180 +#define TK_OF 181 +#define TK_RAISE 182 +#define TK_REPLACE 183 +#define TK_RESTRICT 184 +#define TK_ROW 185 +#define TK_STATEMENT 186 +#define TK_TRIGGER 187 +#define TK_VIEW 188 +#define TK_IPTOKEN 189 +#define TK_SEMI 190 +#define TK_NONE 191 +#define TK_PREV 192 +#define TK_LINEAR 193 +#define TK_IMPORT 194 +#define TK_TBNAME 195 +#define TK_JOIN 196 +#define TK_INSERT 197 +#define TK_INTO 198 +#define TK_VALUES 
199 #endif /**************** End token definitions ***************************************/ @@ -300,29 +301,29 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 279 +#define YYNOCODE 281 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SCreatedTableInfo yy78; - SCreateTableSql* yy110; - SLimitVal yy126; - int yy130; - SArray* yy135; - SIntervalVal yy160; - TAOS_FIELD yy181; - SCreateDbInfo yy256; - SWindowStateVal yy258; - int32_t yy262; - SCreateAcctInfo yy277; - tVariant yy308; - SRelationInfo* yy460; - SSqlNode* yy488; - SSessionWindowVal yy511; - tSqlExpr* yy526; - int64_t yy531; + int32_t yy2; + SCreatedTableInfo yy42; + tSqlExpr* yy44; + SRelationInfo* yy46; + SCreateAcctInfo yy47; + TAOS_FIELD yy179; + SLimitVal yy204; + int yy222; + SSqlNode* yy246; + SArray* yy247; + SCreateDbInfo yy262; + SCreateTableSql* yy336; + tVariant yy378; + int64_t yy403; + SIntervalVal yy430; + SWindowStateVal yy492; + SSessionWindowVal yy507; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -338,18 +339,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 373 -#define YYNRULE 295 -#define YYNRULE_WITH_ACTION 295 -#define YYNTOKEN 198 -#define YY_MAX_SHIFT 372 -#define YY_MIN_SHIFTREDUCE 578 -#define YY_MAX_SHIFTREDUCE 872 -#define YY_ERROR_ACTION 873 -#define YY_ACCEPT_ACTION 874 -#define YY_NO_ACTION 875 -#define YY_MIN_REDUCE 876 -#define YY_MAX_REDUCE 1170 +#define YYNSTATE 375 +#define YYNRULE 296 +#define YYNRULE_WITH_ACTION 296 +#define YYNTOKEN 200 +#define YY_MAX_SHIFT 374 +#define YY_MIN_SHIFTREDUCE 580 +#define YY_MAX_SHIFTREDUCE 875 +#define YY_ERROR_ACTION 876 +#define YY_ACCEPT_ACTION 877 +#define YY_NO_ACTION 878 +#define YY_MIN_REDUCE 879 +#define YY_MAX_REDUCE 1174 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -416,297 +417,299 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (788) +#define YY_ACTTAB_COUNT (796) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 1026, 630, 158, 630, 212, 257, 251, 165, 261, 631, - /* 10 */ 1054, 631, 666, 60, 61, 1146, 64, 65, 371, 235, - /* 20 */ 260, 54, 53, 630, 63, 329, 68, 66, 69, 67, - /* 30 */ 1002, 631, 1000, 1001, 59, 58, 165, 1003, 57, 56, - /* 40 */ 55, 1004, 1045, 1005, 1006, 52, 51, 248, 60, 61, - /* 50 */ 241, 64, 65, 23, 1032, 260, 54, 53, 238, 63, - /* 60 */ 329, 68, 66, 69, 67, 212, 212, 98, 212, 59, - /* 70 */ 58, 289, 288, 57, 56, 55, 1147, 1147, 1051, 1147, - /* 80 */ 52, 51, 1092, 60, 61, 1031, 64, 65, 1018, 83, - /* 90 */ 260, 54, 53, 38, 63, 329, 68, 66, 69, 67, - /* 100 */ 1015, 1016, 35, 1019, 59, 58, 89, 29, 57, 56, - /* 110 */ 55, 1093, 126, 301, 247, 52, 51, 327, 1032, 60, - /* 120 */ 62, 808, 64, 65, 359, 249, 260, 54, 53, 1032, - /* 130 */ 63, 329, 68, 66, 69, 67, 773, 774, 237, 165, - /* 140 */ 59, 58, 1029, 45, 57, 56, 55, 61, 630, 64, - /* 150 */ 65, 52, 51, 260, 54, 53, 631, 63, 329, 68, - /* 160 */ 66, 69, 67, 44, 165, 366, 365, 59, 58, 359, - /* 170 */ 364, 57, 56, 55, 363, 327, 362, 361, 52, 51, - /* 180 */ 994, 982, 983, 984, 985, 986, 987, 988, 989, 990, - /* 190 */ 991, 992, 993, 995, 996, 579, 580, 581, 582, 583, - /* 200 */ 584, 585, 586, 587, 588, 589, 590, 591, 592, 156, - /* 210 */ 209, 236, 64, 65, 95, 38, 260, 54, 53, 96, - /* 220 */ 63, 329, 68, 66, 69, 67, 370, 369, 149, 38, - /* 230 */ 59, 58, 101, 84, 57, 56, 55, 303, 210, 94, - /* 240 */ 174, 52, 51, 44, 325, 366, 365, 324, 323, 322, - /* 250 */ 364, 321, 320, 319, 363, 318, 362, 361, 291, 24, - /* 260 */ 245, 259, 823, 925, 1029, 812, 935, 815, 1020, 818, - /* 270 */ 193, 259, 823, 193, 246, 812, 220, 815, 1029, 818, - /* 280 */ 57, 56, 55, 222, 155, 153, 152, 52, 51, 140, - /* 290 */ 139, 138, 221, 1045, 233, 234, 334, 89, 330, 68, - /* 300 */ 66, 69, 67, 926, 233, 234, 283, 59, 58, 239, - /* 310 */ 193, 57, 56, 55, 5, 41, 183, 714, 52, 51, - /* 320 */ 39, 182, 107, 112, 103, 111, 874, 372, 738, 38, - /* 330 */ 38, 735, 14, 736, 45, 737, 97, 263, 252, 38, - /* 340 */ 314, 124, 118, 129, 274, 70, 349, 348, 128, 268, - /* 350 */ 134, 137, 127, 278, 277, 70, 281, 269, 81, 131, - /* 360 */ 265, 266, 203, 201, 199, 231, 100, 38, 179, 198, - /* 370 */ 144, 143, 142, 141, 338, 38, 1045, 38, 1029, 1028, - /* 380 */ 824, 819, 269, 38, 339, 269, 38, 820, 1029, 38, - /* 390 */ 824, 819, 282, 180, 59, 58, 1030, 820, 57, 56, - /* 400 */ 55, 814, 82, 817, 754, 52, 51, 813, 790, 816, - /* 410 */ 367, 963, 340, 264, 86, 262, 1029, 337, 336, 751, - /* 420 */ 341, 87, 345, 770, 1029, 270, 1029, 267, 346, 344, - /* 430 */ 343, 347, 1029, 74, 351, 1029, 1017, 160, 1029, 1, - /* 440 */ 181, 3, 194, 780, 77, 9, 739, 740, 781, 724, - /* 450 */ 306, 285, 258, 726, 308, 725, 34, 71, 285, 847, - /* 460 */ 52, 51, 26, 39, 39, 789, 825, 71, 99, 71, - /* 470 */ 810, 331, 629, 25, 6, 75, 743, 80, 744, 16, - /* 480 */ 25, 15, 136, 135, 78, 741, 25, 742, 117, 309, - /* 490 */ 116, 758, 253, 18, 20, 17, 19, 123, 22, 122, - /* 500 */ 21, 215, 216, 254, 279, 255, 256, 811, 227, 713, - /* 510 */ 228, 213, 214, 217, 1166, 211, 157, 218, 219, 224, - /* 520 */ 225, 226, 223, 821, 208, 1158, 1103, 1102, 243, 822, - /* 530 */ 1099, 1098, 244, 350, 1053, 48, 1064, 1046, 1061, 1062, - /* 540 */ 154, 1066, 286, 159, 1085, 1043, 1027, 164, 297, 315, - /* 550 */ 1084, 175, 176, 1025, 177, 178, 940, 998, 311, 312, - /* 560 */ 173, 313, 769, 166, 167, 316, 290, 
240, 168, 169, - /* 570 */ 317, 292, 46, 294, 206, 42, 827, 328, 934, 335, - /* 580 */ 79, 1165, 114, 1164, 1161, 184, 76, 342, 1157, 304, - /* 590 */ 120, 1156, 50, 1153, 185, 960, 302, 43, 40, 47, - /* 600 */ 207, 293, 300, 922, 130, 920, 132, 133, 918, 917, - /* 610 */ 271, 196, 197, 914, 913, 298, 912, 911, 910, 909, - /* 620 */ 908, 200, 202, 905, 903, 901, 899, 204, 896, 205, - /* 630 */ 892, 296, 172, 284, 85, 90, 295, 49, 1086, 360, - /* 640 */ 125, 352, 232, 353, 250, 310, 354, 355, 356, 357, - /* 650 */ 229, 358, 230, 368, 872, 108, 939, 109, 938, 272, - /* 660 */ 273, 871, 275, 276, 870, 853, 852, 280, 916, 915, - /* 670 */ 285, 187, 961, 186, 907, 188, 189, 190, 192, 191, - /* 680 */ 4, 145, 146, 2, 147, 906, 305, 898, 962, 148, - /* 690 */ 897, 33, 10, 88, 746, 170, 171, 30, 287, 91, - /* 700 */ 771, 161, 1008, 782, 162, 163, 776, 92, 242, 778, - /* 710 */ 93, 299, 11, 31, 12, 32, 13, 27, 307, 28, - /* 720 */ 100, 102, 644, 36, 104, 105, 37, 106, 679, 677, - /* 730 */ 676, 675, 673, 672, 671, 668, 634, 326, 110, 7, - /* 740 */ 332, 828, 333, 113, 115, 39, 826, 8, 72, 73, - /* 750 */ 716, 715, 119, 121, 712, 660, 658, 650, 656, 652, - /* 760 */ 654, 648, 646, 682, 681, 680, 678, 674, 670, 669, - /* 770 */ 195, 632, 596, 876, 875, 875, 875, 875, 875, 875, - /* 780 */ 875, 875, 875, 875, 875, 875, 150, 151, + /* 0 */ 23, 632, 159, 877, 374, 632, 210, 1057, 213, 633, + /* 10 */ 373, 237, 243, 633, 61, 62, 1035, 65, 66, 1150, + /* 20 */ 166, 262, 55, 54, 53, 668, 64, 331, 69, 67, + /* 30 */ 70, 68, 1005, 1034, 1003, 1004, 60, 59, 175, 1006, + /* 40 */ 58, 57, 56, 1007, 166, 1008, 1009, 52, 51, 259, + /* 50 */ 253, 61, 62, 1048, 65, 66, 293, 211, 262, 55, + /* 60 */ 54, 53, 1021, 64, 331, 69, 67, 70, 68, 284, + /* 70 */ 166, 291, 290, 60, 59, 1054, 716, 58, 57, 56, + /* 80 */ 58, 57, 56, 38, 52, 51, 632, 52, 51, 61, + /* 90 */ 62, 84, 65, 66, 633, 1095, 262, 55, 54, 53, + /* 100 */ 90, 64, 331, 69, 67, 70, 68, 351, 350, 213, + /* 110 */ 213, 60, 59, 263, 249, 58, 57, 56, 1035, 96, + /* 120 */ 1151, 1151, 52, 51, 361, 810, 61, 63, 239, 65, + /* 130 */ 66, 1029, 1032, 262, 55, 54, 53, 45, 64, 331, + /* 140 */ 69, 67, 70, 68, 127, 1096, 251, 303, 60, 59, + /* 150 */ 1035, 329, 58, 57, 56, 62, 361, 65, 66, 52, + /* 160 */ 51, 262, 55, 54, 53, 166, 64, 331, 69, 67, + /* 170 */ 70, 68, 29, 213, 775, 776, 60, 59, 250, 812, + /* 180 */ 58, 57, 56, 83, 1151, 65, 66, 52, 51, 262, + /* 190 */ 55, 54, 53, 632, 64, 331, 69, 67, 70, 68, + /* 200 */ 44, 633, 368, 367, 60, 59, 34, 366, 58, 57, + /* 210 */ 56, 365, 38, 364, 363, 52, 51, 1020, 813, 44, + /* 220 */ 327, 368, 367, 326, 325, 324, 366, 323, 322, 321, + /* 230 */ 365, 320, 364, 363, 372, 371, 150, 329, 305, 311, + /* 240 */ 95, 254, 581, 582, 583, 584, 585, 586, 587, 588, + /* 250 */ 589, 590, 591, 592, 593, 594, 157, 247, 238, 369, + /* 260 */ 966, 1032, 1, 182, 997, 985, 986, 987, 988, 989, + /* 270 */ 990, 991, 992, 993, 994, 995, 996, 998, 999, 24, + /* 280 */ 38, 69, 67, 70, 68, 255, 38, 265, 216, 60, + /* 290 */ 59, 792, 1048, 58, 57, 56, 222, 38, 261, 825, + /* 300 */ 52, 51, 814, 224, 817, 816, 820, 819, 240, 141, + /* 310 */ 140, 139, 223, 281, 261, 825, 336, 90, 814, 102, + /* 320 */ 817, 815, 820, 818, 1048, 248, 156, 154, 153, 1032, + /* 330 */ 97, 340, 217, 235, 236, 1032, 740, 332, 928, 737, + /* 340 */ 241, 738, 341, 739, 85, 194, 1032, 270, 791, 235, + /* 350 */ 236, 5, 41, 184, 45, 1023, 38, 218, 183, 108, + /* 360 */ 113, 104, 112, 266, 271, 264, 756, 339, 338, 256, + /* 370 */ 267, 268, 125, 119, 130, 180, 283, 316, 82, 
129, + /* 380 */ 99, 135, 138, 128, 71, 233, 204, 202, 200, 38, + /* 390 */ 132, 38, 38, 199, 145, 144, 143, 142, 60, 59, + /* 400 */ 71, 342, 58, 57, 56, 1032, 38, 3, 195, 52, + /* 410 */ 51, 87, 276, 1018, 1019, 35, 1022, 38, 38, 826, + /* 420 */ 821, 280, 279, 272, 753, 269, 822, 346, 345, 823, + /* 430 */ 75, 88, 285, 271, 343, 826, 821, 347, 1032, 14, + /* 440 */ 1031, 1032, 822, 98, 181, 824, 39, 271, 287, 78, + /* 450 */ 9, 348, 938, 760, 929, 1032, 741, 742, 1033, 194, + /* 460 */ 772, 194, 349, 353, 782, 783, 1032, 1032, 287, 726, + /* 470 */ 308, 728, 76, 101, 161, 260, 310, 727, 72, 26, + /* 480 */ 850, 52, 51, 39, 39, 72, 827, 333, 631, 79, + /* 490 */ 100, 72, 81, 745, 25, 746, 16, 6, 15, 118, + /* 500 */ 25, 117, 25, 18, 743, 17, 744, 20, 257, 19, + /* 510 */ 124, 22, 123, 21, 137, 136, 258, 229, 230, 214, + /* 520 */ 215, 219, 1170, 212, 715, 220, 221, 226, 1162, 227, + /* 530 */ 228, 225, 209, 1106, 1105, 245, 1102, 1101, 246, 158, + /* 540 */ 352, 1056, 1067, 48, 1064, 1088, 1087, 1065, 155, 1049, + /* 550 */ 1069, 288, 160, 165, 1030, 292, 299, 176, 177, 286, + /* 560 */ 1028, 178, 242, 294, 169, 179, 943, 313, 314, 315, + /* 570 */ 318, 319, 46, 207, 771, 42, 330, 937, 167, 296, + /* 580 */ 337, 1169, 115, 80, 77, 1046, 1168, 1165, 185, 344, + /* 590 */ 306, 50, 1161, 829, 168, 121, 1160, 1157, 186, 963, + /* 600 */ 304, 43, 170, 40, 47, 208, 925, 302, 131, 923, + /* 610 */ 133, 134, 300, 921, 920, 273, 197, 198, 917, 916, + /* 620 */ 915, 914, 913, 912, 911, 201, 203, 908, 906, 904, + /* 630 */ 902, 205, 899, 298, 206, 895, 49, 295, 317, 86, + /* 640 */ 91, 297, 1089, 362, 126, 354, 355, 356, 357, 358, + /* 650 */ 234, 171, 252, 312, 359, 360, 370, 875, 274, 275, + /* 660 */ 231, 942, 232, 109, 941, 110, 874, 277, 278, 873, + /* 670 */ 856, 282, 919, 918, 855, 307, 910, 189, 287, 146, + /* 680 */ 964, 187, 192, 188, 190, 191, 193, 147, 2, 148, + /* 690 */ 909, 965, 149, 1001, 901, 4, 900, 174, 172, 33, + /* 700 */ 10, 173, 89, 748, 30, 289, 1011, 92, 773, 162, + /* 710 */ 164, 784, 163, 244, 778, 93, 31, 780, 94, 301, + /* 720 */ 11, 32, 12, 13, 27, 28, 309, 103, 101, 646, + /* 730 */ 106, 36, 105, 679, 37, 681, 107, 678, 677, 675, + /* 740 */ 674, 673, 670, 636, 7, 328, 828, 8, 111, 830, + /* 750 */ 334, 335, 114, 116, 120, 73, 74, 718, 39, 717, + /* 760 */ 714, 122, 662, 660, 652, 658, 654, 656, 650, 648, + /* 770 */ 684, 683, 682, 680, 676, 672, 671, 196, 634, 598, + /* 780 */ 879, 878, 878, 878, 878, 878, 878, 878, 878, 878, + /* 790 */ 878, 878, 878, 878, 151, 152, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 200, 1, 200, 1, 267, 207, 207, 200, 207, 9, - /* 10 */ 200, 9, 5, 13, 14, 278, 16, 17, 200, 201, - /* 20 */ 20, 21, 22, 1, 24, 25, 26, 27, 28, 29, - /* 30 */ 224, 9, 226, 227, 34, 35, 200, 231, 38, 39, - /* 40 */ 40, 235, 248, 237, 238, 45, 46, 247, 13, 14, - /* 50 */ 246, 16, 17, 267, 250, 20, 21, 22, 264, 24, - /* 60 */ 25, 26, 27, 28, 29, 267, 267, 208, 267, 34, - /* 70 */ 35, 269, 270, 38, 39, 40, 278, 278, 268, 278, - /* 80 */ 45, 46, 275, 13, 14, 250, 16, 17, 0, 89, - /* 90 */ 20, 21, 22, 200, 24, 25, 26, 27, 28, 29, - /* 100 */ 241, 242, 243, 244, 34, 35, 85, 85, 38, 39, - /* 110 */ 40, 275, 81, 277, 246, 45, 46, 87, 250, 13, - /* 120 */ 14, 86, 16, 17, 93, 246, 20, 21, 22, 250, - /* 130 */ 24, 25, 26, 27, 28, 29, 128, 129, 245, 200, - /* 140 */ 34, 35, 249, 122, 38, 39, 40, 14, 1, 16, - /* 150 */ 17, 45, 46, 20, 21, 22, 9, 24, 25, 26, - /* 160 */ 27, 28, 29, 101, 200, 103, 104, 34, 35, 93, - /* 170 */ 108, 38, 39, 40, 112, 87, 114, 115, 45, 
46, - /* 180 */ 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - /* 190 */ 234, 235, 236, 237, 238, 48, 49, 50, 51, 52, - /* 200 */ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, - /* 210 */ 267, 64, 16, 17, 275, 200, 20, 21, 22, 251, - /* 220 */ 24, 25, 26, 27, 28, 29, 68, 69, 70, 200, - /* 230 */ 34, 35, 208, 265, 38, 39, 40, 273, 267, 275, - /* 240 */ 254, 45, 46, 101, 102, 103, 104, 105, 106, 107, - /* 250 */ 108, 109, 110, 111, 112, 113, 114, 115, 272, 47, - /* 260 */ 245, 1, 2, 206, 249, 5, 206, 7, 244, 9, - /* 270 */ 213, 1, 2, 213, 245, 5, 64, 7, 249, 9, - /* 280 */ 38, 39, 40, 71, 65, 66, 67, 45, 46, 77, - /* 290 */ 78, 79, 80, 248, 34, 35, 84, 85, 38, 26, - /* 300 */ 27, 28, 29, 206, 34, 35, 86, 34, 35, 264, - /* 310 */ 213, 38, 39, 40, 65, 66, 67, 5, 45, 46, - /* 320 */ 100, 72, 73, 74, 75, 76, 198, 199, 2, 200, - /* 330 */ 200, 5, 85, 7, 122, 9, 89, 71, 267, 200, - /* 340 */ 91, 65, 66, 67, 145, 85, 34, 35, 72, 71, - /* 350 */ 74, 75, 76, 154, 155, 85, 144, 200, 146, 83, - /* 360 */ 34, 35, 65, 66, 67, 153, 119, 200, 211, 72, - /* 370 */ 73, 74, 75, 76, 245, 200, 248, 200, 249, 249, - /* 380 */ 120, 121, 200, 200, 245, 200, 200, 127, 249, 200, - /* 390 */ 120, 121, 264, 211, 34, 35, 211, 127, 38, 39, - /* 400 */ 40, 5, 208, 7, 38, 45, 46, 5, 79, 7, - /* 410 */ 222, 223, 245, 147, 86, 149, 249, 151, 152, 100, - /* 420 */ 245, 86, 245, 86, 249, 147, 249, 149, 245, 151, - /* 430 */ 152, 245, 249, 100, 245, 249, 242, 100, 249, 209, - /* 440 */ 210, 204, 205, 86, 100, 126, 120, 121, 86, 86, - /* 450 */ 86, 123, 63, 86, 86, 86, 85, 100, 123, 86, - /* 460 */ 45, 46, 100, 100, 100, 136, 86, 100, 100, 100, - /* 470 */ 1, 15, 86, 100, 85, 142, 5, 85, 7, 148, - /* 480 */ 100, 150, 81, 82, 140, 5, 100, 7, 148, 118, - /* 490 */ 150, 125, 267, 148, 148, 150, 150, 148, 148, 150, - /* 500 */ 150, 267, 267, 267, 200, 267, 267, 38, 267, 117, - /* 510 */ 267, 267, 267, 267, 250, 267, 200, 267, 267, 267, - /* 520 */ 267, 267, 267, 127, 267, 250, 240, 240, 240, 127, - /* 530 */ 240, 240, 240, 240, 200, 266, 200, 248, 200, 200, - /* 540 */ 63, 200, 248, 200, 276, 263, 248, 200, 200, 92, - /* 550 */ 276, 252, 200, 200, 200, 200, 200, 239, 200, 200, - /* 560 */ 255, 200, 127, 262, 261, 200, 271, 271, 260, 259, - /* 570 */ 200, 271, 200, 271, 200, 200, 120, 200, 200, 200, - /* 580 */ 139, 200, 200, 200, 200, 200, 141, 200, 200, 134, - /* 590 */ 200, 200, 138, 200, 200, 200, 137, 200, 200, 200, - /* 600 */ 200, 133, 132, 200, 200, 200, 200, 200, 200, 200, - /* 610 */ 200, 200, 200, 200, 200, 131, 200, 200, 200, 200, - /* 620 */ 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - /* 630 */ 200, 130, 256, 202, 202, 202, 202, 143, 202, 116, - /* 640 */ 99, 98, 202, 54, 202, 202, 95, 97, 58, 96, - /* 650 */ 202, 94, 202, 87, 5, 208, 212, 208, 212, 156, - /* 660 */ 5, 5, 156, 5, 5, 103, 102, 145, 202, 202, - /* 670 */ 123, 219, 221, 220, 202, 215, 218, 216, 214, 217, - /* 680 */ 204, 203, 203, 209, 203, 202, 118, 202, 223, 203, - /* 690 */ 202, 253, 85, 124, 86, 258, 257, 85, 100, 100, - /* 700 */ 86, 85, 239, 86, 85, 100, 86, 85, 1, 86, - /* 710 */ 85, 85, 135, 100, 135, 100, 85, 85, 118, 85, - /* 720 */ 119, 81, 5, 90, 89, 73, 90, 89, 9, 5, - /* 730 */ 5, 5, 5, 5, 5, 5, 88, 15, 81, 85, - /* 740 */ 25, 120, 62, 150, 150, 100, 86, 85, 16, 16, - /* 750 */ 5, 5, 150, 150, 86, 5, 5, 5, 5, 5, - /* 760 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 770 */ 100, 88, 63, 0, 279, 279, 279, 279, 279, 279, - /* 780 */ 279, 279, 279, 279, 279, 279, 21, 21, 279, 279, - /* 790 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 
800 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 810 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 820 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 830 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 840 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 850 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 860 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 870 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 880 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 890 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 900 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 910 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 920 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 930 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 940 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 950 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 960 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 970 */ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - /* 980 */ 279, 279, 279, 279, 279, 279, + /* 0 */ 269, 1, 202, 200, 201, 1, 269, 202, 269, 9, + /* 10 */ 202, 203, 248, 9, 14, 15, 252, 17, 18, 280, + /* 20 */ 202, 21, 22, 23, 24, 5, 26, 27, 28, 29, + /* 30 */ 30, 31, 226, 252, 228, 229, 36, 37, 256, 233, + /* 40 */ 40, 41, 42, 237, 202, 239, 240, 47, 48, 209, + /* 50 */ 209, 14, 15, 250, 17, 18, 274, 269, 21, 22, + /* 60 */ 23, 24, 0, 26, 27, 28, 29, 30, 31, 266, + /* 70 */ 202, 271, 272, 36, 37, 270, 5, 40, 41, 42, + /* 80 */ 40, 41, 42, 202, 47, 48, 1, 47, 48, 14, + /* 90 */ 15, 91, 17, 18, 9, 277, 21, 22, 23, 24, + /* 100 */ 87, 26, 27, 28, 29, 30, 31, 36, 37, 269, + /* 110 */ 269, 36, 37, 209, 248, 40, 41, 42, 252, 277, + /* 120 */ 280, 280, 47, 48, 95, 88, 14, 15, 247, 17, + /* 130 */ 18, 202, 251, 21, 22, 23, 24, 124, 26, 27, + /* 140 */ 28, 29, 30, 31, 83, 277, 248, 279, 36, 37, + /* 150 */ 252, 89, 40, 41, 42, 15, 95, 17, 18, 47, + /* 160 */ 48, 21, 22, 23, 24, 202, 26, 27, 28, 29, + /* 170 */ 30, 31, 87, 269, 130, 131, 36, 37, 249, 1, + /* 180 */ 40, 41, 42, 210, 280, 17, 18, 47, 48, 21, + /* 190 */ 22, 23, 24, 1, 26, 27, 28, 29, 30, 31, + /* 200 */ 103, 9, 105, 106, 36, 37, 87, 110, 40, 41, + /* 210 */ 42, 114, 202, 116, 117, 47, 48, 244, 40, 103, + /* 220 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 230 */ 114, 115, 116, 117, 70, 71, 72, 89, 275, 120, + /* 240 */ 277, 269, 50, 51, 52, 53, 54, 55, 56, 57, + /* 250 */ 58, 59, 60, 61, 62, 63, 64, 247, 66, 224, + /* 260 */ 225, 251, 211, 212, 226, 227, 228, 229, 230, 231, + /* 270 */ 232, 233, 234, 235, 236, 237, 238, 239, 240, 49, + /* 280 */ 202, 28, 29, 30, 31, 269, 202, 73, 269, 36, + /* 290 */ 37, 81, 250, 40, 41, 42, 66, 202, 1, 2, + /* 300 */ 47, 48, 5, 73, 7, 5, 9, 7, 266, 79, + /* 310 */ 80, 81, 82, 202, 1, 2, 86, 87, 5, 210, + /* 320 */ 7, 5, 9, 7, 250, 247, 67, 68, 69, 251, + /* 330 */ 253, 247, 269, 36, 37, 251, 2, 40, 208, 5, + /* 340 */ 266, 7, 247, 9, 267, 215, 251, 73, 138, 36, + /* 350 */ 37, 67, 68, 69, 124, 246, 202, 269, 74, 75, + /* 360 */ 76, 77, 78, 149, 202, 151, 40, 153, 154, 269, + /* 370 */ 36, 37, 67, 68, 69, 213, 146, 93, 148, 74, + /* 380 */ 210, 76, 77, 78, 87, 155, 67, 68, 69, 202, + /* 390 */ 85, 202, 202, 74, 75, 76, 77, 78, 36, 37, + /* 400 */ 87, 247, 40, 41, 42, 251, 202, 206, 207, 47, + /* 410 */ 48, 88, 147, 243, 244, 245, 246, 202, 202, 122, + /* 420 */ 123, 156, 157, 149, 102, 151, 129, 153, 154, 129, + /* 430 */ 102, 88, 88, 202, 247, 
122, 123, 247, 251, 87, + /* 440 */ 251, 251, 129, 91, 213, 129, 102, 202, 125, 102, + /* 450 */ 128, 247, 208, 127, 208, 251, 122, 123, 213, 215, + /* 460 */ 88, 215, 247, 247, 88, 88, 251, 251, 125, 88, + /* 470 */ 88, 88, 144, 121, 102, 65, 88, 88, 102, 102, + /* 480 */ 88, 47, 48, 102, 102, 102, 88, 16, 88, 142, + /* 490 */ 102, 102, 87, 5, 102, 7, 150, 87, 152, 150, + /* 500 */ 102, 152, 102, 150, 5, 152, 7, 150, 269, 152, + /* 510 */ 150, 150, 152, 152, 83, 84, 269, 269, 269, 269, + /* 520 */ 269, 269, 252, 269, 119, 269, 269, 269, 252, 269, + /* 530 */ 269, 269, 269, 242, 242, 242, 242, 242, 242, 202, + /* 540 */ 242, 202, 202, 268, 202, 278, 278, 202, 65, 250, + /* 550 */ 202, 250, 202, 202, 250, 273, 202, 254, 202, 204, + /* 560 */ 202, 202, 273, 273, 262, 202, 202, 202, 202, 202, + /* 570 */ 202, 202, 202, 202, 129, 202, 202, 202, 264, 273, + /* 580 */ 202, 202, 202, 141, 143, 265, 202, 202, 202, 202, + /* 590 */ 136, 140, 202, 122, 263, 202, 202, 202, 202, 202, + /* 600 */ 139, 202, 261, 202, 202, 202, 202, 134, 202, 202, + /* 610 */ 202, 202, 133, 202, 202, 202, 202, 202, 202, 202, + /* 620 */ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + /* 630 */ 202, 202, 202, 132, 202, 202, 145, 135, 94, 204, + /* 640 */ 204, 204, 204, 118, 101, 100, 56, 97, 99, 60, + /* 650 */ 204, 260, 204, 204, 98, 96, 89, 5, 158, 5, + /* 660 */ 204, 214, 204, 210, 214, 210, 5, 158, 5, 5, + /* 670 */ 105, 147, 204, 204, 104, 120, 204, 217, 125, 205, + /* 680 */ 223, 222, 219, 221, 220, 218, 216, 205, 211, 205, + /* 690 */ 204, 225, 205, 241, 204, 206, 204, 257, 259, 255, + /* 700 */ 87, 258, 126, 88, 87, 102, 241, 102, 88, 87, + /* 710 */ 102, 88, 87, 1, 88, 87, 102, 88, 87, 87, + /* 720 */ 137, 102, 137, 87, 87, 87, 120, 83, 121, 5, + /* 730 */ 75, 92, 91, 5, 92, 9, 91, 5, 5, 5, + /* 740 */ 5, 5, 5, 90, 87, 16, 88, 87, 83, 122, + /* 750 */ 27, 64, 152, 152, 152, 17, 17, 5, 102, 5, + /* 760 */ 88, 152, 5, 5, 5, 5, 5, 5, 5, 5, + /* 770 */ 5, 5, 5, 5, 5, 5, 5, 102, 90, 65, + /* 780 */ 0, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 790 */ 281, 281, 281, 281, 22, 22, 281, 281, 281, 281, + /* 800 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 810 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 820 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 830 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 840 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 850 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 860 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 870 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 880 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 890 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 900 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 910 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 920 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 930 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 940 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 950 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 960 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 970 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 980 */ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, + /* 990 */ 281, 281, 281, 281, 281, 281, }; -#define YY_SHIFT_COUNT (372) +#define YY_SHIFT_COUNT (374) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (773) +#define YY_SHIFT_MAX (780) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 212, 142, 
142, 62, 62, 30, 260, 270, 270, 22, - /* 10 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - /* 20 */ 2, 2, 2, 0, 147, 270, 326, 326, 326, 21, - /* 30 */ 21, 2, 2, 8, 2, 88, 2, 2, 2, 2, - /* 40 */ 31, 30, 76, 76, 7, 788, 788, 788, 270, 270, - /* 50 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 60 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 70 */ 270, 326, 326, 326, 312, 312, 312, 312, 312, 312, - /* 80 */ 312, 2, 2, 2, 366, 2, 2, 2, 21, 21, - /* 90 */ 2, 2, 2, 2, 329, 329, 319, 21, 2, 2, - /* 100 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - /* 110 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - /* 120 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - /* 130 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - /* 140 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - /* 150 */ 2, 2, 2, 2, 2, 2, 2, 477, 477, 477, - /* 160 */ 435, 435, 435, 435, 477, 477, 441, 445, 455, 454, - /* 170 */ 459, 470, 484, 501, 468, 494, 477, 477, 477, 457, - /* 180 */ 457, 523, 30, 30, 477, 477, 541, 543, 589, 551, - /* 190 */ 550, 590, 553, 557, 523, 7, 477, 477, 566, 566, - /* 200 */ 477, 566, 477, 566, 477, 477, 788, 788, 35, 70, - /* 210 */ 70, 106, 70, 133, 196, 273, 273, 273, 273, 273, - /* 220 */ 249, 276, 297, 360, 360, 360, 360, 242, 242, 266, - /* 230 */ 278, 199, 247, 396, 402, 158, 219, 220, 328, 335, - /* 240 */ 337, 357, 362, 333, 344, 363, 364, 367, 368, 369, - /* 250 */ 371, 373, 415, 415, 415, 415, 415, 380, 469, 389, - /* 260 */ 456, 386, 331, 340, 345, 471, 480, 346, 349, 392, - /* 270 */ 350, 401, 649, 503, 655, 656, 506, 658, 659, 562, - /* 280 */ 564, 522, 547, 568, 607, 569, 608, 612, 598, 599, - /* 290 */ 614, 616, 617, 619, 620, 605, 622, 623, 625, 707, - /* 300 */ 626, 613, 577, 615, 579, 631, 568, 632, 600, 634, - /* 310 */ 601, 640, 633, 635, 652, 717, 636, 638, 719, 724, - /* 320 */ 725, 726, 727, 728, 729, 730, 648, 722, 657, 654, - /* 330 */ 660, 621, 662, 715, 680, 732, 593, 594, 645, 645, - /* 340 */ 645, 645, 733, 602, 603, 645, 645, 645, 745, 746, - /* 350 */ 668, 645, 750, 751, 752, 753, 754, 755, 756, 757, - /* 360 */ 758, 759, 760, 761, 762, 763, 764, 670, 683, 765, - /* 370 */ 766, 709, 773, + /* 0 */ 230, 116, 116, 97, 97, 148, 297, 313, 313, 85, + /* 10 */ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + /* 20 */ 4, 4, 4, 0, 192, 313, 334, 334, 334, 13, + /* 30 */ 13, 4, 4, 44, 4, 62, 4, 4, 4, 4, + /* 40 */ 61, 148, 29, 29, 20, 796, 796, 796, 313, 313, + /* 50 */ 313, 313, 313, 313, 313, 313, 313, 313, 313, 313, + /* 60 */ 313, 313, 313, 313, 313, 313, 313, 313, 313, 313, + /* 70 */ 313, 313, 334, 334, 334, 71, 71, 71, 71, 71, + /* 80 */ 71, 71, 4, 4, 4, 326, 4, 4, 4, 13, + /* 90 */ 13, 4, 4, 4, 4, 210, 210, 322, 13, 4, + /* 100 */ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + /* 110 */ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + /* 120 */ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + /* 130 */ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + /* 140 */ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + /* 150 */ 4, 4, 4, 4, 4, 4, 4, 4, 483, 483, + /* 160 */ 483, 445, 445, 445, 445, 483, 483, 442, 441, 454, + /* 170 */ 451, 461, 473, 479, 501, 502, 491, 483, 483, 483, + /* 180 */ 544, 544, 525, 148, 148, 483, 483, 543, 545, 590, + /* 190 */ 550, 549, 589, 556, 559, 525, 20, 483, 483, 567, + /* 200 */ 567, 483, 567, 483, 567, 483, 483, 796, 796, 37, + /* 210 */ 75, 75, 112, 75, 140, 168, 253, 253, 253, 253, + /* 220 */ 253, 253, 284, 305, 319, 362, 362, 362, 362, 40, + /* 230 */ 40, 214, 274, 265, 352, 300, 316, 164, 259, 344, + /* 240 */ 323, 343, 372, 376, 377, 328, 347, 381, 382, 383, + /* 250 */ 388, 389, 119, 392, 434, 434, 434, 434, 434, 398, + /* 260 */ 178, 410, 471, 400, 346, 349, 353, 488, 499, 357, + /* 270 */ 
360, 405, 361, 431, 652, 500, 654, 661, 509, 663, + /* 280 */ 664, 565, 570, 524, 553, 555, 613, 576, 615, 617, + /* 290 */ 603, 605, 620, 622, 623, 625, 626, 608, 628, 629, + /* 300 */ 631, 712, 632, 614, 583, 619, 585, 636, 555, 637, + /* 310 */ 606, 638, 607, 644, 639, 641, 655, 724, 642, 645, + /* 320 */ 726, 728, 732, 733, 734, 735, 736, 737, 653, 729, + /* 330 */ 665, 657, 658, 627, 660, 723, 687, 738, 600, 601, + /* 340 */ 656, 656, 656, 656, 739, 602, 609, 656, 656, 656, + /* 350 */ 752, 754, 672, 656, 757, 758, 759, 760, 761, 762, + /* 360 */ 763, 764, 765, 766, 767, 768, 769, 770, 771, 675, + /* 370 */ 688, 772, 773, 714, 780, }; -#define YY_REDUCE_COUNT (207) -#define YY_REDUCE_MIN (-263) -#define YY_REDUCE_MAX (488) +#define YY_REDUCE_COUNT (208) +#define YY_REDUCE_MIN (-269) +#define YY_REDUCE_MAX (492) static const short yy_reduce_ofst[] = { - /* 0 */ 128, -44, -44, -194, -194, -141, -202, -201, -199, -198, - /* 10 */ -107, -164, -36, 15, 29, 129, 139, 167, 175, 177, - /* 20 */ 183, 186, 189, -190, -182, -263, -196, -132, -121, -206, - /* 30 */ 45, -193, -61, -14, -200, 24, 157, 182, 185, 130, - /* 40 */ 57, 194, 60, 97, 188, -32, 230, 237, -214, -57, - /* 50 */ -29, 71, 225, 234, 235, 236, 238, 239, 241, 243, - /* 60 */ 244, 245, 246, 248, 250, 251, 252, 253, 254, 255, - /* 70 */ 257, -165, 264, 275, 286, 287, 288, 290, 291, 292, - /* 80 */ 293, 304, 316, 334, 269, 336, 338, 339, 289, 294, - /* 90 */ 341, 343, 347, 348, 268, 274, 299, 298, 352, 353, - /* 100 */ 354, 355, 356, 358, 359, 361, 365, 370, 372, 374, - /* 110 */ 375, 377, 378, 379, 381, 382, 383, 384, 385, 387, - /* 120 */ 388, 390, 391, 393, 394, 395, 397, 398, 399, 400, - /* 130 */ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, - /* 140 */ 413, 414, 416, 417, 418, 419, 420, 421, 422, 423, - /* 150 */ 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, - /* 160 */ 295, 296, 300, 302, 434, 436, 282, 301, 303, 308, - /* 170 */ 310, 437, 439, 376, 305, 438, 440, 442, 443, 444, - /* 180 */ 446, 318, 447, 449, 448, 450, 451, 453, 452, 460, - /* 190 */ 458, 461, 462, 464, 463, 465, 466, 467, 478, 479, - /* 200 */ 472, 481, 483, 486, 485, 488, 474, 476, + /* 0 */ -197, 38, 38, -194, -194, 170, -160, -159, -96, -200, + /* 10 */ -119, -132, -37, 10, 78, 84, 95, 154, 187, 190, + /* 20 */ 204, 215, 216, -195, -192, -261, -236, -134, -102, 42, + /* 30 */ 74, -182, -158, -218, -71, 109, 162, 231, 245, 189, + /* 40 */ 130, -27, 244, 246, 35, 77, 51, 201, -269, -263, + /* 50 */ -212, -28, 16, 19, 63, 88, 100, 239, 247, 248, + /* 60 */ 249, 250, 251, 252, 254, 256, 257, 258, 260, 261, + /* 70 */ 262, 263, -219, 270, 276, 291, 292, 293, 294, 295, + /* 80 */ 296, 298, 111, 337, 339, 275, 340, 342, 345, 299, + /* 90 */ 301, 348, 350, 351, 354, 267, 268, 303, 304, 356, + /* 100 */ 358, 359, 363, 364, 365, 366, 367, 368, 369, 370, + /* 110 */ 371, 373, 374, 375, 378, 379, 380, 384, 385, 386, + /* 120 */ 387, 390, 393, 394, 395, 396, 397, 399, 401, 402, + /* 130 */ 403, 404, 406, 407, 408, 409, 411, 412, 413, 414, + /* 140 */ 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, + /* 150 */ 425, 426, 427, 428, 429, 430, 432, 433, 355, 435, + /* 160 */ 436, 282, 289, 290, 306, 437, 438, 320, 314, 331, + /* 170 */ 302, 341, 391, 439, 443, 440, 444, 446, 448, 449, + /* 180 */ 447, 450, 452, 453, 455, 456, 458, 457, 459, 462, + /* 190 */ 460, 464, 467, 463, 470, 465, 466, 468, 469, 474, + /* 200 */ 482, 472, 484, 486, 487, 490, 492, 477, 489, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 873, 997, 936, 1007, 923, 933, 1149, 1149, 
1149, 873, - /* 10 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 20 */ 873, 873, 873, 1055, 893, 1149, 873, 873, 873, 873, - /* 30 */ 873, 873, 873, 1070, 873, 933, 873, 873, 873, 873, - /* 40 */ 943, 933, 943, 943, 873, 1050, 981, 999, 873, 873, - /* 50 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 60 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 70 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 80 */ 873, 873, 873, 873, 1057, 1063, 1060, 873, 873, 873, - /* 90 */ 1065, 873, 873, 873, 1089, 1089, 1048, 873, 873, 873, - /* 100 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 110 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 120 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 130 */ 921, 873, 919, 873, 873, 873, 873, 873, 873, 873, - /* 140 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 904, - /* 150 */ 873, 873, 873, 873, 873, 873, 891, 895, 895, 895, - /* 160 */ 873, 873, 873, 873, 895, 895, 1096, 1100, 1082, 1094, - /* 170 */ 1090, 1077, 1075, 1073, 1081, 1104, 895, 895, 895, 941, - /* 180 */ 941, 937, 933, 933, 895, 895, 959, 957, 955, 947, - /* 190 */ 953, 949, 951, 945, 924, 873, 895, 895, 931, 931, - /* 200 */ 895, 931, 895, 931, 895, 895, 981, 999, 873, 1105, - /* 210 */ 1095, 873, 1148, 1135, 1134, 1142, 1141, 1133, 1132, 1131, - /* 220 */ 873, 873, 873, 1127, 1130, 1129, 1128, 1137, 1136, 873, - /* 230 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 240 */ 873, 873, 873, 1101, 1097, 873, 873, 873, 873, 873, - /* 250 */ 873, 873, 1144, 1143, 1140, 1139, 1138, 873, 873, 1107, - /* 260 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 1009, - /* 270 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 280 */ 873, 873, 1047, 873, 873, 873, 873, 873, 1059, 1058, - /* 290 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 300 */ 873, 1091, 873, 1083, 873, 873, 1021, 873, 873, 873, - /* 310 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 320 */ 873, 873, 873, 873, 873, 873, 873, 873, 873, 873, - /* 330 */ 873, 873, 873, 873, 873, 873, 873, 873, 1167, 1162, - /* 340 */ 1163, 1160, 873, 873, 873, 1159, 1154, 1155, 873, 873, - /* 350 */ 873, 1152, 873, 873, 873, 873, 873, 873, 873, 873, - /* 360 */ 873, 873, 873, 873, 873, 873, 873, 965, 873, 902, - /* 370 */ 900, 873, 873, + /* 0 */ 876, 1000, 939, 1010, 926, 936, 1153, 1153, 1153, 876, + /* 10 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 20 */ 876, 876, 876, 1058, 896, 1153, 876, 876, 876, 876, + /* 30 */ 876, 876, 876, 1073, 876, 936, 876, 876, 876, 876, + /* 40 */ 946, 936, 946, 946, 876, 1053, 984, 1002, 876, 876, + /* 50 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 60 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 70 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 80 */ 876, 876, 876, 876, 876, 1060, 1066, 1063, 876, 876, + /* 90 */ 876, 1068, 876, 876, 876, 1092, 1092, 1051, 876, 876, + /* 100 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 110 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 120 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 130 */ 876, 924, 876, 922, 876, 876, 876, 876, 876, 876, + /* 140 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 150 */ 907, 876, 876, 876, 876, 876, 876, 894, 898, 898, + /* 160 */ 898, 876, 876, 876, 876, 898, 898, 1099, 1103, 1085, + /* 170 */ 1097, 1093, 1080, 1078, 1076, 1084, 1107, 898, 898, 898, + /* 180 */ 944, 944, 940, 936, 936, 898, 898, 962, 960, 958, + /* 190 */ 950, 956, 952, 954, 948, 
927, 876, 898, 898, 934, + /* 200 */ 934, 898, 934, 898, 934, 898, 898, 984, 1002, 876, + /* 210 */ 1108, 1098, 876, 1152, 1138, 1137, 1146, 1145, 1144, 1136, + /* 220 */ 1135, 1134, 876, 876, 876, 1130, 1133, 1132, 1131, 1140, + /* 230 */ 1139, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 240 */ 876, 876, 876, 876, 876, 1104, 1100, 876, 876, 876, + /* 250 */ 876, 876, 876, 876, 1148, 1147, 1143, 1142, 1141, 876, + /* 260 */ 876, 1110, 876, 876, 876, 876, 876, 876, 876, 876, + /* 270 */ 876, 1012, 876, 876, 876, 876, 876, 876, 876, 876, + /* 280 */ 876, 876, 876, 876, 1050, 876, 876, 876, 876, 876, + /* 290 */ 1062, 1061, 876, 876, 876, 876, 876, 876, 876, 876, + /* 300 */ 876, 876, 876, 1094, 876, 1086, 876, 876, 1024, 876, + /* 310 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 320 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 330 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 876, + /* 340 */ 1171, 1166, 1167, 1164, 876, 876, 876, 1163, 1158, 1159, + /* 350 */ 876, 876, 876, 1156, 876, 876, 876, 876, 876, 876, + /* 360 */ 876, 876, 876, 876, 876, 876, 876, 876, 876, 968, + /* 370 */ 876, 905, 903, 876, 876, }; /********** End of lemon-generated parsing tables *****************************/ @@ -739,6 +742,7 @@ static const YYCODETYPE yyFallback[] = { 1, /* TIMESTAMP => ID */ 1, /* BINARY => ID */ 1, /* NCHAR => ID */ + 1, /* JSON => ID */ 0, /* OR => nothing */ 0, /* AND => nothing */ 0, /* NOT => nothing */ @@ -749,6 +753,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* IS => nothing */ 1, /* LIKE => ID */ 1, /* MATCH => ID */ + 1, /* NMATCH => ID */ 1, /* GLOB => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ @@ -1025,272 +1030,274 @@ static const char *const yyTokenName[] = { /* 10 */ "TIMESTAMP", /* 11 */ "BINARY", /* 12 */ "NCHAR", - /* 13 */ "OR", - /* 14 */ "AND", - /* 15 */ "NOT", - /* 16 */ "EQ", - /* 17 */ "NE", - /* 18 */ "ISNULL", - /* 19 */ "NOTNULL", - /* 20 */ "IS", - /* 21 */ "LIKE", - /* 22 */ "MATCH", - /* 23 */ "GLOB", - /* 24 */ "BETWEEN", - /* 25 */ "IN", - /* 26 */ "GT", - /* 27 */ "GE", - /* 28 */ "LT", - /* 29 */ "LE", - /* 30 */ "BITAND", - /* 31 */ "BITOR", - /* 32 */ "LSHIFT", - /* 33 */ "RSHIFT", - /* 34 */ "PLUS", - /* 35 */ "MINUS", - /* 36 */ "DIVIDE", - /* 37 */ "TIMES", - /* 38 */ "STAR", - /* 39 */ "SLASH", - /* 40 */ "REM", - /* 41 */ "CONCAT", - /* 42 */ "UMINUS", - /* 43 */ "UPLUS", - /* 44 */ "BITNOT", - /* 45 */ "QUESTION", - /* 46 */ "ARROW", - /* 47 */ "SHOW", - /* 48 */ "DATABASES", - /* 49 */ "TOPICS", - /* 50 */ "FUNCTIONS", - /* 51 */ "MNODES", - /* 52 */ "DNODES", - /* 53 */ "ACCOUNTS", - /* 54 */ "USERS", - /* 55 */ "MODULES", - /* 56 */ "QUERIES", - /* 57 */ "CONNECTIONS", - /* 58 */ "STREAMS", - /* 59 */ "VARIABLES", - /* 60 */ "SCORES", - /* 61 */ "GRANTS", - /* 62 */ "VNODES", - /* 63 */ "DOT", - /* 64 */ "CREATE", - /* 65 */ "TABLE", - /* 66 */ "STABLE", - /* 67 */ "DATABASE", - /* 68 */ "TABLES", - /* 69 */ "STABLES", - /* 70 */ "VGROUPS", - /* 71 */ "DROP", - /* 72 */ "TOPIC", - /* 73 */ "FUNCTION", - /* 74 */ "DNODE", - /* 75 */ "USER", - /* 76 */ "ACCOUNT", - /* 77 */ "USE", - /* 78 */ "DESCRIBE", - /* 79 */ "DESC", - /* 80 */ "ALTER", - /* 81 */ "PASS", - /* 82 */ "PRIVILEGE", - /* 83 */ "LOCAL", - /* 84 */ "COMPACT", - /* 85 */ "LP", - /* 86 */ "RP", - /* 87 */ "IF", - /* 88 */ "EXISTS", - /* 89 */ "AS", - /* 90 */ "OUTPUTTYPE", - /* 91 */ "AGGREGATE", - /* 92 */ "BUFSIZE", - /* 93 */ "PPS", - /* 94 */ "TSERIES", - /* 95 */ "DBS", - /* 96 */ "STORAGE", - /* 97 */ "QTIME", - /* 98 */ 
"CONNS", - /* 99 */ "STATE", - /* 100 */ "COMMA", - /* 101 */ "KEEP", - /* 102 */ "CACHE", - /* 103 */ "REPLICA", - /* 104 */ "QUORUM", - /* 105 */ "DAYS", - /* 106 */ "MINROWS", - /* 107 */ "MAXROWS", - /* 108 */ "BLOCKS", - /* 109 */ "CTIME", - /* 110 */ "WAL", - /* 111 */ "FSYNC", - /* 112 */ "COMP", - /* 113 */ "PRECISION", - /* 114 */ "UPDATE", - /* 115 */ "CACHELAST", - /* 116 */ "PARTITIONS", - /* 117 */ "UNSIGNED", - /* 118 */ "TAGS", - /* 119 */ "USING", - /* 120 */ "NULL", - /* 121 */ "NOW", - /* 122 */ "SELECT", - /* 123 */ "UNION", - /* 124 */ "ALL", - /* 125 */ "DISTINCT", - /* 126 */ "FROM", - /* 127 */ "VARIABLE", - /* 128 */ "INTERVAL", - /* 129 */ "EVERY", - /* 130 */ "SESSION", - /* 131 */ "STATE_WINDOW", - /* 132 */ "FILL", - /* 133 */ "SLIDING", - /* 134 */ "ORDER", - /* 135 */ "BY", - /* 136 */ "ASC", - /* 137 */ "GROUP", - /* 138 */ "HAVING", - /* 139 */ "LIMIT", - /* 140 */ "OFFSET", - /* 141 */ "SLIMIT", - /* 142 */ "SOFFSET", - /* 143 */ "WHERE", - /* 144 */ "RESET", - /* 145 */ "QUERY", - /* 146 */ "SYNCDB", - /* 147 */ "ADD", - /* 148 */ "COLUMN", - /* 149 */ "MODIFY", - /* 150 */ "TAG", - /* 151 */ "CHANGE", - /* 152 */ "SET", - /* 153 */ "KILL", - /* 154 */ "CONNECTION", - /* 155 */ "STREAM", - /* 156 */ "COLON", - /* 157 */ "ABORT", - /* 158 */ "AFTER", - /* 159 */ "ATTACH", - /* 160 */ "BEFORE", - /* 161 */ "BEGIN", - /* 162 */ "CASCADE", - /* 163 */ "CLUSTER", - /* 164 */ "CONFLICT", - /* 165 */ "COPY", - /* 166 */ "DEFERRED", - /* 167 */ "DELIMITERS", - /* 168 */ "DETACH", - /* 169 */ "EACH", - /* 170 */ "END", - /* 171 */ "EXPLAIN", - /* 172 */ "FAIL", - /* 173 */ "FOR", - /* 174 */ "IGNORE", - /* 175 */ "IMMEDIATE", - /* 176 */ "INITIALLY", - /* 177 */ "INSTEAD", - /* 178 */ "KEY", - /* 179 */ "OF", - /* 180 */ "RAISE", - /* 181 */ "REPLACE", - /* 182 */ "RESTRICT", - /* 183 */ "ROW", - /* 184 */ "STATEMENT", - /* 185 */ "TRIGGER", - /* 186 */ "VIEW", - /* 187 */ "IPTOKEN", - /* 188 */ "SEMI", - /* 189 */ "NONE", - /* 190 */ "PREV", - /* 191 */ "LINEAR", - /* 192 */ "IMPORT", - /* 193 */ "TBNAME", - /* 194 */ "JOIN", - /* 195 */ "INSERT", - /* 196 */ "INTO", - /* 197 */ "VALUES", - /* 198 */ "program", - /* 199 */ "cmd", - /* 200 */ "ids", - /* 201 */ "dbPrefix", - /* 202 */ "cpxName", - /* 203 */ "ifexists", - /* 204 */ "alter_db_optr", - /* 205 */ "alter_topic_optr", - /* 206 */ "acct_optr", - /* 207 */ "exprlist", - /* 208 */ "ifnotexists", - /* 209 */ "db_optr", - /* 210 */ "topic_optr", - /* 211 */ "typename", - /* 212 */ "bufsize", - /* 213 */ "pps", - /* 214 */ "tseries", - /* 215 */ "dbs", - /* 216 */ "streams", - /* 217 */ "storage", - /* 218 */ "qtime", - /* 219 */ "users", - /* 220 */ "conns", - /* 221 */ "state", - /* 222 */ "intitemlist", - /* 223 */ "intitem", - /* 224 */ "keep", - /* 225 */ "cache", - /* 226 */ "replica", - /* 227 */ "quorum", - /* 228 */ "days", - /* 229 */ "minrows", - /* 230 */ "maxrows", - /* 231 */ "blocks", - /* 232 */ "ctime", - /* 233 */ "wal", - /* 234 */ "fsync", - /* 235 */ "comp", - /* 236 */ "prec", - /* 237 */ "update", - /* 238 */ "cachelast", - /* 239 */ "partitions", - /* 240 */ "signed", - /* 241 */ "create_table_args", - /* 242 */ "create_stable_args", - /* 243 */ "create_table_list", - /* 244 */ "create_from_stable", - /* 245 */ "columnlist", - /* 246 */ "tagitemlist", - /* 247 */ "tagNamelist", - /* 248 */ "select", - /* 249 */ "column", - /* 250 */ "tagitem", - /* 251 */ "selcollist", - /* 252 */ "from", - /* 253 */ "where_opt", - /* 254 */ "interval_option", - /* 255 */ "sliding_opt", - /* 256 */ 
"session_option", - /* 257 */ "windowstate_option", - /* 258 */ "fill_opt", - /* 259 */ "groupby_opt", - /* 260 */ "having_opt", - /* 261 */ "orderby_opt", - /* 262 */ "slimit_opt", - /* 263 */ "limit_opt", - /* 264 */ "union", - /* 265 */ "sclp", - /* 266 */ "distinct", - /* 267 */ "expr", - /* 268 */ "as", - /* 269 */ "tablelist", - /* 270 */ "sub", - /* 271 */ "tmvar", - /* 272 */ "intervalKey", - /* 273 */ "sortlist", - /* 274 */ "sortitem", - /* 275 */ "item", - /* 276 */ "sortorder", - /* 277 */ "grouplist", - /* 278 */ "expritem", + /* 13 */ "JSON", + /* 14 */ "OR", + /* 15 */ "AND", + /* 16 */ "NOT", + /* 17 */ "EQ", + /* 18 */ "NE", + /* 19 */ "ISNULL", + /* 20 */ "NOTNULL", + /* 21 */ "IS", + /* 22 */ "LIKE", + /* 23 */ "MATCH", + /* 24 */ "NMATCH", + /* 25 */ "GLOB", + /* 26 */ "BETWEEN", + /* 27 */ "IN", + /* 28 */ "GT", + /* 29 */ "GE", + /* 30 */ "LT", + /* 31 */ "LE", + /* 32 */ "BITAND", + /* 33 */ "BITOR", + /* 34 */ "LSHIFT", + /* 35 */ "RSHIFT", + /* 36 */ "PLUS", + /* 37 */ "MINUS", + /* 38 */ "DIVIDE", + /* 39 */ "TIMES", + /* 40 */ "STAR", + /* 41 */ "SLASH", + /* 42 */ "REM", + /* 43 */ "CONCAT", + /* 44 */ "UMINUS", + /* 45 */ "UPLUS", + /* 46 */ "BITNOT", + /* 47 */ "QUESTION", + /* 48 */ "ARROW", + /* 49 */ "SHOW", + /* 50 */ "DATABASES", + /* 51 */ "TOPICS", + /* 52 */ "FUNCTIONS", + /* 53 */ "MNODES", + /* 54 */ "DNODES", + /* 55 */ "ACCOUNTS", + /* 56 */ "USERS", + /* 57 */ "MODULES", + /* 58 */ "QUERIES", + /* 59 */ "CONNECTIONS", + /* 60 */ "STREAMS", + /* 61 */ "VARIABLES", + /* 62 */ "SCORES", + /* 63 */ "GRANTS", + /* 64 */ "VNODES", + /* 65 */ "DOT", + /* 66 */ "CREATE", + /* 67 */ "TABLE", + /* 68 */ "STABLE", + /* 69 */ "DATABASE", + /* 70 */ "TABLES", + /* 71 */ "STABLES", + /* 72 */ "VGROUPS", + /* 73 */ "DROP", + /* 74 */ "TOPIC", + /* 75 */ "FUNCTION", + /* 76 */ "DNODE", + /* 77 */ "USER", + /* 78 */ "ACCOUNT", + /* 79 */ "USE", + /* 80 */ "DESCRIBE", + /* 81 */ "DESC", + /* 82 */ "ALTER", + /* 83 */ "PASS", + /* 84 */ "PRIVILEGE", + /* 85 */ "LOCAL", + /* 86 */ "COMPACT", + /* 87 */ "LP", + /* 88 */ "RP", + /* 89 */ "IF", + /* 90 */ "EXISTS", + /* 91 */ "AS", + /* 92 */ "OUTPUTTYPE", + /* 93 */ "AGGREGATE", + /* 94 */ "BUFSIZE", + /* 95 */ "PPS", + /* 96 */ "TSERIES", + /* 97 */ "DBS", + /* 98 */ "STORAGE", + /* 99 */ "QTIME", + /* 100 */ "CONNS", + /* 101 */ "STATE", + /* 102 */ "COMMA", + /* 103 */ "KEEP", + /* 104 */ "CACHE", + /* 105 */ "REPLICA", + /* 106 */ "QUORUM", + /* 107 */ "DAYS", + /* 108 */ "MINROWS", + /* 109 */ "MAXROWS", + /* 110 */ "BLOCKS", + /* 111 */ "CTIME", + /* 112 */ "WAL", + /* 113 */ "FSYNC", + /* 114 */ "COMP", + /* 115 */ "PRECISION", + /* 116 */ "UPDATE", + /* 117 */ "CACHELAST", + /* 118 */ "PARTITIONS", + /* 119 */ "UNSIGNED", + /* 120 */ "TAGS", + /* 121 */ "USING", + /* 122 */ "NULL", + /* 123 */ "NOW", + /* 124 */ "SELECT", + /* 125 */ "UNION", + /* 126 */ "ALL", + /* 127 */ "DISTINCT", + /* 128 */ "FROM", + /* 129 */ "VARIABLE", + /* 130 */ "INTERVAL", + /* 131 */ "EVERY", + /* 132 */ "SESSION", + /* 133 */ "STATE_WINDOW", + /* 134 */ "FILL", + /* 135 */ "SLIDING", + /* 136 */ "ORDER", + /* 137 */ "BY", + /* 138 */ "ASC", + /* 139 */ "GROUP", + /* 140 */ "HAVING", + /* 141 */ "LIMIT", + /* 142 */ "OFFSET", + /* 143 */ "SLIMIT", + /* 144 */ "SOFFSET", + /* 145 */ "WHERE", + /* 146 */ "RESET", + /* 147 */ "QUERY", + /* 148 */ "SYNCDB", + /* 149 */ "ADD", + /* 150 */ "COLUMN", + /* 151 */ "MODIFY", + /* 152 */ "TAG", + /* 153 */ "CHANGE", + /* 154 */ "SET", + /* 155 */ "KILL", + /* 156 */ "CONNECTION", + /* 157 */ 
"STREAM", + /* 158 */ "COLON", + /* 159 */ "ABORT", + /* 160 */ "AFTER", + /* 161 */ "ATTACH", + /* 162 */ "BEFORE", + /* 163 */ "BEGIN", + /* 164 */ "CASCADE", + /* 165 */ "CLUSTER", + /* 166 */ "CONFLICT", + /* 167 */ "COPY", + /* 168 */ "DEFERRED", + /* 169 */ "DELIMITERS", + /* 170 */ "DETACH", + /* 171 */ "EACH", + /* 172 */ "END", + /* 173 */ "EXPLAIN", + /* 174 */ "FAIL", + /* 175 */ "FOR", + /* 176 */ "IGNORE", + /* 177 */ "IMMEDIATE", + /* 178 */ "INITIALLY", + /* 179 */ "INSTEAD", + /* 180 */ "KEY", + /* 181 */ "OF", + /* 182 */ "RAISE", + /* 183 */ "REPLACE", + /* 184 */ "RESTRICT", + /* 185 */ "ROW", + /* 186 */ "STATEMENT", + /* 187 */ "TRIGGER", + /* 188 */ "VIEW", + /* 189 */ "IPTOKEN", + /* 190 */ "SEMI", + /* 191 */ "NONE", + /* 192 */ "PREV", + /* 193 */ "LINEAR", + /* 194 */ "IMPORT", + /* 195 */ "TBNAME", + /* 196 */ "JOIN", + /* 197 */ "INSERT", + /* 198 */ "INTO", + /* 199 */ "VALUES", + /* 200 */ "program", + /* 201 */ "cmd", + /* 202 */ "ids", + /* 203 */ "dbPrefix", + /* 204 */ "cpxName", + /* 205 */ "ifexists", + /* 206 */ "alter_db_optr", + /* 207 */ "alter_topic_optr", + /* 208 */ "acct_optr", + /* 209 */ "exprlist", + /* 210 */ "ifnotexists", + /* 211 */ "db_optr", + /* 212 */ "topic_optr", + /* 213 */ "typename", + /* 214 */ "bufsize", + /* 215 */ "pps", + /* 216 */ "tseries", + /* 217 */ "dbs", + /* 218 */ "streams", + /* 219 */ "storage", + /* 220 */ "qtime", + /* 221 */ "users", + /* 222 */ "conns", + /* 223 */ "state", + /* 224 */ "intitemlist", + /* 225 */ "intitem", + /* 226 */ "keep", + /* 227 */ "cache", + /* 228 */ "replica", + /* 229 */ "quorum", + /* 230 */ "days", + /* 231 */ "minrows", + /* 232 */ "maxrows", + /* 233 */ "blocks", + /* 234 */ "ctime", + /* 235 */ "wal", + /* 236 */ "fsync", + /* 237 */ "comp", + /* 238 */ "prec", + /* 239 */ "update", + /* 240 */ "cachelast", + /* 241 */ "partitions", + /* 242 */ "signed", + /* 243 */ "create_table_args", + /* 244 */ "create_stable_args", + /* 245 */ "create_table_list", + /* 246 */ "create_from_stable", + /* 247 */ "columnlist", + /* 248 */ "tagitemlist", + /* 249 */ "tagNamelist", + /* 250 */ "select", + /* 251 */ "column", + /* 252 */ "tagitem", + /* 253 */ "selcollist", + /* 254 */ "from", + /* 255 */ "where_opt", + /* 256 */ "interval_option", + /* 257 */ "sliding_opt", + /* 258 */ "session_option", + /* 259 */ "windowstate_option", + /* 260 */ "fill_opt", + /* 261 */ "groupby_opt", + /* 262 */ "having_opt", + /* 263 */ "orderby_opt", + /* 264 */ "slimit_opt", + /* 265 */ "limit_opt", + /* 266 */ "union", + /* 267 */ "sclp", + /* 268 */ "distinct", + /* 269 */ "expr", + /* 270 */ "as", + /* 271 */ "tablelist", + /* 272 */ "sub", + /* 273 */ "tmvar", + /* 274 */ "intervalKey", + /* 275 */ "sortlist", + /* 276 */ "sortitem", + /* 277 */ "item", + /* 278 */ "sortorder", + /* 279 */ "grouplist", + /* 280 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1565,34 +1572,35 @@ static const char *const yyRuleName[] = { /* 264 */ "expr ::= expr REM expr", /* 265 */ "expr ::= expr LIKE expr", /* 266 */ "expr ::= expr MATCH expr", - /* 267 */ "expr ::= expr QUESTION expr", - /* 268 */ "expr ::= expr ARROW expr", - /* 269 */ "expr ::= expr IN LP exprlist RP", - /* 270 */ "exprlist ::= exprlist COMMA expritem", - /* 271 */ "exprlist ::= expritem", - /* 272 */ "expritem ::= expr", - /* 273 */ "expritem ::=", - /* 274 */ "cmd ::= RESET QUERY CACHE", - /* 275 */ "cmd ::= SYNCDB ids REPLICA", - /* 276 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 277 */ "cmd ::= ALTER 
TABLE ids cpxName DROP COLUMN ids", - /* 278 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", - /* 279 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 280 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 281 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 282 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 283 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", - /* 284 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 285 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 286 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", - /* 287 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 288 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 289 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 290 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", - /* 291 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", - /* 292 */ "cmd ::= KILL CONNECTION INTEGER", - /* 293 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 294 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 267 */ "expr ::= expr NMATCH expr", + /* 268 */ "expr ::= expr QUESTION expr", + /* 269 */ "expr ::= expr ARROW expr", + /* 270 */ "expr ::= expr IN LP exprlist RP", + /* 271 */ "exprlist ::= exprlist COMMA expritem", + /* 272 */ "exprlist ::= expritem", + /* 273 */ "expritem ::= expr", + /* 274 */ "expritem ::=", + /* 275 */ "cmd ::= RESET QUERY CACHE", + /* 276 */ "cmd ::= SYNCDB ids REPLICA", + /* 277 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 278 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 279 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", + /* 280 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 281 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 282 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 283 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 284 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", + /* 285 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 286 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 287 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 288 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 289 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 290 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 291 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 292 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 293 */ "cmd ::= KILL CONNECTION INTEGER", + /* 294 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 295 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1718,76 +1726,76 @@ static void yy_destructor( ** inside the C code. 
*/ /********* Begin destructor definitions ***************************************/ - case 207: /* exprlist */ - case 251: /* selcollist */ - case 265: /* sclp */ + case 209: /* exprlist */ + case 253: /* selcollist */ + case 267: /* sclp */ { -#line 768 "sql.y" -tSqlExprListDestroy((yypminor->yy135)); -#line 1726 "sql.c" +#line 769 "sql.y" +tSqlExprListDestroy((yypminor->yy247)); +#line 1735 "sql.c" } break; - case 222: /* intitemlist */ - case 224: /* keep */ - case 245: /* columnlist */ - case 246: /* tagitemlist */ - case 247: /* tagNamelist */ - case 258: /* fill_opt */ - case 259: /* groupby_opt */ - case 261: /* orderby_opt */ - case 273: /* sortlist */ - case 277: /* grouplist */ + case 224: /* intitemlist */ + case 226: /* keep */ + case 247: /* columnlist */ + case 248: /* tagitemlist */ + case 249: /* tagNamelist */ + case 260: /* fill_opt */ + case 261: /* groupby_opt */ + case 263: /* orderby_opt */ + case 275: /* sortlist */ + case 279: /* grouplist */ { #line 257 "sql.y" -taosArrayDestroy((yypminor->yy135)); -#line 1742 "sql.c" +taosArrayDestroy((yypminor->yy247)); +#line 1751 "sql.c" } break; - case 243: /* create_table_list */ + case 245: /* create_table_list */ { #line 365 "sql.y" -destroyCreateTableSql((yypminor->yy110)); -#line 1749 "sql.c" +destroyCreateTableSql((yypminor->yy336)); +#line 1758 "sql.c" } break; - case 248: /* select */ + case 250: /* select */ { #line 485 "sql.y" -destroySqlNode((yypminor->yy488)); -#line 1756 "sql.c" +destroySqlNode((yypminor->yy246)); +#line 1765 "sql.c" } break; - case 252: /* from */ - case 269: /* tablelist */ - case 270: /* sub */ + case 254: /* from */ + case 271: /* tablelist */ + case 272: /* sub */ { #line 540 "sql.y" -destroyRelationInfo((yypminor->yy460)); -#line 1765 "sql.c" +destroyRelationInfo((yypminor->yy46)); +#line 1774 "sql.c" } break; - case 253: /* where_opt */ - case 260: /* having_opt */ - case 267: /* expr */ - case 278: /* expritem */ + case 255: /* where_opt */ + case 262: /* having_opt */ + case 269: /* expr */ + case 280: /* expritem */ { #line 692 "sql.y" -tSqlExprDestroy((yypminor->yy526)); -#line 1775 "sql.c" +tSqlExprDestroy((yypminor->yy44)); +#line 1784 "sql.c" } break; - case 264: /* union */ + case 266: /* union */ { #line 493 "sql.y" -destroyAllSqlNode((yypminor->yy135)); -#line 1782 "sql.c" +destroyAllSqlNode((yypminor->yy247)); +#line 1791 "sql.c" } break; - case 274: /* sortitem */ + case 276: /* sortitem */ { #line 625 "sql.y" -tVariantDestroy(&(yypminor->yy308)); -#line 1789 "sql.c" +tVariantDestroy(&(yypminor->yy378)); +#line 1798 "sql.c" } break; /********* End destructor definitions *****************************************/ @@ -2076,301 +2084,302 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 198, /* (0) program ::= cmd */ - 199, /* (1) cmd ::= SHOW DATABASES */ - 199, /* (2) cmd ::= SHOW TOPICS */ - 199, /* (3) cmd ::= SHOW FUNCTIONS */ - 199, /* (4) cmd ::= SHOW MNODES */ - 199, /* (5) cmd ::= SHOW DNODES */ - 199, /* (6) cmd ::= SHOW ACCOUNTS */ - 199, /* (7) cmd ::= SHOW USERS */ - 199, /* (8) cmd ::= SHOW MODULES */ - 199, /* (9) cmd ::= SHOW QUERIES */ - 199, /* (10) cmd ::= SHOW CONNECTIONS */ - 199, /* (11) cmd ::= SHOW STREAMS */ - 199, /* (12) cmd ::= SHOW VARIABLES */ - 199, /* (13) cmd ::= SHOW SCORES */ - 199, /* (14) cmd ::= SHOW GRANTS */ - 199, /* (15) cmd ::= SHOW VNODES */ - 199, /* (16) cmd ::= SHOW VNODES ids */ - 201, /* (17) dbPrefix ::= */ - 201, /* 
(18) dbPrefix ::= ids DOT */ - 202, /* (19) cpxName ::= */ - 202, /* (20) cpxName ::= DOT ids */ - 199, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ - 199, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ - 199, /* (23) cmd ::= SHOW CREATE DATABASE ids */ - 199, /* (24) cmd ::= SHOW dbPrefix TABLES */ - 199, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - 199, /* (26) cmd ::= SHOW dbPrefix STABLES */ - 199, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - 199, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ - 199, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ - 199, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ - 199, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ - 199, /* (32) cmd ::= DROP DATABASE ifexists ids */ - 199, /* (33) cmd ::= DROP TOPIC ifexists ids */ - 199, /* (34) cmd ::= DROP FUNCTION ids */ - 199, /* (35) cmd ::= DROP DNODE ids */ - 199, /* (36) cmd ::= DROP USER ids */ - 199, /* (37) cmd ::= DROP ACCOUNT ids */ - 199, /* (38) cmd ::= USE ids */ - 199, /* (39) cmd ::= DESCRIBE ids cpxName */ - 199, /* (40) cmd ::= DESC ids cpxName */ - 199, /* (41) cmd ::= ALTER USER ids PASS ids */ - 199, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ - 199, /* (43) cmd ::= ALTER DNODE ids ids */ - 199, /* (44) cmd ::= ALTER DNODE ids ids ids */ - 199, /* (45) cmd ::= ALTER LOCAL ids */ - 199, /* (46) cmd ::= ALTER LOCAL ids ids */ - 199, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ - 199, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ - 199, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ - 199, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - 199, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ - 200, /* (52) ids ::= ID */ - 200, /* (53) ids ::= STRING */ - 203, /* (54) ifexists ::= IF EXISTS */ - 203, /* (55) ifexists ::= */ - 208, /* (56) ifnotexists ::= IF NOT EXISTS */ - 208, /* (57) ifnotexists ::= */ - 199, /* (58) cmd ::= CREATE DNODE ids */ - 199, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - 199, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - 199, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - 199, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - 199, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ - 199, /* (64) cmd ::= CREATE USER ids PASS ids */ - 212, /* (65) bufsize ::= */ - 212, /* (66) bufsize ::= BUFSIZE INTEGER */ - 213, /* (67) pps ::= */ - 213, /* (68) pps ::= PPS INTEGER */ - 214, /* (69) tseries ::= */ - 214, /* (70) tseries ::= TSERIES INTEGER */ - 215, /* (71) dbs ::= */ - 215, /* (72) dbs ::= DBS INTEGER */ - 216, /* (73) streams ::= */ - 216, /* (74) streams ::= STREAMS INTEGER */ - 217, /* (75) storage ::= */ - 217, /* (76) storage ::= STORAGE INTEGER */ - 218, /* (77) qtime ::= */ - 218, /* (78) qtime ::= QTIME INTEGER */ - 219, /* (79) users ::= */ - 219, /* (80) users ::= USERS INTEGER */ - 220, /* (81) conns ::= */ - 220, /* (82) conns ::= CONNS INTEGER */ - 221, /* (83) state ::= */ - 221, /* (84) state ::= STATE ids */ - 206, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - 222, /* (86) intitemlist ::= intitemlist COMMA intitem */ - 222, /* (87) intitemlist ::= intitem */ - 223, /* (88) intitem ::= INTEGER */ - 224, /* (89) keep ::= KEEP intitemlist */ - 225, /* (90) cache ::= CACHE INTEGER */ - 226, /* (91) replica ::= REPLICA INTEGER */ - 227, /* (92) quorum ::= QUORUM INTEGER */ - 228, /* (93) days ::= DAYS INTEGER */ - 229, /* (94) minrows ::= MINROWS INTEGER */ - 230, /* (95) 
maxrows ::= MAXROWS INTEGER */ - 231, /* (96) blocks ::= BLOCKS INTEGER */ - 232, /* (97) ctime ::= CTIME INTEGER */ - 233, /* (98) wal ::= WAL INTEGER */ - 234, /* (99) fsync ::= FSYNC INTEGER */ - 235, /* (100) comp ::= COMP INTEGER */ - 236, /* (101) prec ::= PRECISION STRING */ - 237, /* (102) update ::= UPDATE INTEGER */ - 238, /* (103) cachelast ::= CACHELAST INTEGER */ - 239, /* (104) partitions ::= PARTITIONS INTEGER */ - 209, /* (105) db_optr ::= */ - 209, /* (106) db_optr ::= db_optr cache */ - 209, /* (107) db_optr ::= db_optr replica */ - 209, /* (108) db_optr ::= db_optr quorum */ - 209, /* (109) db_optr ::= db_optr days */ - 209, /* (110) db_optr ::= db_optr minrows */ - 209, /* (111) db_optr ::= db_optr maxrows */ - 209, /* (112) db_optr ::= db_optr blocks */ - 209, /* (113) db_optr ::= db_optr ctime */ - 209, /* (114) db_optr ::= db_optr wal */ - 209, /* (115) db_optr ::= db_optr fsync */ - 209, /* (116) db_optr ::= db_optr comp */ - 209, /* (117) db_optr ::= db_optr prec */ - 209, /* (118) db_optr ::= db_optr keep */ - 209, /* (119) db_optr ::= db_optr update */ - 209, /* (120) db_optr ::= db_optr cachelast */ - 210, /* (121) topic_optr ::= db_optr */ - 210, /* (122) topic_optr ::= topic_optr partitions */ - 204, /* (123) alter_db_optr ::= */ - 204, /* (124) alter_db_optr ::= alter_db_optr replica */ - 204, /* (125) alter_db_optr ::= alter_db_optr quorum */ - 204, /* (126) alter_db_optr ::= alter_db_optr keep */ - 204, /* (127) alter_db_optr ::= alter_db_optr blocks */ - 204, /* (128) alter_db_optr ::= alter_db_optr comp */ - 204, /* (129) alter_db_optr ::= alter_db_optr update */ - 204, /* (130) alter_db_optr ::= alter_db_optr cachelast */ - 205, /* (131) alter_topic_optr ::= alter_db_optr */ - 205, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ - 211, /* (133) typename ::= ids */ - 211, /* (134) typename ::= ids LP signed RP */ - 211, /* (135) typename ::= ids UNSIGNED */ - 240, /* (136) signed ::= INTEGER */ - 240, /* (137) signed ::= PLUS INTEGER */ - 240, /* (138) signed ::= MINUS INTEGER */ - 199, /* (139) cmd ::= CREATE TABLE create_table_args */ - 199, /* (140) cmd ::= CREATE TABLE create_stable_args */ - 199, /* (141) cmd ::= CREATE STABLE create_stable_args */ - 199, /* (142) cmd ::= CREATE TABLE create_table_list */ - 243, /* (143) create_table_list ::= create_from_stable */ - 243, /* (144) create_table_list ::= create_table_list create_from_stable */ - 241, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - 242, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - 244, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - 244, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - 247, /* (149) tagNamelist ::= tagNamelist COMMA ids */ - 247, /* (150) tagNamelist ::= ids */ - 241, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */ - 245, /* (152) columnlist ::= columnlist COMMA column */ - 245, /* (153) columnlist ::= column */ - 249, /* (154) column ::= ids typename */ - 246, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ - 246, /* (156) tagitemlist ::= tagitem */ - 250, /* (157) tagitem ::= INTEGER */ - 250, /* (158) tagitem ::= FLOAT */ - 250, /* (159) tagitem ::= STRING */ - 250, /* (160) tagitem ::= BOOL */ - 250, /* (161) tagitem ::= NULL */ - 250, /* (162) tagitem ::= NOW */ - 250, /* (163) tagitem ::= MINUS INTEGER */ - 250, /* (164) 
tagitem ::= MINUS FLOAT */ - 250, /* (165) tagitem ::= PLUS INTEGER */ - 250, /* (166) tagitem ::= PLUS FLOAT */ - 248, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ - 248, /* (168) select ::= LP select RP */ - 264, /* (169) union ::= select */ - 264, /* (170) union ::= union UNION ALL select */ - 199, /* (171) cmd ::= union */ - 248, /* (172) select ::= SELECT selcollist */ - 265, /* (173) sclp ::= selcollist COMMA */ - 265, /* (174) sclp ::= */ - 251, /* (175) selcollist ::= sclp distinct expr as */ - 251, /* (176) selcollist ::= sclp STAR */ - 268, /* (177) as ::= AS ids */ - 268, /* (178) as ::= ids */ - 268, /* (179) as ::= */ - 266, /* (180) distinct ::= DISTINCT */ - 266, /* (181) distinct ::= */ - 252, /* (182) from ::= FROM tablelist */ - 252, /* (183) from ::= FROM sub */ - 270, /* (184) sub ::= LP union RP */ - 270, /* (185) sub ::= LP union RP ids */ - 270, /* (186) sub ::= sub COMMA LP union RP ids */ - 269, /* (187) tablelist ::= ids cpxName */ - 269, /* (188) tablelist ::= ids cpxName ids */ - 269, /* (189) tablelist ::= tablelist COMMA ids cpxName */ - 269, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ - 271, /* (191) tmvar ::= VARIABLE */ - 254, /* (192) interval_option ::= intervalKey LP tmvar RP */ - 254, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ - 254, /* (194) interval_option ::= */ - 272, /* (195) intervalKey ::= INTERVAL */ - 272, /* (196) intervalKey ::= EVERY */ - 256, /* (197) session_option ::= */ - 256, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - 257, /* (199) windowstate_option ::= */ - 257, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */ - 258, /* (201) fill_opt ::= */ - 258, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - 258, /* (203) fill_opt ::= FILL LP ID RP */ - 255, /* (204) sliding_opt ::= SLIDING LP tmvar RP */ - 255, /* (205) sliding_opt ::= */ - 261, /* (206) orderby_opt ::= */ - 261, /* (207) orderby_opt ::= ORDER BY sortlist */ - 273, /* (208) sortlist ::= sortlist COMMA item sortorder */ - 273, /* (209) sortlist ::= item sortorder */ - 275, /* (210) item ::= ids cpxName */ - 276, /* (211) sortorder ::= ASC */ - 276, /* (212) sortorder ::= DESC */ - 276, /* (213) sortorder ::= */ - 259, /* (214) groupby_opt ::= */ - 259, /* (215) groupby_opt ::= GROUP BY grouplist */ - 277, /* (216) grouplist ::= grouplist COMMA item */ - 277, /* (217) grouplist ::= item */ - 260, /* (218) having_opt ::= */ - 260, /* (219) having_opt ::= HAVING expr */ - 263, /* (220) limit_opt ::= */ - 263, /* (221) limit_opt ::= LIMIT signed */ - 263, /* (222) limit_opt ::= LIMIT signed OFFSET signed */ - 263, /* (223) limit_opt ::= LIMIT signed COMMA signed */ - 262, /* (224) slimit_opt ::= */ - 262, /* (225) slimit_opt ::= SLIMIT signed */ - 262, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */ - 262, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */ - 253, /* (228) where_opt ::= */ - 253, /* (229) where_opt ::= WHERE expr */ - 267, /* (230) expr ::= LP expr RP */ - 267, /* (231) expr ::= ID */ - 267, /* (232) expr ::= ID DOT ID */ - 267, /* (233) expr ::= ID DOT STAR */ - 267, /* (234) expr ::= INTEGER */ - 267, /* (235) expr ::= MINUS INTEGER */ - 267, /* (236) expr ::= PLUS INTEGER */ - 267, /* (237) expr ::= FLOAT */ - 267, /* (238) expr ::= MINUS FLOAT */ - 267, /* (239) expr ::= PLUS FLOAT */ - 267, /* (240) expr ::= STRING */ - 267, /* 
(241) expr ::= NOW */ - 267, /* (242) expr ::= VARIABLE */ - 267, /* (243) expr ::= PLUS VARIABLE */ - 267, /* (244) expr ::= MINUS VARIABLE */ - 267, /* (245) expr ::= BOOL */ - 267, /* (246) expr ::= NULL */ - 267, /* (247) expr ::= ID LP exprlist RP */ - 267, /* (248) expr ::= ID LP STAR RP */ - 267, /* (249) expr ::= expr IS NULL */ - 267, /* (250) expr ::= expr IS NOT NULL */ - 267, /* (251) expr ::= expr LT expr */ - 267, /* (252) expr ::= expr GT expr */ - 267, /* (253) expr ::= expr LE expr */ - 267, /* (254) expr ::= expr GE expr */ - 267, /* (255) expr ::= expr NE expr */ - 267, /* (256) expr ::= expr EQ expr */ - 267, /* (257) expr ::= expr BETWEEN expr AND expr */ - 267, /* (258) expr ::= expr AND expr */ - 267, /* (259) expr ::= expr OR expr */ - 267, /* (260) expr ::= expr PLUS expr */ - 267, /* (261) expr ::= expr MINUS expr */ - 267, /* (262) expr ::= expr STAR expr */ - 267, /* (263) expr ::= expr SLASH expr */ - 267, /* (264) expr ::= expr REM expr */ - 267, /* (265) expr ::= expr LIKE expr */ - 267, /* (266) expr ::= expr MATCH expr */ - 267, /* (267) expr ::= expr QUESTION expr */ - 267, /* (268) expr ::= expr ARROW expr */ - 267, /* (269) expr ::= expr IN LP exprlist RP */ - 207, /* (270) exprlist ::= exprlist COMMA expritem */ - 207, /* (271) exprlist ::= expritem */ - 278, /* (272) expritem ::= expr */ - 278, /* (273) expritem ::= */ - 199, /* (274) cmd ::= RESET QUERY CACHE */ - 199, /* (275) cmd ::= SYNCDB ids REPLICA */ - 199, /* (276) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - 199, /* (277) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - 199, /* (278) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - 199, /* (279) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - 199, /* (280) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - 199, /* (281) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - 199, /* (282) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - 199, /* (283) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - 199, /* (284) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - 199, /* (285) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 199, /* (286) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - 199, /* (287) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 199, /* (288) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 199, /* (289) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 199, /* (290) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - 199, /* (291) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - 199, /* (292) cmd ::= KILL CONNECTION INTEGER */ - 199, /* (293) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 199, /* (294) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + 200, /* (0) program ::= cmd */ + 201, /* (1) cmd ::= SHOW DATABASES */ + 201, /* (2) cmd ::= SHOW TOPICS */ + 201, /* (3) cmd ::= SHOW FUNCTIONS */ + 201, /* (4) cmd ::= SHOW MNODES */ + 201, /* (5) cmd ::= SHOW DNODES */ + 201, /* (6) cmd ::= SHOW ACCOUNTS */ + 201, /* (7) cmd ::= SHOW USERS */ + 201, /* (8) cmd ::= SHOW MODULES */ + 201, /* (9) cmd ::= SHOW QUERIES */ + 201, /* (10) cmd ::= SHOW CONNECTIONS */ + 201, /* (11) cmd ::= SHOW STREAMS */ + 201, /* (12) cmd ::= SHOW VARIABLES */ + 201, /* (13) cmd ::= SHOW SCORES */ + 201, /* (14) cmd ::= SHOW GRANTS */ + 201, /* (15) cmd ::= SHOW VNODES */ + 201, /* (16) cmd ::= SHOW VNODES ids */ + 203, /* (17) dbPrefix ::= */ + 203, /* (18) dbPrefix ::= ids DOT */ + 204, /* (19) cpxName ::= */ + 204, /* (20) 
cpxName ::= DOT ids */ + 201, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ + 201, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ + 201, /* (23) cmd ::= SHOW CREATE DATABASE ids */ + 201, /* (24) cmd ::= SHOW dbPrefix TABLES */ + 201, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + 201, /* (26) cmd ::= SHOW dbPrefix STABLES */ + 201, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + 201, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ + 201, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ + 201, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ + 201, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ + 201, /* (32) cmd ::= DROP DATABASE ifexists ids */ + 201, /* (33) cmd ::= DROP TOPIC ifexists ids */ + 201, /* (34) cmd ::= DROP FUNCTION ids */ + 201, /* (35) cmd ::= DROP DNODE ids */ + 201, /* (36) cmd ::= DROP USER ids */ + 201, /* (37) cmd ::= DROP ACCOUNT ids */ + 201, /* (38) cmd ::= USE ids */ + 201, /* (39) cmd ::= DESCRIBE ids cpxName */ + 201, /* (40) cmd ::= DESC ids cpxName */ + 201, /* (41) cmd ::= ALTER USER ids PASS ids */ + 201, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */ + 201, /* (43) cmd ::= ALTER DNODE ids ids */ + 201, /* (44) cmd ::= ALTER DNODE ids ids ids */ + 201, /* (45) cmd ::= ALTER LOCAL ids */ + 201, /* (46) cmd ::= ALTER LOCAL ids ids */ + 201, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */ + 201, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */ + 201, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */ + 201, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + 201, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */ + 202, /* (52) ids ::= ID */ + 202, /* (53) ids ::= STRING */ + 205, /* (54) ifexists ::= IF EXISTS */ + 205, /* (55) ifexists ::= */ + 210, /* (56) ifnotexists ::= IF NOT EXISTS */ + 210, /* (57) ifnotexists ::= */ + 201, /* (58) cmd ::= CREATE DNODE ids */ + 201, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + 201, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + 201, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + 201, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 201, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + 201, /* (64) cmd ::= CREATE USER ids PASS ids */ + 214, /* (65) bufsize ::= */ + 214, /* (66) bufsize ::= BUFSIZE INTEGER */ + 215, /* (67) pps ::= */ + 215, /* (68) pps ::= PPS INTEGER */ + 216, /* (69) tseries ::= */ + 216, /* (70) tseries ::= TSERIES INTEGER */ + 217, /* (71) dbs ::= */ + 217, /* (72) dbs ::= DBS INTEGER */ + 218, /* (73) streams ::= */ + 218, /* (74) streams ::= STREAMS INTEGER */ + 219, /* (75) storage ::= */ + 219, /* (76) storage ::= STORAGE INTEGER */ + 220, /* (77) qtime ::= */ + 220, /* (78) qtime ::= QTIME INTEGER */ + 221, /* (79) users ::= */ + 221, /* (80) users ::= USERS INTEGER */ + 222, /* (81) conns ::= */ + 222, /* (82) conns ::= CONNS INTEGER */ + 223, /* (83) state ::= */ + 223, /* (84) state ::= STATE ids */ + 208, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + 224, /* (86) intitemlist ::= intitemlist COMMA intitem */ + 224, /* (87) intitemlist ::= intitem */ + 225, /* (88) intitem ::= INTEGER */ + 226, /* (89) keep ::= KEEP intitemlist */ + 227, /* (90) cache ::= CACHE INTEGER */ + 228, /* (91) replica ::= REPLICA INTEGER */ + 229, /* (92) quorum ::= QUORUM INTEGER */ + 230, /* (93) days ::= DAYS INTEGER */ + 231, /* (94) minrows ::= MINROWS INTEGER */ + 232, /* (95) maxrows ::= MAXROWS INTEGER */ + 233, /* (96) blocks ::= BLOCKS INTEGER 
*/ + 234, /* (97) ctime ::= CTIME INTEGER */ + 235, /* (98) wal ::= WAL INTEGER */ + 236, /* (99) fsync ::= FSYNC INTEGER */ + 237, /* (100) comp ::= COMP INTEGER */ + 238, /* (101) prec ::= PRECISION STRING */ + 239, /* (102) update ::= UPDATE INTEGER */ + 240, /* (103) cachelast ::= CACHELAST INTEGER */ + 241, /* (104) partitions ::= PARTITIONS INTEGER */ + 211, /* (105) db_optr ::= */ + 211, /* (106) db_optr ::= db_optr cache */ + 211, /* (107) db_optr ::= db_optr replica */ + 211, /* (108) db_optr ::= db_optr quorum */ + 211, /* (109) db_optr ::= db_optr days */ + 211, /* (110) db_optr ::= db_optr minrows */ + 211, /* (111) db_optr ::= db_optr maxrows */ + 211, /* (112) db_optr ::= db_optr blocks */ + 211, /* (113) db_optr ::= db_optr ctime */ + 211, /* (114) db_optr ::= db_optr wal */ + 211, /* (115) db_optr ::= db_optr fsync */ + 211, /* (116) db_optr ::= db_optr comp */ + 211, /* (117) db_optr ::= db_optr prec */ + 211, /* (118) db_optr ::= db_optr keep */ + 211, /* (119) db_optr ::= db_optr update */ + 211, /* (120) db_optr ::= db_optr cachelast */ + 212, /* (121) topic_optr ::= db_optr */ + 212, /* (122) topic_optr ::= topic_optr partitions */ + 206, /* (123) alter_db_optr ::= */ + 206, /* (124) alter_db_optr ::= alter_db_optr replica */ + 206, /* (125) alter_db_optr ::= alter_db_optr quorum */ + 206, /* (126) alter_db_optr ::= alter_db_optr keep */ + 206, /* (127) alter_db_optr ::= alter_db_optr blocks */ + 206, /* (128) alter_db_optr ::= alter_db_optr comp */ + 206, /* (129) alter_db_optr ::= alter_db_optr update */ + 206, /* (130) alter_db_optr ::= alter_db_optr cachelast */ + 207, /* (131) alter_topic_optr ::= alter_db_optr */ + 207, /* (132) alter_topic_optr ::= alter_topic_optr partitions */ + 213, /* (133) typename ::= ids */ + 213, /* (134) typename ::= ids LP signed RP */ + 213, /* (135) typename ::= ids UNSIGNED */ + 242, /* (136) signed ::= INTEGER */ + 242, /* (137) signed ::= PLUS INTEGER */ + 242, /* (138) signed ::= MINUS INTEGER */ + 201, /* (139) cmd ::= CREATE TABLE create_table_args */ + 201, /* (140) cmd ::= CREATE TABLE create_stable_args */ + 201, /* (141) cmd ::= CREATE STABLE create_stable_args */ + 201, /* (142) cmd ::= CREATE TABLE create_table_list */ + 245, /* (143) create_table_list ::= create_from_stable */ + 245, /* (144) create_table_list ::= create_table_list create_from_stable */ + 243, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + 244, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + 246, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + 246, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + 249, /* (149) tagNamelist ::= tagNamelist COMMA ids */ + 249, /* (150) tagNamelist ::= ids */ + 243, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */ + 247, /* (152) columnlist ::= columnlist COMMA column */ + 247, /* (153) columnlist ::= column */ + 251, /* (154) column ::= ids typename */ + 248, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */ + 248, /* (156) tagitemlist ::= tagitem */ + 252, /* (157) tagitem ::= INTEGER */ + 252, /* (158) tagitem ::= FLOAT */ + 252, /* (159) tagitem ::= STRING */ + 252, /* (160) tagitem ::= BOOL */ + 252, /* (161) tagitem ::= NULL */ + 252, /* (162) tagitem ::= NOW */ + 252, /* (163) tagitem ::= MINUS INTEGER */ + 252, /* (164) tagitem ::= MINUS FLOAT */ + 252, /* (165) tagitem ::= PLUS INTEGER */ + 
252, /* (166) tagitem ::= PLUS FLOAT */ + 250, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + 250, /* (168) select ::= LP select RP */ + 266, /* (169) union ::= select */ + 266, /* (170) union ::= union UNION ALL select */ + 201, /* (171) cmd ::= union */ + 250, /* (172) select ::= SELECT selcollist */ + 267, /* (173) sclp ::= selcollist COMMA */ + 267, /* (174) sclp ::= */ + 253, /* (175) selcollist ::= sclp distinct expr as */ + 253, /* (176) selcollist ::= sclp STAR */ + 270, /* (177) as ::= AS ids */ + 270, /* (178) as ::= ids */ + 270, /* (179) as ::= */ + 268, /* (180) distinct ::= DISTINCT */ + 268, /* (181) distinct ::= */ + 254, /* (182) from ::= FROM tablelist */ + 254, /* (183) from ::= FROM sub */ + 272, /* (184) sub ::= LP union RP */ + 272, /* (185) sub ::= LP union RP ids */ + 272, /* (186) sub ::= sub COMMA LP union RP ids */ + 271, /* (187) tablelist ::= ids cpxName */ + 271, /* (188) tablelist ::= ids cpxName ids */ + 271, /* (189) tablelist ::= tablelist COMMA ids cpxName */ + 271, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */ + 273, /* (191) tmvar ::= VARIABLE */ + 256, /* (192) interval_option ::= intervalKey LP tmvar RP */ + 256, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ + 256, /* (194) interval_option ::= */ + 274, /* (195) intervalKey ::= INTERVAL */ + 274, /* (196) intervalKey ::= EVERY */ + 258, /* (197) session_option ::= */ + 258, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 259, /* (199) windowstate_option ::= */ + 259, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */ + 260, /* (201) fill_opt ::= */ + 260, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + 260, /* (203) fill_opt ::= FILL LP ID RP */ + 257, /* (204) sliding_opt ::= SLIDING LP tmvar RP */ + 257, /* (205) sliding_opt ::= */ + 263, /* (206) orderby_opt ::= */ + 263, /* (207) orderby_opt ::= ORDER BY sortlist */ + 275, /* (208) sortlist ::= sortlist COMMA item sortorder */ + 275, /* (209) sortlist ::= item sortorder */ + 277, /* (210) item ::= ids cpxName */ + 278, /* (211) sortorder ::= ASC */ + 278, /* (212) sortorder ::= DESC */ + 278, /* (213) sortorder ::= */ + 261, /* (214) groupby_opt ::= */ + 261, /* (215) groupby_opt ::= GROUP BY grouplist */ + 279, /* (216) grouplist ::= grouplist COMMA item */ + 279, /* (217) grouplist ::= item */ + 262, /* (218) having_opt ::= */ + 262, /* (219) having_opt ::= HAVING expr */ + 265, /* (220) limit_opt ::= */ + 265, /* (221) limit_opt ::= LIMIT signed */ + 265, /* (222) limit_opt ::= LIMIT signed OFFSET signed */ + 265, /* (223) limit_opt ::= LIMIT signed COMMA signed */ + 264, /* (224) slimit_opt ::= */ + 264, /* (225) slimit_opt ::= SLIMIT signed */ + 264, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */ + 264, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */ + 255, /* (228) where_opt ::= */ + 255, /* (229) where_opt ::= WHERE expr */ + 269, /* (230) expr ::= LP expr RP */ + 269, /* (231) expr ::= ID */ + 269, /* (232) expr ::= ID DOT ID */ + 269, /* (233) expr ::= ID DOT STAR */ + 269, /* (234) expr ::= INTEGER */ + 269, /* (235) expr ::= MINUS INTEGER */ + 269, /* (236) expr ::= PLUS INTEGER */ + 269, /* (237) expr ::= FLOAT */ + 269, /* (238) expr ::= MINUS FLOAT */ + 269, /* (239) expr ::= PLUS FLOAT */ + 269, /* (240) expr ::= STRING */ + 269, /* (241) expr ::= NOW */ + 269, /* (242) expr ::= VARIABLE */ + 269, /* (243) 
expr ::= PLUS VARIABLE */ + 269, /* (244) expr ::= MINUS VARIABLE */ + 269, /* (245) expr ::= BOOL */ + 269, /* (246) expr ::= NULL */ + 269, /* (247) expr ::= ID LP exprlist RP */ + 269, /* (248) expr ::= ID LP STAR RP */ + 269, /* (249) expr ::= expr IS NULL */ + 269, /* (250) expr ::= expr IS NOT NULL */ + 269, /* (251) expr ::= expr LT expr */ + 269, /* (252) expr ::= expr GT expr */ + 269, /* (253) expr ::= expr LE expr */ + 269, /* (254) expr ::= expr GE expr */ + 269, /* (255) expr ::= expr NE expr */ + 269, /* (256) expr ::= expr EQ expr */ + 269, /* (257) expr ::= expr BETWEEN expr AND expr */ + 269, /* (258) expr ::= expr AND expr */ + 269, /* (259) expr ::= expr OR expr */ + 269, /* (260) expr ::= expr PLUS expr */ + 269, /* (261) expr ::= expr MINUS expr */ + 269, /* (262) expr ::= expr STAR expr */ + 269, /* (263) expr ::= expr SLASH expr */ + 269, /* (264) expr ::= expr REM expr */ + 269, /* (265) expr ::= expr LIKE expr */ + 269, /* (266) expr ::= expr MATCH expr */ + 269, /* (267) expr ::= expr NMATCH expr */ + 269, /* (268) expr ::= expr QUESTION expr */ + 269, /* (269) expr ::= expr ARROW expr */ + 269, /* (270) expr ::= expr IN LP exprlist RP */ + 209, /* (271) exprlist ::= exprlist COMMA expritem */ + 209, /* (272) exprlist ::= expritem */ + 280, /* (273) expritem ::= expr */ + 280, /* (274) expritem ::= */ + 201, /* (275) cmd ::= RESET QUERY CACHE */ + 201, /* (276) cmd ::= SYNCDB ids REPLICA */ + 201, /* (277) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 201, /* (278) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 201, /* (279) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + 201, /* (280) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 201, /* (281) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 201, /* (282) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 201, /* (283) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 201, /* (284) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + 201, /* (285) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 201, /* (286) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 201, /* (287) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + 201, /* (288) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 201, /* (289) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 201, /* (290) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 201, /* (291) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + 201, /* (292) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + 201, /* (293) cmd ::= KILL CONNECTION INTEGER */ + 201, /* (294) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 201, /* (295) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -2643,34 +2652,35 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (264) expr ::= expr REM expr */ -3, /* (265) expr ::= expr LIKE expr */ -3, /* (266) expr ::= expr MATCH expr */ - -3, /* (267) expr ::= expr QUESTION expr */ - -3, /* (268) expr ::= expr ARROW expr */ - -5, /* (269) expr ::= expr IN LP exprlist RP */ - -3, /* (270) exprlist ::= exprlist COMMA expritem */ - -1, /* (271) exprlist ::= expritem */ - -1, /* (272) expritem ::= expr */ - 0, /* (273) expritem ::= */ - -3, /* (274) cmd ::= RESET QUERY CACHE */ - -3, /* (275) cmd ::= SYNCDB ids REPLICA */ - -7, /* (276) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (277) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - -7, /* 
(278) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (279) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - -7, /* (280) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - -8, /* (281) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (282) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (283) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - -7, /* (284) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (285) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -7, /* (286) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (287) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (288) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (289) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (290) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (291) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - -3, /* (292) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (293) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (294) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + -3, /* (267) expr ::= expr NMATCH expr */ + -3, /* (268) expr ::= expr QUESTION expr */ + -3, /* (269) expr ::= expr ARROW expr */ + -5, /* (270) expr ::= expr IN LP exprlist RP */ + -3, /* (271) exprlist ::= exprlist COMMA expritem */ + -1, /* (272) exprlist ::= expritem */ + -1, /* (273) expritem ::= expr */ + 0, /* (274) expritem ::= */ + -3, /* (275) cmd ::= RESET QUERY CACHE */ + -3, /* (276) cmd ::= SYNCDB ids REPLICA */ + -7, /* (277) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (278) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + -7, /* (279) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (280) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (281) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (282) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (283) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (284) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + -7, /* (285) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (286) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (287) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (288) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (289) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (290) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (291) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (292) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + -3, /* (293) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (294) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (295) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2718,108 +2728,108 @@ static YYACTIONTYPE yy_reduce( case 141: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==141); #line 64 "sql.y" {} -#line 2720 "sql.c" +#line 2731 "sql.c" break; case 1: /* cmd ::= SHOW DATABASES */ #line 67 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} -#line 2725 "sql.c" +#line 2736 "sql.c" break; case 2: /* cmd ::= SHOW TOPICS */ #line 68 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TP, 0, 0);} -#line 2730 "sql.c" +#line 2741 "sql.c" break; case 3: /* cmd ::= SHOW FUNCTIONS */ #line 69 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_FUNCTION, 
0, 0);} -#line 2735 "sql.c" +#line 2746 "sql.c" break; case 4: /* cmd ::= SHOW MNODES */ #line 70 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} -#line 2740 "sql.c" +#line 2751 "sql.c" break; case 5: /* cmd ::= SHOW DNODES */ #line 71 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} -#line 2745 "sql.c" +#line 2756 "sql.c" break; case 6: /* cmd ::= SHOW ACCOUNTS */ #line 72 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} -#line 2750 "sql.c" +#line 2761 "sql.c" break; case 7: /* cmd ::= SHOW USERS */ #line 73 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} -#line 2755 "sql.c" +#line 2766 "sql.c" break; case 8: /* cmd ::= SHOW MODULES */ #line 75 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } -#line 2760 "sql.c" +#line 2771 "sql.c" break; case 9: /* cmd ::= SHOW QUERIES */ #line 76 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } -#line 2765 "sql.c" +#line 2776 "sql.c" break; case 10: /* cmd ::= SHOW CONNECTIONS */ #line 77 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} -#line 2770 "sql.c" +#line 2781 "sql.c" break; case 11: /* cmd ::= SHOW STREAMS */ #line 78 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } -#line 2775 "sql.c" +#line 2786 "sql.c" break; case 12: /* cmd ::= SHOW VARIABLES */ #line 79 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VARIABLES, 0, 0); } -#line 2780 "sql.c" +#line 2791 "sql.c" break; case 13: /* cmd ::= SHOW SCORES */ #line 80 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } -#line 2785 "sql.c" +#line 2796 "sql.c" break; case 14: /* cmd ::= SHOW GRANTS */ #line 81 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } -#line 2790 "sql.c" +#line 2801 "sql.c" break; case 15: /* cmd ::= SHOW VNODES */ #line 83 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } -#line 2795 "sql.c" +#line 2806 "sql.c" break; case 16: /* cmd ::= SHOW VNODES ids */ #line 84 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); } -#line 2800 "sql.c" +#line 2811 "sql.c" break; case 17: /* dbPrefix ::= */ #line 88 "sql.y" {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;} -#line 2805 "sql.c" +#line 2816 "sql.c" break; case 18: /* dbPrefix ::= ids DOT */ #line 89 "sql.y" {yylhsminor.yy0 = yymsp[-1].minor.yy0; } -#line 2810 "sql.c" +#line 2821 "sql.c" yymsp[-1].minor.yy0 = yylhsminor.yy0; break; case 19: /* cpxName ::= */ #line 92 "sql.y" {yymsp[1].minor.yy0.n = 0; } -#line 2816 "sql.c" +#line 2827 "sql.c" break; case 20: /* cpxName ::= DOT ids */ #line 93 "sql.y" {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; } -#line 2821 "sql.c" +#line 2832 "sql.c" break; case 21: /* cmd ::= SHOW CREATE TABLE ids cpxName */ #line 95 "sql.y" @@ -2827,7 +2837,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0); } -#line 2829 "sql.c" +#line 2840 "sql.c" break; case 22: /* cmd ::= SHOW CREATE STABLE ids cpxName */ #line 99 "sql.y" @@ -2835,35 +2845,35 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, &yymsp[-1].minor.yy0); } -#line 2837 "sql.c" +#line 2848 "sql.c" break; case 23: /* cmd ::= SHOW CREATE DATABASE ids */ #line 104 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0); } -#line 2844 "sql.c" +#line 2855 "sql.c" break; case 24: /* cmd ::= SHOW dbPrefix TABLES 
*/ #line 108 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } -#line 2851 "sql.c" +#line 2862 "sql.c" break; case 25: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ #line 112 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } -#line 2858 "sql.c" +#line 2869 "sql.c" break; case 26: /* cmd ::= SHOW dbPrefix STABLES */ #line 116 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0); } -#line 2865 "sql.c" +#line 2876 "sql.c" break; case 27: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ #line 120 "sql.y" @@ -2872,7 +2882,7 @@ static YYACTIONTYPE yy_reduce( tSetDbName(&token, &yymsp[-3].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } -#line 2874 "sql.c" +#line 2885 "sql.c" break; case 28: /* cmd ::= SHOW dbPrefix VGROUPS */ #line 126 "sql.y" @@ -2881,7 +2891,7 @@ static YYACTIONTYPE yy_reduce( tSetDbName(&token, &yymsp[-1].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } -#line 2883 "sql.c" +#line 2894 "sql.c" break; case 29: /* cmd ::= SHOW dbPrefix VGROUPS ids */ #line 132 "sql.y" @@ -2890,7 +2900,7 @@ static YYACTIONTYPE yy_reduce( tSetDbName(&token, &yymsp[-2].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } -#line 2892 "sql.c" +#line 2903 "sql.c" break; case 30: /* cmd ::= DROP TABLE ifexists ids cpxName */ #line 139 "sql.y" @@ -2898,7 +2908,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1); } -#line 2900 "sql.c" +#line 2911 "sql.c" break; case 31: /* cmd ::= DROP STABLE ifexists ids cpxName */ #line 145 "sql.y" @@ -2906,42 +2916,42 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE); } -#line 2908 "sql.c" +#line 2919 "sql.c" break; case 32: /* cmd ::= DROP DATABASE ifexists ids */ #line 150 "sql.y" { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); } -#line 2913 "sql.c" +#line 2924 "sql.c" break; case 33: /* cmd ::= DROP TOPIC ifexists ids */ #line 151 "sql.y" { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); } -#line 2918 "sql.c" +#line 2929 "sql.c" break; case 34: /* cmd ::= DROP FUNCTION ids */ #line 152 "sql.y" { setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &yymsp[0].minor.yy0); } -#line 2923 "sql.c" +#line 2934 "sql.c" break; case 35: /* cmd ::= DROP DNODE ids */ #line 154 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } -#line 2928 "sql.c" +#line 2939 "sql.c" break; case 36: /* cmd ::= DROP USER ids */ #line 155 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } -#line 2933 "sql.c" +#line 2944 "sql.c" break; case 37: /* cmd ::= DROP ACCOUNT ids */ #line 156 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } -#line 2938 "sql.c" +#line 2949 "sql.c" break; case 38: /* cmd ::= USE ids */ #line 159 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} -#line 2943 "sql.c" +#line 2954 "sql.c" break; case 39: /* cmd ::= DESCRIBE ids cpxName */ case 40: /* cmd ::= DESC ids cpxName */ yytestcase(yyruleno==40); @@ -2950,113 +2960,113 @@ static YYACTIONTYPE yy_reduce( 
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } -#line 2952 "sql.c" +#line 2963 "sql.c" break; case 41: /* cmd ::= ALTER USER ids PASS ids */ #line 171 "sql.y" { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } -#line 2957 "sql.c" +#line 2968 "sql.c" break; case 42: /* cmd ::= ALTER USER ids PRIVILEGE ids */ #line 172 "sql.y" { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} -#line 2962 "sql.c" +#line 2973 "sql.c" break; case 43: /* cmd ::= ALTER DNODE ids ids */ #line 173 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } -#line 2967 "sql.c" +#line 2978 "sql.c" break; case 44: /* cmd ::= ALTER DNODE ids ids ids */ #line 174 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } -#line 2972 "sql.c" +#line 2983 "sql.c" break; case 45: /* cmd ::= ALTER LOCAL ids */ #line 175 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } -#line 2977 "sql.c" +#line 2988 "sql.c" break; case 46: /* cmd ::= ALTER LOCAL ids ids */ #line 176 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } -#line 2982 "sql.c" +#line 2993 "sql.c" break; case 47: /* cmd ::= ALTER DATABASE ids alter_db_optr */ case 48: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==48); #line 177 "sql.y" -{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy256, &t);} -#line 2988 "sql.c" +{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &t);} +#line 2999 "sql.c" break; case 49: /* cmd ::= ALTER ACCOUNT ids acct_optr */ #line 180 "sql.y" -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy277);} -#line 2993 "sql.c" +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy47);} +#line 3004 "sql.c" break; case 50: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ #line 181 "sql.y" -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy277);} -#line 2998 "sql.c" +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy47);} +#line 3009 "sql.c" break; case 51: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ #line 185 "sql.y" -{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy135);} -#line 3003 "sql.c" +{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy247);} +#line 3014 "sql.c" break; case 52: /* ids ::= ID */ case 53: /* ids ::= STRING */ yytestcase(yyruleno==53); #line 191 "sql.y" {yylhsminor.yy0 = yymsp[0].minor.yy0; } -#line 3009 "sql.c" +#line 3020 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 54: /* ifexists ::= IF EXISTS */ #line 195 "sql.y" { yymsp[-1].minor.yy0.n = 1;} -#line 3015 "sql.c" +#line 3026 "sql.c" break; case 55: /* ifexists ::= */ case 57: /* ifnotexists ::= */ yytestcase(yyruleno==57); case 181: /* distinct ::= */ yytestcase(yyruleno==181); #line 196 "sql.y" { yymsp[1].minor.yy0.n = 0;} -#line 3022 "sql.c" +#line 3033 "sql.c" break; case 56: /* ifnotexists ::= IF NOT EXISTS */ #line 199 "sql.y" { yymsp[-2].minor.yy0.n = 1;} -#line 3027 "sql.c" +#line 3038 "sql.c" break; case 58: /* cmd ::= CREATE 
DNODE ids */ #line 204 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} -#line 3032 "sql.c" +#line 3043 "sql.c" break; case 59: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ #line 206 "sql.y" -{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy277);} -#line 3037 "sql.c" +{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy47);} +#line 3048 "sql.c" break; case 60: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ case 61: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==61); #line 207 "sql.y" -{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy256, &yymsp[-2].minor.yy0);} -#line 3043 "sql.c" +{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy262, &yymsp[-2].minor.yy0);} +#line 3054 "sql.c" break; case 62: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ #line 209 "sql.y" -{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy181, &yymsp[0].minor.yy0, 1);} -#line 3048 "sql.c" +{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy179, &yymsp[0].minor.yy0, 1);} +#line 3059 "sql.c" break; case 63: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ #line 210 "sql.y" -{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy181, &yymsp[0].minor.yy0, 2);} -#line 3053 "sql.c" +{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy179, &yymsp[0].minor.yy0, 2);} +#line 3064 "sql.c" break; case 64: /* cmd ::= CREATE USER ids PASS ids */ #line 211 "sql.y" { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} -#line 3058 "sql.c" +#line 3069 "sql.c" break; case 65: /* bufsize ::= */ case 67: /* pps ::= */ yytestcase(yyruleno==67); @@ -3070,7 +3080,7 @@ static YYACTIONTYPE yy_reduce( case 83: /* state ::= */ yytestcase(yyruleno==83); #line 213 "sql.y" { yymsp[1].minor.yy0.n = 0; } -#line 3072 "sql.c" +#line 3083 "sql.c" break; case 66: /* bufsize ::= BUFSIZE INTEGER */ case 68: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==68); @@ -3084,37 +3094,37 @@ static YYACTIONTYPE yy_reduce( case 84: /* state ::= STATE ids */ yytestcase(yyruleno==84); #line 214 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } -#line 3086 "sql.c" +#line 3097 "sql.c" break; case 85: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ #line 244 "sql.y" { - yylhsminor.yy277.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy277.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy277.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy277.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy277.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy277.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy277.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy277.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy277.stat = yymsp[0].minor.yy0; + 
yylhsminor.yy47.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy47.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy47.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy47.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy47.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy47.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy47.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy47.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy47.stat = yymsp[0].minor.yy0; } -#line 3101 "sql.c" - yymsp[-8].minor.yy277 = yylhsminor.yy277; +#line 3112 "sql.c" + yymsp[-8].minor.yy47 = yylhsminor.yy47; break; case 86: /* intitemlist ::= intitemlist COMMA intitem */ case 155: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==155); #line 260 "sql.y" -{ yylhsminor.yy135 = tVariantListAppend(yymsp[-2].minor.yy135, &yymsp[0].minor.yy308, -1); } -#line 3108 "sql.c" - yymsp[-2].minor.yy135 = yylhsminor.yy135; +{ yylhsminor.yy247 = tVariantListAppend(yymsp[-2].minor.yy247, &yymsp[0].minor.yy378, -1); } +#line 3119 "sql.c" + yymsp[-2].minor.yy247 = yylhsminor.yy247; break; case 87: /* intitemlist ::= intitem */ case 156: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==156); #line 261 "sql.y" -{ yylhsminor.yy135 = tVariantListAppend(NULL, &yymsp[0].minor.yy308, -1); } -#line 3115 "sql.c" - yymsp[0].minor.yy135 = yylhsminor.yy135; +{ yylhsminor.yy247 = tVariantListAppend(NULL, &yymsp[0].minor.yy378, -1); } +#line 3126 "sql.c" + yymsp[0].minor.yy247 = yylhsminor.yy247; break; case 88: /* intitem ::= INTEGER */ case 157: /* tagitem ::= INTEGER */ yytestcase(yyruleno==157); @@ -3122,14 +3132,14 @@ static YYACTIONTYPE yy_reduce( case 159: /* tagitem ::= STRING */ yytestcase(yyruleno==159); case 160: /* tagitem ::= BOOL */ yytestcase(yyruleno==160); #line 263 "sql.y" -{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy308, &yymsp[0].minor.yy0); } -#line 3125 "sql.c" - yymsp[0].minor.yy308 = yylhsminor.yy308; +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy378, &yymsp[0].minor.yy0); } +#line 3136 "sql.c" + yymsp[0].minor.yy378 = yylhsminor.yy378; break; case 89: /* keep ::= KEEP intitemlist */ #line 267 "sql.y" -{ yymsp[-1].minor.yy135 = yymsp[0].minor.yy135; } -#line 3131 "sql.c" +{ yymsp[-1].minor.yy247 = yymsp[0].minor.yy247; } +#line 3142 "sql.c" break; case 90: /* cache ::= CACHE INTEGER */ case 91: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==91); @@ -3148,182 +3158,182 @@ static YYACTIONTYPE yy_reduce( case 104: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==104); #line 269 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } -#line 3150 "sql.c" +#line 3161 "sql.c" break; case 105: /* db_optr ::= */ #line 286 "sql.y" -{setDefaultCreateDbOption(&yymsp[1].minor.yy256); yymsp[1].minor.yy256.dbType = TSDB_DB_TYPE_DEFAULT;} -#line 3155 "sql.c" +{setDefaultCreateDbOption(&yymsp[1].minor.yy262); yymsp[1].minor.yy262.dbType = TSDB_DB_TYPE_DEFAULT;} +#line 3166 "sql.c" break; case 106: /* db_optr ::= db_optr cache */ #line 288 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3160 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ 
yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3171 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 107: /* db_optr ::= db_optr replica */ case 124: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==124); #line 289 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3167 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3178 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 108: /* db_optr ::= db_optr quorum */ case 125: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==125); #line 290 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3174 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3185 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 109: /* db_optr ::= db_optr days */ #line 291 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3180 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3191 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 110: /* db_optr ::= db_optr minrows */ #line 292 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } -#line 3186 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } +#line 3197 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 111: /* db_optr ::= db_optr maxrows */ #line 293 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } -#line 3192 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } +#line 3203 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 112: /* db_optr ::= db_optr blocks */ case 127: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==127); #line 294 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3199 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3210 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 113: /* db_optr ::= db_optr ctime */ #line 295 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3205 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3216 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 114: /* db_optr ::= db_optr wal */ #line 296 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; 
yylhsminor.yy256.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3211 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3222 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 115: /* db_optr ::= db_optr fsync */ #line 297 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3217 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3228 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 116: /* db_optr ::= db_optr comp */ case 128: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==128); #line 298 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3224 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3235 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 117: /* db_optr ::= db_optr prec */ #line 299 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.precision = yymsp[0].minor.yy0; } -#line 3230 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.precision = yymsp[0].minor.yy0; } +#line 3241 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 118: /* db_optr ::= db_optr keep */ case 126: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==126); #line 300 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.keep = yymsp[0].minor.yy135; } -#line 3237 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.keep = yymsp[0].minor.yy247; } +#line 3248 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 119: /* db_optr ::= db_optr update */ case 129: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==129); #line 301 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3244 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3255 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 120: /* db_optr ::= db_optr cachelast */ case 130: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==130); #line 302 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3251 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3262 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 121: /* topic_optr ::= db_optr */ case 131: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==131); #line 306 "sql.y" -{ yylhsminor.yy256 = yymsp[0].minor.yy256; yylhsminor.yy256.dbType = TSDB_DB_TYPE_TOPIC; } -#line 3258 "sql.c" - yymsp[0].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[0].minor.yy262; yylhsminor.yy262.dbType = TSDB_DB_TYPE_TOPIC; } +#line 3269 "sql.c" + 
yymsp[0].minor.yy262 = yylhsminor.yy262; break; case 122: /* topic_optr ::= topic_optr partitions */ case 132: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==132); #line 307 "sql.y" -{ yylhsminor.yy256 = yymsp[-1].minor.yy256; yylhsminor.yy256.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3265 "sql.c" - yymsp[-1].minor.yy256 = yylhsminor.yy256; +{ yylhsminor.yy262 = yymsp[-1].minor.yy262; yylhsminor.yy262.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3276 "sql.c" + yymsp[-1].minor.yy262 = yylhsminor.yy262; break; case 123: /* alter_db_optr ::= */ #line 310 "sql.y" -{ setDefaultCreateDbOption(&yymsp[1].minor.yy256); yymsp[1].minor.yy256.dbType = TSDB_DB_TYPE_DEFAULT;} -#line 3271 "sql.c" +{ setDefaultCreateDbOption(&yymsp[1].minor.yy262); yymsp[1].minor.yy262.dbType = TSDB_DB_TYPE_DEFAULT;} +#line 3282 "sql.c" break; case 133: /* typename ::= ids */ #line 330 "sql.y" { yymsp[0].minor.yy0.type = 0; - tSetColumnType (&yylhsminor.yy181, &yymsp[0].minor.yy0); + tSetColumnType (&yylhsminor.yy179, &yymsp[0].minor.yy0); } -#line 3279 "sql.c" - yymsp[0].minor.yy181 = yylhsminor.yy181; +#line 3290 "sql.c" + yymsp[0].minor.yy179 = yylhsminor.yy179; break; case 134: /* typename ::= ids LP signed RP */ #line 336 "sql.y" { - if (yymsp[-1].minor.yy531 <= 0) { + if (yymsp[-1].minor.yy403 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSetColumnType(&yylhsminor.yy181, &yymsp[-3].minor.yy0); + tSetColumnType(&yylhsminor.yy179, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy531; // negative value of name length - tSetColumnType(&yylhsminor.yy181, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy403; // negative value of name length + tSetColumnType(&yylhsminor.yy179, &yymsp[-3].minor.yy0); } } -#line 3293 "sql.c" - yymsp[-3].minor.yy181 = yylhsminor.yy181; +#line 3304 "sql.c" + yymsp[-3].minor.yy179 = yylhsminor.yy179; break; case 135: /* typename ::= ids UNSIGNED */ #line 347 "sql.y" { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); - tSetColumnType (&yylhsminor.yy181, &yymsp[-1].minor.yy0); + tSetColumnType (&yylhsminor.yy179, &yymsp[-1].minor.yy0); } -#line 3303 "sql.c" - yymsp[-1].minor.yy181 = yylhsminor.yy181; +#line 3314 "sql.c" + yymsp[-1].minor.yy179 = yylhsminor.yy179; break; case 136: /* signed ::= INTEGER */ #line 354 "sql.y" -{ yylhsminor.yy531 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3309 "sql.c" - yymsp[0].minor.yy531 = yylhsminor.yy531; +{ yylhsminor.yy403 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3320 "sql.c" + yymsp[0].minor.yy403 = yylhsminor.yy403; break; case 137: /* signed ::= PLUS INTEGER */ #line 355 "sql.y" -{ yymsp[-1].minor.yy531 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3315 "sql.c" +{ yymsp[-1].minor.yy403 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +#line 3326 "sql.c" break; case 138: /* signed ::= MINUS INTEGER */ #line 356 "sql.y" -{ yymsp[-1].minor.yy531 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} -#line 3320 "sql.c" +{ yymsp[-1].minor.yy403 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +#line 3331 "sql.c" break; case 142: /* cmd ::= CREATE TABLE create_table_list */ #line 362 "sql.y" -{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy110;} -#line 3325 "sql.c" +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy336;} +#line 3336 "sql.c" break; case 143: /* create_table_list ::= create_from_stable */ 
#line 366 "sql.y" @@ -3331,121 +3341,121 @@ static YYACTIONTYPE yy_reduce( SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); - taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy78); + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy42); pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; - yylhsminor.yy110 = pCreateTable; + yylhsminor.yy336 = pCreateTable; } -#line 3337 "sql.c" - yymsp[0].minor.yy110 = yylhsminor.yy110; +#line 3348 "sql.c" + yymsp[0].minor.yy336 = yylhsminor.yy336; break; case 144: /* create_table_list ::= create_table_list create_from_stable */ #line 375 "sql.y" { - taosArrayPush(yymsp[-1].minor.yy110->childTableInfo, &yymsp[0].minor.yy78); - yylhsminor.yy110 = yymsp[-1].minor.yy110; + taosArrayPush(yymsp[-1].minor.yy336->childTableInfo, &yymsp[0].minor.yy42); + yylhsminor.yy336 = yymsp[-1].minor.yy336; } -#line 3346 "sql.c" - yymsp[-1].minor.yy110 = yylhsminor.yy110; +#line 3357 "sql.c" + yymsp[-1].minor.yy336 = yylhsminor.yy336; break; case 145: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ #line 381 "sql.y" { - yylhsminor.yy110 = tSetCreateTableInfo(yymsp[-1].minor.yy135, NULL, NULL, TSQL_CREATE_TABLE); - setSqlInfo(pInfo, yylhsminor.yy110, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy336 = tSetCreateTableInfo(yymsp[-1].minor.yy247, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy336, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } -#line 3358 "sql.c" - yymsp[-5].minor.yy110 = yylhsminor.yy110; +#line 3369 "sql.c" + yymsp[-5].minor.yy336 = yylhsminor.yy336; break; case 146: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ #line 391 "sql.y" { - yylhsminor.yy110 = tSetCreateTableInfo(yymsp[-5].minor.yy135, yymsp[-1].minor.yy135, NULL, TSQL_CREATE_STABLE); - setSqlInfo(pInfo, yylhsminor.yy110, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy336 = tSetCreateTableInfo(yymsp[-5].minor.yy247, yymsp[-1].minor.yy247, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy336, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } -#line 3370 "sql.c" - yymsp[-9].minor.yy110 = yylhsminor.yy110; +#line 3381 "sql.c" + yymsp[-9].minor.yy336 = yylhsminor.yy336; break; case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ #line 402 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy78 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy135, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy42 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy247, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } -#line 3380 "sql.c" - yymsp[-9].minor.yy78 = yylhsminor.yy78; +#line 3391 "sql.c" + yymsp[-9].minor.yy42 = yylhsminor.yy42; break; case 148: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ #line 408 "sql.y" { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; - yylhsminor.yy78 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy135, yymsp[-1].minor.yy135, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); + 
yylhsminor.yy42 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy247, yymsp[-1].minor.yy247, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } -#line 3390 "sql.c" - yymsp[-12].minor.yy78 = yylhsminor.yy78; +#line 3401 "sql.c" + yymsp[-12].minor.yy42 = yylhsminor.yy42; break; case 149: /* tagNamelist ::= tagNamelist COMMA ids */ #line 416 "sql.y" -{taosArrayPush(yymsp[-2].minor.yy135, &yymsp[0].minor.yy0); yylhsminor.yy135 = yymsp[-2].minor.yy135; } -#line 3396 "sql.c" - yymsp[-2].minor.yy135 = yylhsminor.yy135; +{taosArrayPush(yymsp[-2].minor.yy247, &yymsp[0].minor.yy0); yylhsminor.yy247 = yymsp[-2].minor.yy247; } +#line 3407 "sql.c" + yymsp[-2].minor.yy247 = yylhsminor.yy247; break; case 150: /* tagNamelist ::= ids */ #line 417 "sql.y" -{yylhsminor.yy135 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy135, &yymsp[0].minor.yy0);} -#line 3402 "sql.c" - yymsp[0].minor.yy135 = yylhsminor.yy135; +{yylhsminor.yy247 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy247, &yymsp[0].minor.yy0);} +#line 3413 "sql.c" + yymsp[0].minor.yy247 = yylhsminor.yy247; break; case 151: /* create_table_args ::= ifnotexists ids cpxName AS select */ #line 421 "sql.y" { - yylhsminor.yy110 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy488, TSQL_CREATE_STREAM); - setSqlInfo(pInfo, yylhsminor.yy110, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy336 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy246, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy336, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } -#line 3414 "sql.c" - yymsp[-4].minor.yy110 = yylhsminor.yy110; +#line 3425 "sql.c" + yymsp[-4].minor.yy336 = yylhsminor.yy336; break; case 152: /* columnlist ::= columnlist COMMA column */ #line 432 "sql.y" -{taosArrayPush(yymsp[-2].minor.yy135, &yymsp[0].minor.yy181); yylhsminor.yy135 = yymsp[-2].minor.yy135; } -#line 3420 "sql.c" - yymsp[-2].minor.yy135 = yylhsminor.yy135; +{taosArrayPush(yymsp[-2].minor.yy247, &yymsp[0].minor.yy179); yylhsminor.yy247 = yymsp[-2].minor.yy247; } +#line 3431 "sql.c" + yymsp[-2].minor.yy247 = yylhsminor.yy247; break; case 153: /* columnlist ::= column */ #line 433 "sql.y" -{yylhsminor.yy135 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy135, &yymsp[0].minor.yy181);} -#line 3426 "sql.c" - yymsp[0].minor.yy135 = yylhsminor.yy135; +{yylhsminor.yy247 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy247, &yymsp[0].minor.yy179);} +#line 3437 "sql.c" + yymsp[0].minor.yy247 = yylhsminor.yy247; break; case 154: /* column ::= ids typename */ #line 437 "sql.y" { - tSetColumnInfo(&yylhsminor.yy181, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy181); + tSetColumnInfo(&yylhsminor.yy179, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy179); } -#line 3434 "sql.c" - yymsp[-1].minor.yy181 = yylhsminor.yy181; +#line 3445 "sql.c" + yymsp[-1].minor.yy179 = yylhsminor.yy179; break; case 161: /* tagitem ::= NULL */ #line 452 "sql.y" -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy308, &yymsp[0].minor.yy0); } -#line 3440 "sql.c" - yymsp[0].minor.yy308 = yylhsminor.yy308; +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy378, &yymsp[0].minor.yy0); } +#line 3451 "sql.c" + yymsp[0].minor.yy378 = yylhsminor.yy378; break; case 162: /* tagitem ::= NOW */ #line 453 "sql.y" -{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy308, &yymsp[0].minor.yy0);} -#line 3446 "sql.c" - 
yymsp[0].minor.yy308 = yylhsminor.yy308; +{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy378, &yymsp[0].minor.yy0);} +#line 3457 "sql.c" + yymsp[0].minor.yy378 = yylhsminor.yy378; break; case 163: /* tagitem ::= MINUS INTEGER */ case 164: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==164); @@ -3456,219 +3466,219 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy308, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy378, &yymsp[-1].minor.yy0); } -#line 3460 "sql.c" - yymsp[-1].minor.yy308 = yylhsminor.yy308; +#line 3471 "sql.c" + yymsp[-1].minor.yy378 = yylhsminor.yy378; break; case 167: /* select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ #line 486 "sql.y" { - yylhsminor.yy488 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy135, yymsp[-11].minor.yy460, yymsp[-10].minor.yy526, yymsp[-4].minor.yy135, yymsp[-2].minor.yy135, &yymsp[-9].minor.yy160, &yymsp[-7].minor.yy511, &yymsp[-6].minor.yy258, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy135, &yymsp[0].minor.yy126, &yymsp[-1].minor.yy126, yymsp[-3].minor.yy526); + yylhsminor.yy246 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy247, yymsp[-11].minor.yy46, yymsp[-10].minor.yy44, yymsp[-4].minor.yy247, yymsp[-2].minor.yy247, &yymsp[-9].minor.yy430, &yymsp[-7].minor.yy507, &yymsp[-6].minor.yy492, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy247, &yymsp[0].minor.yy204, &yymsp[-1].minor.yy204, yymsp[-3].minor.yy44); } -#line 3468 "sql.c" - yymsp[-13].minor.yy488 = yylhsminor.yy488; +#line 3479 "sql.c" + yymsp[-13].minor.yy246 = yylhsminor.yy246; break; case 168: /* select ::= LP select RP */ #line 490 "sql.y" -{yymsp[-2].minor.yy488 = yymsp[-1].minor.yy488;} -#line 3474 "sql.c" +{yymsp[-2].minor.yy246 = yymsp[-1].minor.yy246;} +#line 3485 "sql.c" break; case 169: /* union ::= select */ #line 494 "sql.y" -{ yylhsminor.yy135 = setSubclause(NULL, yymsp[0].minor.yy488); } -#line 3479 "sql.c" - yymsp[0].minor.yy135 = yylhsminor.yy135; +{ yylhsminor.yy247 = setSubclause(NULL, yymsp[0].minor.yy246); } +#line 3490 "sql.c" + yymsp[0].minor.yy247 = yylhsminor.yy247; break; case 170: /* union ::= union UNION ALL select */ #line 495 "sql.y" -{ yylhsminor.yy135 = appendSelectClause(yymsp[-3].minor.yy135, yymsp[0].minor.yy488); } -#line 3485 "sql.c" - yymsp[-3].minor.yy135 = yylhsminor.yy135; +{ yylhsminor.yy247 = appendSelectClause(yymsp[-3].minor.yy247, yymsp[0].minor.yy246); } +#line 3496 "sql.c" + yymsp[-3].minor.yy247 = yylhsminor.yy247; break; case 171: /* cmd ::= union */ #line 497 "sql.y" -{ setSqlInfo(pInfo, yymsp[0].minor.yy135, NULL, TSDB_SQL_SELECT); } -#line 3491 "sql.c" +{ setSqlInfo(pInfo, yymsp[0].minor.yy247, NULL, TSDB_SQL_SELECT); } +#line 3502 "sql.c" break; case 172: /* select ::= SELECT selcollist */ #line 504 "sql.y" { - yylhsminor.yy488 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy135, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy246 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy247, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } -#line 3498 "sql.c" - yymsp[-1].minor.yy488 = yylhsminor.yy488; +#line 3509 "sql.c" + yymsp[-1].minor.yy246 = yylhsminor.yy246; break; case 173: /* sclp ::= selcollist COMMA */ #line 
516 "sql.y" -{yylhsminor.yy135 = yymsp[-1].minor.yy135;} -#line 3504 "sql.c" - yymsp[-1].minor.yy135 = yylhsminor.yy135; +{yylhsminor.yy247 = yymsp[-1].minor.yy247;} +#line 3515 "sql.c" + yymsp[-1].minor.yy247 = yylhsminor.yy247; break; case 174: /* sclp ::= */ case 206: /* orderby_opt ::= */ yytestcase(yyruleno==206); #line 517 "sql.y" -{yymsp[1].minor.yy135 = 0;} -#line 3511 "sql.c" +{yymsp[1].minor.yy247 = 0;} +#line 3522 "sql.c" break; case 175: /* selcollist ::= sclp distinct expr as */ #line 518 "sql.y" { - yylhsminor.yy135 = tSqlExprListAppend(yymsp[-3].minor.yy135, yymsp[-1].minor.yy526, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy247 = tSqlExprListAppend(yymsp[-3].minor.yy247, yymsp[-1].minor.yy44, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } -#line 3518 "sql.c" - yymsp[-3].minor.yy135 = yylhsminor.yy135; +#line 3529 "sql.c" + yymsp[-3].minor.yy247 = yylhsminor.yy247; break; case 176: /* selcollist ::= sclp STAR */ #line 522 "sql.y" { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); - yylhsminor.yy135 = tSqlExprListAppend(yymsp[-1].minor.yy135, pNode, 0, 0); + yylhsminor.yy247 = tSqlExprListAppend(yymsp[-1].minor.yy247, pNode, 0, 0); } -#line 3527 "sql.c" - yymsp[-1].minor.yy135 = yylhsminor.yy135; +#line 3538 "sql.c" + yymsp[-1].minor.yy247 = yylhsminor.yy247; break; case 177: /* as ::= AS ids */ #line 530 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } -#line 3533 "sql.c" +#line 3544 "sql.c" break; case 178: /* as ::= ids */ #line 531 "sql.y" { yylhsminor.yy0 = yymsp[0].minor.yy0; } -#line 3538 "sql.c" +#line 3549 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 179: /* as ::= */ #line 532 "sql.y" { yymsp[1].minor.yy0.n = 0; } -#line 3544 "sql.c" +#line 3555 "sql.c" break; case 180: /* distinct ::= DISTINCT */ #line 535 "sql.y" { yylhsminor.yy0 = yymsp[0].minor.yy0; } -#line 3549 "sql.c" +#line 3560 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 182: /* from ::= FROM tablelist */ case 183: /* from ::= FROM sub */ yytestcase(yyruleno==183); #line 541 "sql.y" -{yymsp[-1].minor.yy460 = yymsp[0].minor.yy460;} -#line 3556 "sql.c" +{yymsp[-1].minor.yy46 = yymsp[0].minor.yy46;} +#line 3567 "sql.c" break; case 184: /* sub ::= LP union RP */ #line 546 "sql.y" -{yymsp[-2].minor.yy460 = addSubqueryElem(NULL, yymsp[-1].minor.yy135, NULL);} -#line 3561 "sql.c" +{yymsp[-2].minor.yy46 = addSubqueryElem(NULL, yymsp[-1].minor.yy247, NULL);} +#line 3572 "sql.c" break; case 185: /* sub ::= LP union RP ids */ #line 547 "sql.y" -{yymsp[-3].minor.yy460 = addSubqueryElem(NULL, yymsp[-2].minor.yy135, &yymsp[0].minor.yy0);} -#line 3566 "sql.c" +{yymsp[-3].minor.yy46 = addSubqueryElem(NULL, yymsp[-2].minor.yy247, &yymsp[0].minor.yy0);} +#line 3577 "sql.c" break; case 186: /* sub ::= sub COMMA LP union RP ids */ #line 548 "sql.y" -{yylhsminor.yy460 = addSubqueryElem(yymsp[-5].minor.yy460, yymsp[-2].minor.yy135, &yymsp[0].minor.yy0);} -#line 3571 "sql.c" - yymsp[-5].minor.yy460 = yylhsminor.yy460; +{yylhsminor.yy46 = addSubqueryElem(yymsp[-5].minor.yy46, yymsp[-2].minor.yy247, &yymsp[0].minor.yy0);} +#line 3582 "sql.c" + yymsp[-5].minor.yy46 = yylhsminor.yy46; break; case 187: /* tablelist ::= ids cpxName */ #line 552 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy460 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy46 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } -#line 3580 "sql.c" - yymsp[-1].minor.yy460 = 
yylhsminor.yy460; +#line 3591 "sql.c" + yymsp[-1].minor.yy46 = yylhsminor.yy46; break; case 188: /* tablelist ::= ids cpxName ids */ #line 557 "sql.y" { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy460 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy46 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } -#line 3589 "sql.c" - yymsp[-2].minor.yy460 = yylhsminor.yy460; +#line 3600 "sql.c" + yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 189: /* tablelist ::= tablelist COMMA ids cpxName */ #line 562 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy460 = setTableNameList(yymsp[-3].minor.yy460, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy46 = setTableNameList(yymsp[-3].minor.yy46, &yymsp[-1].minor.yy0, NULL); } -#line 3598 "sql.c" - yymsp[-3].minor.yy460 = yylhsminor.yy460; +#line 3609 "sql.c" + yymsp[-3].minor.yy46 = yylhsminor.yy46; break; case 190: /* tablelist ::= tablelist COMMA ids cpxName ids */ #line 567 "sql.y" { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy460 = setTableNameList(yymsp[-4].minor.yy460, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy46 = setTableNameList(yymsp[-4].minor.yy46, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } -#line 3607 "sql.c" - yymsp[-4].minor.yy460 = yylhsminor.yy460; +#line 3618 "sql.c" + yymsp[-4].minor.yy46 = yylhsminor.yy46; break; case 191: /* tmvar ::= VARIABLE */ #line 574 "sql.y" {yylhsminor.yy0 = yymsp[0].minor.yy0;} -#line 3613 "sql.c" +#line 3624 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 192: /* interval_option ::= intervalKey LP tmvar RP */ #line 577 "sql.y" -{yylhsminor.yy160.interval = yymsp[-1].minor.yy0; yylhsminor.yy160.offset.n = 0; yylhsminor.yy160.token = yymsp[-3].minor.yy262;} -#line 3619 "sql.c" - yymsp[-3].minor.yy160 = yylhsminor.yy160; +{yylhsminor.yy430.interval = yymsp[-1].minor.yy0; yylhsminor.yy430.offset.n = 0; yylhsminor.yy430.token = yymsp[-3].minor.yy2;} +#line 3630 "sql.c" + yymsp[-3].minor.yy430 = yylhsminor.yy430; break; case 193: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */ #line 578 "sql.y" -{yylhsminor.yy160.interval = yymsp[-3].minor.yy0; yylhsminor.yy160.offset = yymsp[-1].minor.yy0; yylhsminor.yy160.token = yymsp[-5].minor.yy262;} -#line 3625 "sql.c" - yymsp[-5].minor.yy160 = yylhsminor.yy160; +{yylhsminor.yy430.interval = yymsp[-3].minor.yy0; yylhsminor.yy430.offset = yymsp[-1].minor.yy0; yylhsminor.yy430.token = yymsp[-5].minor.yy2;} +#line 3636 "sql.c" + yymsp[-5].minor.yy430 = yylhsminor.yy430; break; case 194: /* interval_option ::= */ #line 579 "sql.y" -{memset(&yymsp[1].minor.yy160, 0, sizeof(yymsp[1].minor.yy160));} -#line 3631 "sql.c" +{memset(&yymsp[1].minor.yy430, 0, sizeof(yymsp[1].minor.yy430));} +#line 3642 "sql.c" break; case 195: /* intervalKey ::= INTERVAL */ #line 582 "sql.y" -{yymsp[0].minor.yy262 = TK_INTERVAL;} -#line 3636 "sql.c" +{yymsp[0].minor.yy2 = TK_INTERVAL;} +#line 3647 "sql.c" break; case 196: /* intervalKey ::= EVERY */ #line 583 "sql.y" -{yymsp[0].minor.yy262 = TK_EVERY; } -#line 3641 "sql.c" +{yymsp[0].minor.yy2 = TK_EVERY; } +#line 3652 "sql.c" break; case 197: /* session_option ::= */ #line 586 "sql.y" -{yymsp[1].minor.yy511.col.n = 0; yymsp[1].minor.yy511.gap.n = 0;} -#line 3646 "sql.c" +{yymsp[1].minor.yy507.col.n = 0; yymsp[1].minor.yy507.gap.n = 0;} +#line 3657 "sql.c" break; case 198: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ #line 587 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; 
- yymsp[-6].minor.yy511.col = yymsp[-4].minor.yy0; - yymsp[-6].minor.yy511.gap = yymsp[-1].minor.yy0; + yymsp[-6].minor.yy507.col = yymsp[-4].minor.yy0; + yymsp[-6].minor.yy507.gap = yymsp[-1].minor.yy0; } -#line 3655 "sql.c" +#line 3666 "sql.c" break; case 199: /* windowstate_option ::= */ #line 594 "sql.y" -{ yymsp[1].minor.yy258.col.n = 0; yymsp[1].minor.yy258.col.z = NULL;} -#line 3660 "sql.c" +{ yymsp[1].minor.yy492.col.n = 0; yymsp[1].minor.yy492.col.z = NULL;} +#line 3671 "sql.c" break; case 200: /* windowstate_option ::= STATE_WINDOW LP ids RP */ #line 595 "sql.y" -{ yymsp[-3].minor.yy258.col = yymsp[-1].minor.yy0; } -#line 3665 "sql.c" +{ yymsp[-3].minor.yy492.col = yymsp[-1].minor.yy0; } +#line 3676 "sql.c" break; case 201: /* fill_opt ::= */ #line 599 "sql.y" -{ yymsp[1].minor.yy135 = 0; } -#line 3670 "sql.c" +{ yymsp[1].minor.yy247 = 0; } +#line 3681 "sql.c" break; case 202: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ #line 600 "sql.y" @@ -3677,49 +3687,49 @@ static YYACTIONTYPE yy_reduce( toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy135, &A, -1, 0); - yymsp[-5].minor.yy135 = yymsp[-1].minor.yy135; + tVariantListInsert(yymsp[-1].minor.yy247, &A, -1, 0); + yymsp[-5].minor.yy247 = yymsp[-1].minor.yy247; } -#line 3682 "sql.c" +#line 3693 "sql.c" break; case 203: /* fill_opt ::= FILL LP ID RP */ #line 609 "sql.y" { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy135 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy247 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } -#line 3690 "sql.c" +#line 3701 "sql.c" break; case 204: /* sliding_opt ::= SLIDING LP tmvar RP */ #line 615 "sql.y" {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } -#line 3695 "sql.c" +#line 3706 "sql.c" break; case 205: /* sliding_opt ::= */ #line 616 "sql.y" {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } -#line 3700 "sql.c" +#line 3711 "sql.c" break; case 207: /* orderby_opt ::= ORDER BY sortlist */ #line 628 "sql.y" -{yymsp[-2].minor.yy135 = yymsp[0].minor.yy135;} -#line 3705 "sql.c" +{yymsp[-2].minor.yy247 = yymsp[0].minor.yy247;} +#line 3716 "sql.c" break; case 208: /* sortlist ::= sortlist COMMA item sortorder */ #line 630 "sql.y" { - yylhsminor.yy135 = tVariantListAppend(yymsp[-3].minor.yy135, &yymsp[-1].minor.yy308, yymsp[0].minor.yy130); + yylhsminor.yy247 = tVariantListAppend(yymsp[-3].minor.yy247, &yymsp[-1].minor.yy378, yymsp[0].minor.yy222); } -#line 3712 "sql.c" - yymsp[-3].minor.yy135 = yylhsminor.yy135; +#line 3723 "sql.c" + yymsp[-3].minor.yy247 = yylhsminor.yy247; break; case 209: /* sortlist ::= item sortorder */ #line 634 "sql.y" { - yylhsminor.yy135 = tVariantListAppend(NULL, &yymsp[-1].minor.yy308, yymsp[0].minor.yy130); + yylhsminor.yy247 = tVariantListAppend(NULL, &yymsp[-1].minor.yy378, yymsp[0].minor.yy222); } -#line 3720 "sql.c" - yymsp[-1].minor.yy135 = yylhsminor.yy135; +#line 3731 "sql.c" + yymsp[-1].minor.yy247 = yylhsminor.yy247; break; case 210: /* item ::= ids cpxName */ #line 639 "sql.y" @@ -3727,361 +3737,367 @@ static YYACTIONTYPE yy_reduce( toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy308, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy378, &yymsp[-1].minor.yy0); } -#line 3731 "sql.c" - yymsp[-1].minor.yy308 = yylhsminor.yy308; +#line 3742 "sql.c" + yymsp[-1].minor.yy378 = yylhsminor.yy378; break; case 211: /* sortorder ::= ASC */ #line 
647 "sql.y" -{ yymsp[0].minor.yy130 = TSDB_ORDER_ASC; } -#line 3737 "sql.c" +{ yymsp[0].minor.yy222 = TSDB_ORDER_ASC; } +#line 3748 "sql.c" break; case 212: /* sortorder ::= DESC */ #line 648 "sql.y" -{ yymsp[0].minor.yy130 = TSDB_ORDER_DESC;} -#line 3742 "sql.c" +{ yymsp[0].minor.yy222 = TSDB_ORDER_DESC;} +#line 3753 "sql.c" break; case 213: /* sortorder ::= */ #line 649 "sql.y" -{ yymsp[1].minor.yy130 = TSDB_ORDER_ASC; } -#line 3747 "sql.c" +{ yymsp[1].minor.yy222 = TSDB_ORDER_ASC; } +#line 3758 "sql.c" break; case 214: /* groupby_opt ::= */ #line 657 "sql.y" -{ yymsp[1].minor.yy135 = 0;} -#line 3752 "sql.c" +{ yymsp[1].minor.yy247 = 0;} +#line 3763 "sql.c" break; case 215: /* groupby_opt ::= GROUP BY grouplist */ #line 658 "sql.y" -{ yymsp[-2].minor.yy135 = yymsp[0].minor.yy135;} -#line 3757 "sql.c" +{ yymsp[-2].minor.yy247 = yymsp[0].minor.yy247;} +#line 3768 "sql.c" break; case 216: /* grouplist ::= grouplist COMMA item */ #line 660 "sql.y" { - yylhsminor.yy135 = tVariantListAppend(yymsp[-2].minor.yy135, &yymsp[0].minor.yy308, -1); + yylhsminor.yy247 = tVariantListAppend(yymsp[-2].minor.yy247, &yymsp[0].minor.yy378, -1); } -#line 3764 "sql.c" - yymsp[-2].minor.yy135 = yylhsminor.yy135; +#line 3775 "sql.c" + yymsp[-2].minor.yy247 = yylhsminor.yy247; break; case 217: /* grouplist ::= item */ #line 664 "sql.y" { - yylhsminor.yy135 = tVariantListAppend(NULL, &yymsp[0].minor.yy308, -1); + yylhsminor.yy247 = tVariantListAppend(NULL, &yymsp[0].minor.yy378, -1); } -#line 3772 "sql.c" - yymsp[0].minor.yy135 = yylhsminor.yy135; +#line 3783 "sql.c" + yymsp[0].minor.yy247 = yylhsminor.yy247; break; case 218: /* having_opt ::= */ case 228: /* where_opt ::= */ yytestcase(yyruleno==228); - case 273: /* expritem ::= */ yytestcase(yyruleno==273); + case 274: /* expritem ::= */ yytestcase(yyruleno==274); #line 671 "sql.y" -{yymsp[1].minor.yy526 = 0;} -#line 3780 "sql.c" +{yymsp[1].minor.yy44 = 0;} +#line 3791 "sql.c" break; case 219: /* having_opt ::= HAVING expr */ case 229: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==229); #line 672 "sql.y" -{yymsp[-1].minor.yy526 = yymsp[0].minor.yy526;} -#line 3786 "sql.c" +{yymsp[-1].minor.yy44 = yymsp[0].minor.yy44;} +#line 3797 "sql.c" break; case 220: /* limit_opt ::= */ case 224: /* slimit_opt ::= */ yytestcase(yyruleno==224); #line 676 "sql.y" -{yymsp[1].minor.yy126.limit = -1; yymsp[1].minor.yy126.offset = 0;} -#line 3792 "sql.c" +{yymsp[1].minor.yy204.limit = -1; yymsp[1].minor.yy204.offset = 0;} +#line 3803 "sql.c" break; case 221: /* limit_opt ::= LIMIT signed */ case 225: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==225); #line 677 "sql.y" -{yymsp[-1].minor.yy126.limit = yymsp[0].minor.yy531; yymsp[-1].minor.yy126.offset = 0;} -#line 3798 "sql.c" +{yymsp[-1].minor.yy204.limit = yymsp[0].minor.yy403; yymsp[-1].minor.yy204.offset = 0;} +#line 3809 "sql.c" break; case 222: /* limit_opt ::= LIMIT signed OFFSET signed */ #line 679 "sql.y" -{ yymsp[-3].minor.yy126.limit = yymsp[-2].minor.yy531; yymsp[-3].minor.yy126.offset = yymsp[0].minor.yy531;} -#line 3803 "sql.c" +{ yymsp[-3].minor.yy204.limit = yymsp[-2].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[0].minor.yy403;} +#line 3814 "sql.c" break; case 223: /* limit_opt ::= LIMIT signed COMMA signed */ #line 681 "sql.y" -{ yymsp[-3].minor.yy126.limit = yymsp[0].minor.yy531; yymsp[-3].minor.yy126.offset = yymsp[-2].minor.yy531;} -#line 3808 "sql.c" +{ yymsp[-3].minor.yy204.limit = yymsp[0].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[-2].minor.yy403;} +#line 3819 "sql.c" break; case 
226: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ #line 687 "sql.y" -{yymsp[-3].minor.yy126.limit = yymsp[-2].minor.yy531; yymsp[-3].minor.yy126.offset = yymsp[0].minor.yy531;} -#line 3813 "sql.c" +{yymsp[-3].minor.yy204.limit = yymsp[-2].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[0].minor.yy403;} +#line 3824 "sql.c" break; case 227: /* slimit_opt ::= SLIMIT signed COMMA signed */ #line 689 "sql.y" -{yymsp[-3].minor.yy126.limit = yymsp[0].minor.yy531; yymsp[-3].minor.yy126.offset = yymsp[-2].minor.yy531;} -#line 3818 "sql.c" +{yymsp[-3].minor.yy204.limit = yymsp[0].minor.yy403; yymsp[-3].minor.yy204.offset = yymsp[-2].minor.yy403;} +#line 3829 "sql.c" break; case 230: /* expr ::= LP expr RP */ #line 702 "sql.y" -{yylhsminor.yy526 = yymsp[-1].minor.yy526; yylhsminor.yy526->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy526->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} -#line 3823 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = yymsp[-1].minor.yy44; yylhsminor.yy44->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy44->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} +#line 3834 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 231: /* expr ::= ID */ #line 704 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} -#line 3829 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} +#line 3840 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 232: /* expr ::= ID DOT ID */ #line 705 "sql.y" -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} -#line 3835 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} +#line 3846 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 233: /* expr ::= ID DOT STAR */ #line 706 "sql.y" -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} -#line 3841 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} +#line 3852 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 234: /* expr ::= INTEGER */ #line 708 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} -#line 3847 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} +#line 3858 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 235: /* expr ::= MINUS INTEGER */ case 236: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==236); #line 709 "sql.y" -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} -#line 3854 "sql.c" - yymsp[-1].minor.yy526 = yylhsminor.yy526; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} +#line 3865 "sql.c" + yymsp[-1].minor.yy44 = yylhsminor.yy44; break; case 237: /* expr ::= FLOAT */ #line 711 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} -#line 3860 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ 
yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} +#line 3871 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 238: /* expr ::= MINUS FLOAT */ case 239: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==239); #line 712 "sql.y" -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} -#line 3867 "sql.c" - yymsp[-1].minor.yy526 = yylhsminor.yy526; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} +#line 3878 "sql.c" + yymsp[-1].minor.yy44 = yylhsminor.yy44; break; case 240: /* expr ::= STRING */ #line 714 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} -#line 3873 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} +#line 3884 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 241: /* expr ::= NOW */ #line 715 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } -#line 3879 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } +#line 3890 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 242: /* expr ::= VARIABLE */ #line 716 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} -#line 3885 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} +#line 3896 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 243: /* expr ::= PLUS VARIABLE */ case 244: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==244); #line 717 "sql.y" -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} -#line 3892 "sql.c" - yymsp[-1].minor.yy526 = yylhsminor.yy526; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} +#line 3903 "sql.c" + yymsp[-1].minor.yy44 = yylhsminor.yy44; break; case 245: /* expr ::= BOOL */ #line 719 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} -#line 3898 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} +#line 3909 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 246: /* expr ::= NULL */ #line 720 "sql.y" -{ yylhsminor.yy526 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} -#line 3904 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{ yylhsminor.yy44 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} +#line 3915 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; case 247: /* expr ::= ID LP exprlist RP */ #line 723 "sql.y" -{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy526 = tSqlExprCreateFunction(yymsp[-1].minor.yy135, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } -#line 3910 "sql.c" - yymsp[-3].minor.yy526 = yylhsminor.yy526; +{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy44 = tSqlExprCreateFunction(yymsp[-1].minor.yy247, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } +#line 3921 "sql.c" + yymsp[-3].minor.yy44 = yylhsminor.yy44; break; 
case 248: /* expr ::= ID LP STAR RP */ #line 726 "sql.y" -{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy526 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } -#line 3916 "sql.c" - yymsp[-3].minor.yy526 = yylhsminor.yy526; +{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy44 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } +#line 3927 "sql.c" + yymsp[-3].minor.yy44 = yylhsminor.yy44; break; case 249: /* expr ::= expr IS NULL */ #line 729 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, NULL, TK_ISNULL);} -#line 3922 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, NULL, TK_ISNULL);} +#line 3933 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 250: /* expr ::= expr IS NOT NULL */ #line 730 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-3].minor.yy526, NULL, TK_NOTNULL);} -#line 3928 "sql.c" - yymsp[-3].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-3].minor.yy44, NULL, TK_NOTNULL);} +#line 3939 "sql.c" + yymsp[-3].minor.yy44 = yylhsminor.yy44; break; case 251: /* expr ::= expr LT expr */ #line 733 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_LT);} -#line 3934 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_LT);} +#line 3945 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 252: /* expr ::= expr GT expr */ #line 734 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_GT);} -#line 3940 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_GT);} +#line 3951 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 253: /* expr ::= expr LE expr */ #line 735 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_LE);} -#line 3946 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_LE);} +#line 3957 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 254: /* expr ::= expr GE expr */ #line 736 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_GE);} -#line 3952 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_GE);} +#line 3963 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 255: /* expr ::= expr NE expr */ #line 737 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_NE);} -#line 3958 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_NE);} +#line 3969 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 256: /* expr ::= expr EQ expr */ #line 738 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_EQ);} -#line 3964 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_EQ);} +#line 3975 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 257: /* expr ::= expr BETWEEN expr AND expr */ #line 740 "sql.y" -{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy526); 
yylhsminor.yy526 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy526, yymsp[-2].minor.yy526, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy526, TK_LE), TK_AND);} -#line 3970 "sql.c" - yymsp[-4].minor.yy526 = yylhsminor.yy526; +{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy44); yylhsminor.yy44 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy44, yymsp[-2].minor.yy44, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy44, TK_LE), TK_AND);} +#line 3981 "sql.c" + yymsp[-4].minor.yy44 = yylhsminor.yy44; break; case 258: /* expr ::= expr AND expr */ #line 742 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_AND);} -#line 3976 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_AND);} +#line 3987 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 259: /* expr ::= expr OR expr */ #line 743 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_OR); } -#line 3982 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_OR); } +#line 3993 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 260: /* expr ::= expr PLUS expr */ #line 746 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_PLUS); } -#line 3988 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_PLUS); } +#line 3999 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 261: /* expr ::= expr MINUS expr */ #line 747 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_MINUS); } -#line 3994 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_MINUS); } +#line 4005 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 262: /* expr ::= expr STAR expr */ #line 748 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_STAR); } -#line 4000 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_STAR); } +#line 4011 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 263: /* expr ::= expr SLASH expr */ #line 749 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_DIVIDE);} -#line 4006 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_DIVIDE);} +#line 4017 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 264: /* expr ::= expr REM expr */ #line 750 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_REM); } -#line 4012 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_REM); } +#line 4023 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 265: /* expr ::= expr LIKE expr */ #line 753 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_LIKE); } -#line 4018 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_LIKE); } +#line 4029 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; break; case 266: /* expr ::= expr MATCH expr */ #line 756 "sql.y" 
-{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_MATCH); } -#line 4024 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; - break; - case 267: /* expr ::= expr QUESTION expr */ -#line 759 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_QUESTION); } -#line 4030 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; - break; - case 268: /* expr ::= expr ARROW expr */ -#line 762 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-2].minor.yy526, yymsp[0].minor.yy526, TK_ARROW); } -#line 4036 "sql.c" - yymsp[-2].minor.yy526 = yylhsminor.yy526; - break; - case 269: /* expr ::= expr IN LP exprlist RP */ -#line 765 "sql.y" -{yylhsminor.yy526 = tSqlExprCreate(yymsp[-4].minor.yy526, (tSqlExpr*)yymsp[-1].minor.yy135, TK_IN); } -#line 4042 "sql.c" - yymsp[-4].minor.yy526 = yylhsminor.yy526; - break; - case 270: /* exprlist ::= exprlist COMMA expritem */ -#line 773 "sql.y" -{yylhsminor.yy135 = tSqlExprListAppend(yymsp[-2].minor.yy135,yymsp[0].minor.yy526,0, 0);} -#line 4048 "sql.c" - yymsp[-2].minor.yy135 = yylhsminor.yy135; - break; - case 271: /* exprlist ::= expritem */ +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_MATCH); } +#line 4035 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; + break; + case 267: /* expr ::= expr NMATCH expr */ +#line 757 "sql.y" +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_NMATCH); } +#line 4041 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; + break; + case 268: /* expr ::= expr QUESTION expr */ +#line 760 "sql.y" +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_QUESTION); } +#line 4047 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; + break; + case 269: /* expr ::= expr ARROW expr */ +#line 763 "sql.y" +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-2].minor.yy44, yymsp[0].minor.yy44, TK_ARROW); } +#line 4053 "sql.c" + yymsp[-2].minor.yy44 = yylhsminor.yy44; + break; + case 270: /* expr ::= expr IN LP exprlist RP */ +#line 766 "sql.y" +{yylhsminor.yy44 = tSqlExprCreate(yymsp[-4].minor.yy44, (tSqlExpr*)yymsp[-1].minor.yy247, TK_IN); } +#line 4059 "sql.c" + yymsp[-4].minor.yy44 = yylhsminor.yy44; + break; + case 271: /* exprlist ::= exprlist COMMA expritem */ #line 774 "sql.y" -{yylhsminor.yy135 = tSqlExprListAppend(0,yymsp[0].minor.yy526,0, 0);} -#line 4054 "sql.c" - yymsp[0].minor.yy135 = yylhsminor.yy135; +{yylhsminor.yy247 = tSqlExprListAppend(yymsp[-2].minor.yy247,yymsp[0].minor.yy44,0, 0);} +#line 4065 "sql.c" + yymsp[-2].minor.yy247 = yylhsminor.yy247; break; - case 272: /* expritem ::= expr */ + case 272: /* exprlist ::= expritem */ #line 775 "sql.y" -{yylhsminor.yy526 = yymsp[0].minor.yy526;} -#line 4060 "sql.c" - yymsp[0].minor.yy526 = yylhsminor.yy526; +{yylhsminor.yy247 = tSqlExprListAppend(0,yymsp[0].minor.yy44,0, 0);} +#line 4071 "sql.c" + yymsp[0].minor.yy247 = yylhsminor.yy247; + break; + case 273: /* expritem ::= expr */ +#line 776 "sql.y" +{yylhsminor.yy44 = yymsp[0].minor.yy44;} +#line 4077 "sql.c" + yymsp[0].minor.yy44 = yylhsminor.yy44; break; - case 274: /* cmd ::= RESET QUERY CACHE */ -#line 779 "sql.y" + case 275: /* cmd ::= RESET QUERY CACHE */ +#line 780 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} -#line 4066 "sql.c" +#line 4083 "sql.c" break; - case 275: /* cmd ::= SYNCDB ids REPLICA */ -#line 782 "sql.y" + case 276: /* cmd ::= SYNCDB ids REPLICA */ +#line 783 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} -#line 4071 
"sql.c" +#line 4088 "sql.c" break; - case 276: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ -#line 785 "sql.y" + case 277: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ +#line 786 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4080 "sql.c" +#line 4097 "sql.c" break; - case 277: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ -#line 791 "sql.y" + case 278: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ +#line 792 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -4091,28 +4107,28 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4093 "sql.c" +#line 4110 "sql.c" break; - case 278: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ -#line 801 "sql.y" + case 279: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ +#line 802 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4102 "sql.c" +#line 4119 "sql.c" break; - case 279: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ -#line 808 "sql.y" + case 280: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ +#line 809 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4111 "sql.c" +#line 4128 "sql.c" break; - case 280: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ -#line 813 "sql.y" + case 281: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ +#line 814 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -4122,10 +4138,10 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4124 "sql.c" +#line 4141 "sql.c" break; - case 281: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ -#line 823 "sql.y" + case 282: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ +#line 824 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -4138,42 +4154,42 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4140 "sql.c" +#line 4157 "sql.c" break; - case 282: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ -#line 836 "sql.y" + case 283: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ 
tagitem */ +#line 837 "sql.y" { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy308, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy378, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4154 "sql.c" +#line 4171 "sql.c" break; - case 283: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ -#line 847 "sql.y" + case 284: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ +#line 848 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4163 "sql.c" +#line 4180 "sql.c" break; - case 284: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ -#line 854 "sql.y" + case 285: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ +#line 855 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4172 "sql.c" +#line 4189 "sql.c" break; - case 285: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ -#line 860 "sql.y" + case 286: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ +#line 861 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -4183,28 +4199,28 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4185 "sql.c" +#line 4202 "sql.c" break; - case 286: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ -#line 870 "sql.y" + case 287: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ +#line 871 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4194 "sql.c" +#line 4211 "sql.c" break; - case 287: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ -#line 877 "sql.y" + case 288: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ +#line 878 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, 
TSDB_SQL_ALTER_TABLE); } -#line 4203 "sql.c" +#line 4220 "sql.c" break; - case 288: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ -#line 882 "sql.y" + case 289: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ +#line 883 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -4214,10 +4230,10 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4216 "sql.c" +#line 4233 "sql.c" break; - case 289: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ -#line 892 "sql.y" + case 290: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ +#line 893 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -4230,45 +4246,45 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4232 "sql.c" +#line 4249 "sql.c" break; - case 290: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ -#line 905 "sql.y" + case 291: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ +#line 906 "sql.y" { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy308, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy378, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4246 "sql.c" +#line 4263 "sql.c" break; - case 291: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ -#line 916 "sql.y" + case 292: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ +#line 917 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy135, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy247, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4255 "sql.c" +#line 4272 "sql.c" break; - case 292: /* cmd ::= KILL CONNECTION INTEGER */ -#line 923 "sql.y" + case 293: /* cmd ::= KILL CONNECTION INTEGER */ +#line 924 "sql.y" {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} -#line 4260 "sql.c" +#line 4277 "sql.c" break; - case 293: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ -#line 924 "sql.y" + case 294: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ +#line 925 "sql.y" {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} -#line 4265 "sql.c" +#line 4282 "sql.c" break; - case 294: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ -#line 925 "sql.y" + case 295: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ +#line 926 "sql.y" {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} -#line 4270 "sql.c" +#line 4287 "sql.c" break; default: break; @@ -4353,7 +4369,7 @@ static void yy_syntax_error( } assert(len <= outputBufLen); -#line 4355 "sql.c" +#line 4372 "sql.c" 
/************ End %syntax_error code ******************************************/ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ ParseCTX_STORE @@ -4380,7 +4396,7 @@ static void yy_accept( ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ #line 62 "sql.y" -#line 4382 "sql.c" +#line 4399 "sql.c" /*********** End %parse_accept code *******************************************/ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ ParseCTX_STORE diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index 349d511f1570e3df835494ebd4e3e86d7795c873..8c4b9c2e6a2e9a5f6835baf411ecc94e6889fcbe 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -18,7 +18,7 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) ADD_EXECUTABLE(queryTest ${SOURCE_LIST}) - TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread) + TARGET_LINK_LIBRARIES(queryTest taos cJson query gtest pthread) ENDIF() SET_SOURCE_FILES_PROPERTIES(./astTest.cpp PROPERTIES COMPILE_FLAGS -w) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index e958a8e5ec5b6542d609028ee052d21a9a84d397..9ea5fd539244820f111a3fbb3c60aee088e727c5 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -407,7 +407,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META - || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS) + || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_CM_ALTER_TABLE) pContext->connType = RPC_CONN_TCPC; pContext->rid = taosAddRef(tsRpcRefId, pContext); diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index c5b77df5a25f9f0b1e9294228520f171b9befddd..efbed6f0a6e8218c3a0b46d2913f6a792bf48ce4 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -2,6 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tsdb ${SRC}) TARGET_LINK_LIBRARIES(tsdb tfs common tutil) diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h index ec6b057aef142fb938993b3a27717c5e64937258..4b650d3993a54f6a98caf00a3605feb37e972ebd 100644 --- a/src/tsdb/inc/tsdbBuffer.h +++ b/src/tsdb/inc/tsdbBuffer.h @@ -29,6 +29,7 @@ typedef struct { int tBufBlocks; int nBufBlocks; int nRecycleBlocks; + int nElasticBlocks; int64_t index; SList* bufBlockList; } STsdbBufPool; @@ -41,6 +42,10 @@ int tsdbOpenBufPool(STsdbRepo* pRepo); void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); -void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode); +void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic); + +// health cite +STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize); +void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock); #endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/inc/tsdbHealth.h b/src/tsdb/inc/tsdbHealth.h new file mode 100644 index 
0000000000000000000000000000000000000000..324f4312e05fc0ca0200c319728bf692bf476bf6 --- /dev/null +++ b/src/tsdb/inc/tsdbHealth.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_TSDB_HEALTH_H_ +#define _TD_TSDB_HEALTH_H_ + +bool tsdbUrgeQueryFree(STsdbRepo* pRepo); +int32_t tsdbInsertNewBlock(STsdbRepo* pRepo); + +bool tsdbIdleMemEnough(); +bool tsdbAllowNewBlock(STsdbRepo* pRepo); + +#endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 7ab00d96e52ee989a6abd89a8ae21ed839a1561b..d70fb9de121adc1c988ca5ed2ca0692678b0abc4 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -109,7 +109,7 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k } static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) { - STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; + STable* pDTable = (pTable->pSuper != NULL) ? pTable->pSuper : pTable; // for performance purpose STSchema* pSchema = NULL; STSchema* pTSchema = NULL; diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index 532907ae01be576e40feea2969761846f07170b3..80e92975799f47d68ff72ef80a52efb6fe901b5e 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -97,6 +97,7 @@ struct STsdbRepo { SMergeBuf mergeBuf; //used when update=2 int8_t compactState; // compact state: inCompact/noCompact/waitingCompact? + pthread_t* pthread; }; #define REPO_ID(r) (r)->config.tsdbId diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index e675bf6f9de04021112d43a1db70cf56cf430f08..70589031f6516a129a5a683b0e76edb23b814e15 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -14,12 +14,10 @@ */ #include "tsdbint.h" +#include "tsdbHealth.h" #define POOL_IS_EMPTY(b) (listNEles((b)->bufBlockList) == 0) -static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize); -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock); - // ---------------- INTERNAL FUNCTIONS ---------------- STsdbBufPool *tsdbNewBufPool() { STsdbBufPool *pBufPool = (STsdbBufPool *)calloc(1, sizeof(*pBufPool)); @@ -65,10 +63,10 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) { STsdbBufPool *pPool = pRepo->pPool; ASSERT(pPool != NULL); - pPool->bufBlockSize = pCfg->cacheBlockSize * 1024 * 1024; // MB pPool->tBufBlocks = pCfg->totalBlocks; pPool->nBufBlocks = 0; + pPool->nElasticBlocks = 0; pPool->index = 0; pPool->nRecycleBlocks = 0; @@ -120,6 +118,18 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { STsdbBufPool *pBufPool = pRepo->pPool; while (POOL_IS_EMPTY(pBufPool)) { + if(tsDeadLockKillQuery) { + // supply new Block + if(tsdbInsertNewBlock(pRepo) > 0) { + tsdbWarn("vgId:%d add new elastic block . 
elasticBlocks=%d cur free Blocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->bufBlockList->numOfEles); + break; + } else { + // no newBlock, kill query free + if(!tsdbUrgeQueryFree(pRepo)) + tsdbWarn("vgId:%d Urge query free thread start failed.", REPO_ID(pRepo)); + } + } + pRepo->repoLocked = false; pthread_cond_wait(&(pBufPool->poolNotEmpty), &(pRepo->mutex)); pRepo->repoLocked = true; @@ -139,11 +149,11 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { } // ---------------- LOCAL FUNCTIONS ---------------- -static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { +STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { STsdbBufBlock *pBufBlock = (STsdbBufBlock *)malloc(sizeof(*pBufBlock) + bufBlockSize); if (pBufBlock == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + return NULL; } pBufBlock->blockId = 0; @@ -151,13 +161,9 @@ static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { pBufBlock->remain = bufBlockSize; return pBufBlock; - -_err: - tsdbFreeBufBlock(pBufBlock); - return NULL; } -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } + void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { if (oldTotalBlocks == pRepo->config.totalBlocks) { @@ -193,10 +199,16 @@ err: return err; } -void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) { +void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic) { STsdbBufBlock *pBufBlock = NULL; tdListNodeGetData(pPool->bufBlockList, pNode, (void *)(&pBufBlock)); tsdbFreeBufBlock(pBufBlock); free(pNode); - pPool->nBufBlocks--; -} + if(bELastic) + { + pPool->nElasticBlocks--; + tsdbWarn("pPool=%p elastic block reduce one . nElasticBlocks=%d cur free Blocks=%d", pPool, pPool->nElasticBlocks, pPool->bufBlockList->numOfEles); + } + else + pPool->nBufBlocks--; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c new file mode 100644 index 0000000000000000000000000000000000000000..8198c480334912b1ce373ceca7b82409f5a644f2 --- /dev/null +++ b/src/tsdb/src/tsdbHealth.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "os.h" +#include "taosmsg.h" +#include "tarray.h" +#include "query.h" +#include "tglobal.h" +#include "tlist.h" +#include "tsdbint.h" +#include "tsdbBuffer.h" +#include "tsdbLog.h" +#include "tsdbHealth.h" +#include "ttimer.h" +#include "tthread.h" + + +// return malloc new block count +int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { + STsdbBufPool *pPool = pRepo->pPool; + int32_t cnt = 0; + + if(tsdbAllowNewBlock(pRepo)) { + STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize); + if (pBufBlock) { + if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) { + // append error + tsdbFreeBufBlock(pBufBlock); + } else { + pPool->nElasticBlocks ++; + cnt ++ ; + } + } + } + return cnt; +} + +// switch anther thread to run +void* cbKillQueryFree(void* param) { + STsdbRepo* pRepo = (STsdbRepo*)param; + // vnode + if(pRepo->appH.notifyStatus) { + pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_NOBLOCK, TSDB_CODE_SUCCESS); + } + + // free + if(pRepo->pthread){ + void* p = pRepo->pthread; + pRepo->pthread = NULL; + free(p); + } + + return NULL; +} + +// return true do free , false do nothing +bool tsdbUrgeQueryFree(STsdbRepo * pRepo) { + // check previous running + if(pRepo->pthread && taosThreadRunning(pRepo->pthread)) { + tsdbWarn("vgId:%d pre urge thread is runing. nBlocks=%d nElasticBlocks=%d", REPO_ID(pRepo), pRepo->pPool->nBufBlocks, pRepo->pPool->nElasticBlocks); + return false; + } + // create new + pRepo->pthread = taosCreateThread(cbKillQueryFree, pRepo); + if(pRepo->pthread == NULL) { + tsdbError("vgId:%d create urge thread error.", REPO_ID(pRepo)); + return false; + } + return true; +} + +bool tsdbAllowNewBlock(STsdbRepo* pRepo) { + int32_t nMaxElastic = pRepo->config.totalBlocks/3; + STsdbBufPool* pPool = pRepo->pPool; + if(pPool->nElasticBlocks >= nMaxElastic) { + tsdbWarn("vgId:%d tsdbAllowNewBlock return fasle. 
nElasticBlock(%d) >= MaxElasticBlocks(%d)", REPO_ID(pRepo), pPool->nElasticBlocks, nMaxElastic); + return false; + } + return true; +} + +bool tsdbNoProblem(STsdbRepo* pRepo) { + if(listNEles(pRepo->pPool->bufBlockList) == 0) + return false; + return true; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index b2e6fe89161d0e9bceaf74a46807f51ec402fb2a..c2021963e0d0c8be4ed42588549153dcd20be63c 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -16,6 +16,8 @@ // no test file errors here #include "taosdef.h" #include "tsdbint.h" +#include "ttimer.h" +#include "tthread.h" #define IS_VALID_PRECISION(precision) \ (((precision) >= TSDB_TIME_PRECISION_MILLI) && ((precision) <= TSDB_TIME_PRECISION_NANO)) @@ -126,6 +128,10 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { terrno = TSDB_CODE_SUCCESS; tsdbStopStream(pRepo); + if(pRepo->pthread){ + taosDestoryThread(pRepo->pthread); + pRepo->pthread = NULL; + } if (toCommit) { tsdbSyncCommit(repo); @@ -547,6 +553,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { pRepo->appH = *pAppH; } pRepo->repoLocked = false; + pRepo->pthread = NULL; int code = pthread_mutex_init(&(pRepo->mutex), NULL); if (code != 0) { diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index e766d97a97a5905db87691426d282a219eef9d68..3890dca5b96c26009dcf3ca72205ca4b1725aa29 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -99,17 +99,22 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { STsdbBufPool *pBufPool = pRepo->pPool; SListNode *pNode = NULL; - bool recycleBlocks = pBufPool->nRecycleBlocks > 0; + bool addNew = false; if (tsdbLockRepo(pRepo) < 0) return -1; while ((pNode = tdListPopHead(pMemTable->bufBlockList)) != NULL) { if (pBufPool->nRecycleBlocks > 0) { - tsdbRecycleBufferBlock(pBufPool, pNode); + tsdbRecycleBufferBlock(pBufPool, pNode, false); pBufPool->nRecycleBlocks -= 1; } else { - tdListAppendNode(pBufPool->bufBlockList, pNode); + if(pBufPool->nElasticBlocks > 0 && listNEles(pBufPool->bufBlockList) > 2) { + tsdbRecycleBufferBlock(pBufPool, pNode, true); + } else { + tdListAppendNode(pBufPool->bufBlockList, pNode); + addNew = true; + } } } - if (!recycleBlocks) { + if (addNew) { int code = pthread_cond_signal(&pBufPool->poolNotEmpty); if (code != 0) { if (tsdbUnlockRepo(pRepo) < 0) return -1; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index c1b935e0ee3cdbd3177710fbddf8994283319b36..4aab9dff7debc0b0f193e38d77222f1752196c65 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -25,6 +25,7 @@ #include "tlosertree.h" #include "tsdbint.h" #include "texpr.h" +#include "qFilter.h" #define EXTRA_BYTES 2 #define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC) @@ -167,6 +168,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM static void* doFreeColumnInfoData(SArray* pColumnInfoData); static void* destroyTableCheckInfo(SArray* pTableCheckInfo); static bool tsdbGetExternalRow(TsdbQueryHandleT pHandle); +static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo); static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) { pBlockLoadInfo->slot = -1; @@ -288,8 +290,6 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(group, j); STableCheckInfo info = { .lastKey = pKeyInfo->lastKey, .pTableObj = pKeyInfo->pTable }; - info.tableId = 
((STable*)(pKeyInfo->pTable))->tableId; - assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); @@ -2218,7 +2218,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO SBlock* pBlock = pTableCheck->pCompInfo->blocks; sup.numOfBlocksPerTable[numOfQualTables] = pTableCheck->numOfBlocks; - char* buf = calloc(1, sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); + char* buf = malloc(sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); if (buf == NULL) { cleanBlockOrderSupporter(&sup, numOfQualTables); return TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -2690,21 +2690,6 @@ static int32_t getAllTableList(STable* pSuperTable, SArray* list) { return TSDB_CODE_SUCCESS; } -static void destroyHelper(void* param) { - if (param == NULL) { - return; - } - - tQueryInfo* pInfo = (tQueryInfo*)param; - if (pInfo->optr != TSDB_RELATION_IN) { - tfree(pInfo->q); - } else { - taosHashCleanup((SHashObj *)(pInfo->q)); - } - - free(param); -} - static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) { if (pQueryHandle->checkFiles) { // check if the query range overlaps with the file data block @@ -3618,8 +3603,6 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC for(int32_t i = 0; i < size; ++i) { STableKeyInfo *pKeyInfo = taosArrayGet(pTableList, i); - assert(((STable*)pKeyInfo->pTable)->type == TSDB_CHILD_TABLE); - tsdbRefTable(pKeyInfo->pTable); STableKeyInfo info = {.pTable = pKeyInfo->pTable, .lastKey = skey}; @@ -3641,106 +3624,8 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC return pTableGroup; } -static bool tableFilterFp(const void* pNode, void* param) { - tQueryInfo* pInfo = (tQueryInfo*) param; - - STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); - - char* val = NULL; - if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - val = (char*) TABLE_NAME(pTable); - } else { - val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId); - } - - if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) { - if (pInfo->optr == TSDB_RELATION_ISNULL) { - return (val == NULL) || isNull(val, pInfo->sch.type); - } else if (pInfo->optr == TSDB_RELATION_NOTNULL) { - return (val != NULL) && (!isNull(val, pInfo->sch.type)); - } - } else if (pInfo->optr == TSDB_RELATION_IN) { - int type = pInfo->sch.type; - if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { - int64_t v; - GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val); - return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); - } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { - uint64_t v; - GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); - return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); - } - else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { - double v; - GET_TYPED_DATA(v, double, pInfo->sch.type, val); - return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); - } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){ - return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val)); - } - - } - - int32_t ret = 0; - if (val == NULL) { //the val is possible to be null, so check it out carefully - ret = -1; // val is missing in table tags value pairs - } else { - ret = pInfo->compare(val, pInfo->q); - } - - switch (pInfo->optr) { - case 
TSDB_RELATION_EQUAL: { - return ret == 0; - } - case TSDB_RELATION_NOT_EQUAL: { - return ret != 0; - } - case TSDB_RELATION_GREATER_EQUAL: { - return ret >= 0; - } - case TSDB_RELATION_GREATER: { - return ret > 0; - } - case TSDB_RELATION_LESS_EQUAL: { - return ret <= 0; - } - case TSDB_RELATION_LESS: { - return ret < 0; - } - case TSDB_RELATION_LIKE: { - return ret == 0; - } - case TSDB_RELATION_MATCH: { - return ret == 0; - } - case TSDB_RELATION_IN: { - return ret == 1; - } - - default: - assert(false); - } - - return true; -} - -static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param); - -static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) { - // query according to the expression tree - SExprTraverseSupp supp = { - .nodeFilterFn = (__result_filter_fn_t) tableFilterFp, - .setupInfoFn = filterPrepare, - .pExtInfo = pSTable->tagSchema, - }; - - getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp); - tExprTreeDestroy(pExpr, destroyHelper); - return TSDB_CODE_SUCCESS; -} - -int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, - int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo, - SColIndex* pColIndex, int32_t numOfCols) { +int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, + STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid); @@ -3766,7 +3651,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons STSchema* pTagSchema = tsdbGetTableTagSchema(pTable); // no tags and tbname condition, all child tables of this stable are involved - if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) { + if (pTagCond == NULL || len == 0) { int32_t ret = getAllTableList(pTable, res); if (ret != TSDB_CODE_SUCCESS) { tsdbUnlockRepoMeta(tsdb); @@ -3788,25 +3673,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons tExprNode* expr = NULL; TRY(TSDB_MAX_TAG_CONDITIONS) { - expr = exprTreeFromTableName(tbnameCond); - if (expr == NULL) { - expr = exprTreeFromBinary(pTagCond, len); - } else { - CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL); - tExprNode* tagExpr = exprTreeFromBinary(pTagCond, len); - if (tagExpr != NULL) { - CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, tagExpr, NULL); - tExprNode* tbnameExpr = expr; - expr = calloc(1, sizeof(tExprNode)); - if (expr == NULL) { - THROW( TSDB_CODE_TDB_OUT_OF_MEMORY ); - } - expr->nodeType = TSQL_NODE_EXPR; - expr->_node.optr = (uint8_t)tagNameRelType; - expr->_node.pLeft = tagExpr; - expr->_node.pRight = tbnameExpr; - } - } + expr = exprTreeFromBinary(pTagCond, len); CLEANUP_EXECUTE(); } CATCH( code ) { @@ -3818,7 +3685,20 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons // TODO: more error handling } END_TRY - doQueryTableList(pTable, res, expr); + void *filterInfo = NULL; + + ret = filterInitFromTree(expr, &filterInfo, 0); + if (ret != TSDB_CODE_SUCCESS) { + terrno = ret; + goto _error; + } + + tsdbQueryTableList(pTable, res, filterInfo); + + filterFreeInfo(filterInfo); + + tExprTreeDestroy(expr, NULL); + pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); @@ -4002,254 +3882,115 @@ void 
tsdbDestroyTableGroup(STableGroupInfo *pGroupList) { pGroupList->numOfTables = 0; } -static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) { - SSkipListIterator* iter = tSkipListCreateIter(pSkipList); - // Scan each node in the skiplist by using iterator - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - if (exprTreeApplyFilter(pExpr, pNode, param)) { - taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode))); - } - } - - tSkipListDestroyIter(iter); -} - -typedef struct { - char* v; - int32_t optr; -} SEndPoint; - -typedef struct { - SEndPoint* start; - SEndPoint* end; -} SQueryCond; - -// todo check for malloc failure -static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { - int32_t optr = queryColInfo->optr; - - if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL || - optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) { - pCond->start = calloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - pCond->end = calloc(1, sizeof(SEndPoint)); - pCond->end->optr = queryColInfo->optr; - pCond->end->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_IN) { - pCond->start = calloc(1, sizeof(SEndPoint)); - pCond->start->optr = queryColInfo->optr; - pCond->start->v = queryColInfo->q; - } else if (optr == TSDB_RELATION_LIKE) { - assert(0); - } else if (optr == TSDB_RELATION_MATCH) { - assert(0); +static FORCE_INLINE int32_t tsdbGetTagDataFromId(void *param, int32_t id, void **data) { + STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode *)param)); + + if (id == TSDB_TBNAME_COLUMN_INDEX) { + *data = TABLE_NAME(pTable); + } else { + *data = tdGetKVRowValOfCol(pTable->tagVal, id); } return TSDB_CODE_SUCCESS; } -static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) { - SSkipListIterator* iter = NULL; - - SQueryCond cond = {0}; - if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) { - //todo handle error - } - - if (cond.start != NULL) { - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC); - } else { - iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? 
cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC); - } - - if (cond.start != NULL) { - int32_t optr = cond.start->optr; - - if (optr == TSDB_RELATION_EQUAL) { // equals - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal - bool comp = true; - int32_t ret = 0; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - assert(ret >= 0); - } - - if (ret == 0 && optr == TSDB_RELATION_GREATER) { - continue; - } else { - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; - } - } - } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal - bool comp = true; - - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - tSkipListDestroyIter(iter); - comp = true; - iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC); - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); - comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0); - if (comp) { - continue; - } - - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } +static void queryIndexedColumn(SSkipList* pSkipList, void* filterInfo, SArray* res) { + SSkipListIterator* iter = NULL; + char *startVal = NULL; + int32_t order = 0; + int32_t inRange = 0; + int32_t flag = 0; + bool all = false; + int8_t *addToResult = NULL; - } else if (optr == TSDB_RELATION_IN) { - while(tSkipListIterNext(iter)) { - SSkipListNode* pNode = tSkipListIterGet(iter); + filterGetIndexedColumnInfo(filterInfo, &startVal, &order, &flag); - int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v); - if (ret != 0) { - break; - } + tsdbDebug("filter index column start, order:%d, flag:%d", order, flag); - STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } - + while (order) { + if (FILTER_GET_FLAG(order, TSDB_ORDER_ASC)) { + iter = tSkipListCreateIterFromVal(pSkipList, startVal, pSkipList->type, TSDB_ORDER_ASC); + FILTER_CLR_FLAG(order, TSDB_ORDER_ASC); } else { - assert(0); + iter = tSkipListCreateIterFromVal(pSkipList, startVal, pSkipList->type, TSDB_ORDER_DESC); + FILTER_CLR_FLAG(order, TSDB_ORDER_DESC); } - } else { - int32_t optr = cond.end ? 
cond.end->optr : TSDB_RELATION_INVALID; - if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) { - bool comp = true; - int32_t ret = 0; - - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); - - if (comp) { - ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v); - assert(ret <= 0); - } + + while (tSkipListIterNext(iter)) { + SSkipListNode *pNode = tSkipListIterGet(iter); - if (ret == 0 && optr == TSDB_RELATION_LESS) { - continue; - } else { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - comp = false; // no need to compare anymore - } + if (inRange == 0 || !FILTER_GET_FLAG(flag, FI_ACTION_NO_NEED)) { + tsdbDebug("filter index column, filter it"); + filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId); + all = filterExecute(filterInfo, 1, &addToResult, NULL, 0); } - } else { - assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL); + + char *pData = SL_GET_NODE_DATA(pNode); - while (tSkipListIterNext(iter)) { - SSkipListNode *pNode = tSkipListIterGet(iter); + tsdbDebug("filter index column, table:%s, result:%d", ((STable *)pData)->name->data, all); - bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type); - if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) || - (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) { - STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL}; - taosArrayPush(result, &info); - } + if (all || (addToResult && *addToResult)) { + STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; + taosArrayPush(res, &info); + inRange = 1; + } else if (inRange){ + break; } } + + inRange = 0; + + tfree(addToResult); + tSkipListDestroyIter(iter); } - free(cond.start); - free(cond.end); - tSkipListDestroyIter(iter); + tsdbDebug("filter index column end"); } -static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) { +static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray* res) { SSkipListIterator* iter = tSkipListCreateIter(pSkipList); + int8_t *addToResult = NULL; while (tSkipListIterNext(iter)) { - bool addToResult = false; SSkipListNode *pNode = tSkipListIterGet(iter); + filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId); + char *pData = SL_GET_NODE_DATA(pNode); - tstr *name = (tstr*) tsdbGetTableName((void*) pData); - - // todo speed up by using hash - if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - if (pQueryInfo->optr == TSDB_RELATION_IN) { - addToResult = pQueryInfo->compare(name, pQueryInfo->q); - } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || pQueryInfo->optr == TSDB_RELATION_MATCH) { - addToResult = !pQueryInfo->compare(name, pQueryInfo->q); - } - } else { - addToResult = filterFp(pNode, pQueryInfo); - } - if (addToResult) { + bool all = filterExecute(filterInfo, 1, &addToResult, NULL, 0); + + if (all || (addToResult && *addToResult)) { STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(res, &info); - } + } } + tfree(addToResult); + tSkipListDestroyIter(iter); } -// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list -void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) { - if (pExpr == NULL) { - return; - } - - 
tExprNode *pLeft = pExpr->_node.pLeft; - tExprNode *pRight = pExpr->_node.pRight; - // column project - if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) { - assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY)); - - param->setupInfoFn(pExpr, param->pExtInfo); +static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo) { + STSchema* pTSSchema = pTable->tagSchema; + bool indexQuery = false; + SSkipList *pSkipList = pTable->pIndex; + + filterIsIndexedColumnQuery(filterInfo, pTSSchema->columns->colId, &indexQuery); + + if (indexQuery) { + queryIndexedColumn(pSkipList, filterInfo, pRes); + } else { + queryIndexlessColumn(pSkipList, filterInfo, pRes); + } - tQueryInfo *pQueryInfo = pExpr->_node.info; - if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr != TSDB_RELATION_MATCH - && pQueryInfo->optr != TSDB_RELATION_IN)) { - queryIndexedColumn(pSkipList, pQueryInfo, result); - } else { - queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn); - } + return TSDB_CODE_SUCCESS; +} - return; - } - // The value of hasPK is always 0. - uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK; - assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0); - //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes - applyFilterToSkipListNode(pSkipList, pExpr, result, param); -} diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index e41b544d00e55f7eece904c5957ef9c06063e6c3..40069d7d273caa14ce3b80467b25d68ea476fb75 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -33,6 +33,7 @@ extern "C" { #endif typedef void (*__cache_free_fn_t)(void*); +typedef void (*__cache_trav_fn_t)(void*, void*); typedef struct SCacheStatis { int64_t missCount; @@ -176,7 +177,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj); * @param fp * @return */ -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp); +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1); /** * stop background refresh worker thread diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index ef4c1096023bd670335603dee6ab274470de3ed7..1125516d34c65da1b5d0c47dadd126aa0b1959fa 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -84,6 +84,8 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight); int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight); int32_t compareStrPatternComp(const void* pLeft, const void* pRight); int32_t compareStrRegexComp(const void* pLeft, const void* pRight); +int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight); +int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight); int32_t compareFindItemInSet(const void *pLeft, const void* pRight); int32_t compareWStrPatternComp(const void* pLeft, const void* pRight); diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index d03ce6e0f1f34478951a84b2ab18020f5cbec92b..2c632d4a17f5394dc28df72414948855b89bc001 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 116 // 110 + 6 with lossy option +#define TSDB_CFG_MAX_NUM 123 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 @@ -89,6 +89,7 @@ void taosDumpGlobalCfg(); void taosInitConfigOption(SGlobalCfg cfg); SGlobalCfg * taosGetConfigOption(const char 
*option); +bool taosReadConfigOption(const char *option, char *value, char *value2, char *value3, int8_t cfgStatus, int8_t sourceType); #ifdef __cplusplus } diff --git a/src/util/inc/tlosertree.h b/src/util/inc/tlosertree.h index 4c731625dd5c7950c321b2180ca913e49362059b..58f2ca8c5c81408b35c2c9435357deeb2b0f13a4 100644 --- a/src/util/inc/tlosertree.h +++ b/src/util/inc/tlosertree.h @@ -26,7 +26,7 @@ typedef int (*__merge_compare_fn_t)(const void *, const void *, void *param); typedef struct SLoserTreeNode { int32_t index; - void * pData; + void *pData; } SLoserTreeNode; typedef struct SLoserTreeInfo { @@ -34,8 +34,7 @@ typedef struct SLoserTreeInfo { int32_t totalEntries; __merge_compare_fn_t comparFn; void * param; - - SLoserTreeNode *pNode; + SLoserTreeNode *pNode; } SLoserTreeInfo; uint32_t tLoserTreeCreate(SLoserTreeInfo **pTree, int32_t numOfEntries, void *param, __merge_compare_fn_t compareFn); diff --git a/src/util/inc/tthread.h b/src/util/inc/tthread.h new file mode 100644 index 0000000000000000000000000000000000000000..7443ad706dcbef529d857fe823cddd0cc1efbdd3 --- /dev/null +++ b/src/util/inc/tthread.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TTHREAD_H +#define TDENGINE_TTHREAD_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "os.h" +#include "taosdef.h" + +// create new thread +pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param); +// destory thread +bool taosDestoryThread(pthread_t* pthread); +// thread running return true +bool taosThreadRunning(pthread_t* pthread); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTHREAD_H diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 9ef203f082180a590cd190a433a66a622bdeaa57..b1a5a040f52c13d7895ee7fe805cc8c8de316703 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -747,17 +747,19 @@ void taosHashTableResize(SHashObj *pHashObj) { } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { - SHashNode *pNewNode = calloc(1, sizeof(SHashNode) + keyLen + dsize); + SHashNode *pNewNode = malloc(sizeof(SHashNode) + keyLen + dsize); if (pNewNode == NULL) { uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; } - pNewNode->keyLen = (uint32_t)keyLen; + pNewNode->keyLen = (uint32_t)keyLen; pNewNode->hashVal = hashVal; pNewNode->dataLen = (uint32_t) dsize; - pNewNode->count = 1; + pNewNode->count = 1; + pNewNode->removed = 0; + pNewNode->next = NULL; memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize); memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen); diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 2d6c513cb57ce1d524a1fb69df68702e624ede7b..007ce0682974d06bf506a82d8bbbc809092eb9e4 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -24,11 +24,12 @@ void* taosArrayInit(size_t size, size_t elemSize) { size = TARRAY_MIN_SIZE; } - SArray* pArray = calloc(1, sizeof(SArray)); + SArray* pArray = 
malloc(sizeof(SArray)); if (pArray == NULL) { return NULL; } + pArray->size = 0; pArray->pData = calloc(size, elemSize); if (pArray->pData == NULL) { free(pArray); diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 6665c25a90a7fcb0db83766b88c9c9c7fe047fbf..589d3d4fa57c42b472319673a72d2e7ab599689f 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -505,7 +505,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { typedef struct SHashTravSupp { SCacheObj* pCacheObj; int64_t time; - __cache_free_fn_t fp; + __cache_trav_fn_t fp; + void* param1; } SHashTravSupp; static bool travHashTableEmptyFn(void* param, void* data) { @@ -667,17 +668,17 @@ bool travHashTableFn(void* param, void* data) { } if (ps->fp) { - (ps->fp)(pNode->data); + (ps->fp)(pNode->data, ps->param1); } // do not remove element in hash table return true; } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_trav_fn_t fp, void* param1) { assert(pCacheObj != NULL); - SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time}; + SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1}; taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } @@ -748,7 +749,7 @@ void* taosCacheTimedRefresh(void *handle) { // refresh data in hash table if (elemInHash > 0) { int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, NULL); + doCacheRefresh(pCacheObj, now, NULL, NULL); } taosTrashcanEmpty(pCacheObj, false); @@ -766,13 +767,13 @@ void* taosCacheTimedRefresh(void *handle) { return NULL; } -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1) { if (pCacheObj == NULL) { return; } int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, fp); + doCacheRefresh(pCacheObj, now, fp, param1); } void taosStopCacheRefreshWorker(void) { diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 4c76724921404b2a799479c86166cd462220a99c..179fbd05a5a8f5ddfb28b68130f87e26ed4e522f 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -233,14 +233,20 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat int32_t i = 0; int32_t j = 0; + int32_t o = 0; + int32_t m = 0; while ((c = patterStr[i++]) != 0) { if (c == pInfo->matchAll) { /* Match "*" */ while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) { - if (c == pInfo->matchOne && (j > size || str[j++] == 0)) { - // empty string, return not match - return TSDB_PATTERN_NOWILDCARDMATCH; + if (c == pInfo->matchOne) { + if (j > size || str[j++] == 0) { + // empty string, return not match + return TSDB_PATTERN_NOWILDCARDMATCH; + } else { + ++o; + } } } @@ -249,9 +255,10 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat } char next[3] = {toupper(c), tolower(c), 0}; + m = o; while (1) { - size_t n = strcspn(str, next); - str += n; + size_t n = strcspn(str + m, next); + str += m + n; if (str[0] == 0 || (n >= size)) { break; @@ -261,12 +268,14 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat if (ret != TSDB_PATTERN_NOMATCH) { return ret; } + m = 0; } return TSDB_PATTERN_NOWILDCARDMATCH; } c1 = str[j++]; - + ++o; + if (j <= size) { if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; } if (c == c1 || tolower(c) == tolower(c1) || (c == 
pInfo->matchOne && c1 != 0)) { @@ -292,7 +301,7 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c if (c == matchAll) { /* Match "%" */ while ((c = patterStr[i++]) == matchAll || c == matchOne) { - if (c == matchOne && (j > size || str[j++] == 0)) { + if (c == matchOne && (j >= size || str[j++] == 0)) { return TSDB_PATTERN_NOWILDCARDMATCH; } } @@ -350,6 +359,14 @@ int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } +int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight) { + return compareStrRegexComp(pLeft, pRight); +} + +int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight) { + return compareStrRegexComp(pLeft, pRight) ? 0 : 1; +} + int32_t compareStrRegexComp(const void* pLeft, const void* pRight) { size_t sz = varDataLen(pRight); char *pattern = malloc(sz + 1); @@ -449,7 +466,9 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_DOUBLE: comparFn = compareDoubleVal; break; case TSDB_DATA_TYPE_BINARY: { if (optr == TSDB_RELATION_MATCH) { - comparFn = compareStrRegexComp; + comparFn = compareStrRegexCompMatch; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = compareStrRegexCompNMatch; } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ comparFn = compareStrPatternComp; } else if (optr == TSDB_RELATION_IN) { @@ -463,7 +482,9 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_NCHAR: { if (optr == TSDB_RELATION_MATCH) { - comparFn = compareStrRegexComp; + comparFn = compareStrRegexCompMatch; + } else if (optr == TSDB_RELATION_NMATCH) { + comparFn = compareStrRegexCompNMatch; } else if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; } else if (optr == TSDB_RELATION_IN) { diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 5a3dc3f9bcdee41f974e48f22b27beb2a1eb5a35..6ed9cff9fbabad06d00cb883933fefae443a1f5f 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -26,6 +26,11 @@ SGlobalCfg tsGlobalConfig[TSDB_CFG_MAX_NUM] = {{0}}; int32_t tsGlobalConfigNum = 0; +#define ATOI_JUDGE if ( !value && strcmp(input_value, "0") != 0) { \ + uError("atoi error, input value:%s",input_value); \ + return false; \ + } + static char *tsGlobalUnit[] = { " ", "(%)", @@ -44,12 +49,14 @@ char *tsCfgStatusStr[] = { "program argument list" }; -static void taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) { +static bool taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) { float value = (float)atof(input_value); + ATOI_JUDGE float *option = (float *)cfg->ptr; if (value < cfg->minValue || value > cfg->maxValue) { uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%f", cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { *option = value; @@ -57,16 +64,20 @@ static void taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %f", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], *option); + return false; } } + return true; } -static void taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) { +static bool taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) { double value = atof(input_value); + ATOI_JUDGE double *option = (double *)cfg->ptr; if (value < cfg->minValue || value > cfg->maxValue) { uError("config option:%s, input value:%s, 
out of range[%f, %f], use default value:%f", cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { *option = value; @@ -74,17 +85,21 @@ static void taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %f", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], *option); + return false; } } + return true; } -static void taosReadInt32Config(SGlobalCfg *cfg, char *input_value) { +static bool taosReadInt32Config(SGlobalCfg *cfg, char *input_value) { int32_t value = atoi(input_value); + ATOI_JUDGE int32_t *option = (int32_t *)cfg->ptr; if (value < cfg->minValue || value > cfg->maxValue) { uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { *option = value; @@ -92,16 +107,20 @@ static void taosReadInt32Config(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], *option); + return false; } } + return true; } -static void taosReadInt16Config(SGlobalCfg *cfg, char *input_value) { +static bool taosReadInt16Config(SGlobalCfg *cfg, char *input_value) { int32_t value = atoi(input_value); + ATOI_JUDGE int16_t *option = (int16_t *)cfg->ptr; if (value < cfg->minValue || value > cfg->maxValue) { uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { *option = (int16_t)value; @@ -109,16 +128,20 @@ static void taosReadInt16Config(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], *option); + return false; } } + return true; } -static void taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) { +static bool taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) { int32_t value = atoi(input_value); + ATOI_JUDGE uint16_t *option = (uint16_t *)cfg->ptr; if (value < cfg->minValue || value > cfg->maxValue) { uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { *option = (uint16_t)value; @@ -126,16 +149,20 @@ static void taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], *option); + return false; } } + return true; } -static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) { +static bool taosReadInt8Config(SGlobalCfg *cfg, char *input_value) { int32_t value = atoi(input_value); + ATOI_JUDGE int8_t *option = (int8_t *)cfg->ptr; if (value < cfg->minValue || value > cfg->maxValue) { uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { *option = (int8_t)value; @@ -143,8 +170,10 @@ static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config 
option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], *option); + return false; } } + return true; } static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { @@ -191,12 +220,13 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { return true; } -static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) { +static bool taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) { uint32_t value = taosInetAddr(input_value); char * option = (char *)cfg->ptr; if (value == INADDR_NONE) { uError("config option:%s, input value:%s, is not a valid ip address, use default value:%s", cfg->option, input_value, option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { strncpy(option, input_value, cfg->ptrLength); @@ -204,16 +234,19 @@ static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], option); + return false; } } + return true; } -static void taosReadStringConfig(SGlobalCfg *cfg, char *input_value) { +static bool taosReadStringConfig(SGlobalCfg *cfg, char *input_value) { int length = (int) strlen(input_value); char *option = (char *)cfg->ptr; if (length <= 0 || length > cfg->ptrLength) { uError("config option:%s, input value:%s, length out of range[0, %d], use default value:%s", cfg->option, input_value, cfg->ptrLength, option); + return false; } else { if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { strncpy(option, input_value, cfg->ptrLength); @@ -221,8 +254,10 @@ static void taosReadStringConfig(SGlobalCfg *cfg, char *input_value) { } else { uWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value, tsCfgStatusStr[cfg->cfgStatus], option); + return false; } } + return true; } static void taosReadLogOption(char *option, char *value) { @@ -258,51 +293,59 @@ SGlobalCfg *taosGetConfigOption(const char *option) { return NULL; } -static void taosReadConfigOption(const char *option, char *value, char *value2, char *value3) { +bool taosReadConfigOption(const char *option, char *value, char *value2, char *value3, + int8_t cfgStatus, int8_t sourceType) { + bool ret = false; for (int i = 0; i < tsGlobalConfigNum; ++i) { SGlobalCfg *cfg = tsGlobalConfig + i; if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_CONFIG)) continue; + if (sourceType != 0 && !(cfg->cfgType & sourceType)) continue; if (strcasecmp(cfg->option, option) != 0) continue; switch (cfg->valType) { case TAOS_CFG_VTYPE_INT8: - taosReadInt8Config(cfg, value); + ret = taosReadInt8Config(cfg, value); break; case TAOS_CFG_VTYPE_INT16: - taosReadInt16Config(cfg, value); + ret = taosReadInt16Config(cfg, value); break; case TAOS_CFG_VTYPE_INT32: - taosReadInt32Config(cfg, value); + ret = taosReadInt32Config(cfg, value); break; case TAOS_CFG_VTYPE_UINT16: - taosReadUInt16Config(cfg, value); + ret = taosReadUInt16Config(cfg, value); break; case TAOS_CFG_VTYPE_FLOAT: - taosReadFloatConfig(cfg, value); + ret = taosReadFloatConfig(cfg, value); break; case TAOS_CFG_VTYPE_DOUBLE: - taosReadDoubleConfig(cfg, value); + ret = taosReadDoubleConfig(cfg, value); break; case TAOS_CFG_VTYPE_STRING: - taosReadStringConfig(cfg, value); + ret = taosReadStringConfig(cfg, value); break; case TAOS_CFG_VTYPE_IPSTR: - taosReadIpStrConfig(cfg, value); + ret = taosReadIpStrConfig(cfg, value); break; case TAOS_CFG_VTYPE_DIRECTORY: - taosReadDirectoryConfig(cfg, value); + 
ret = taosReadDirectoryConfig(cfg, value); break; case TAOS_CFG_VTYPE_DATA_DIRCTORY: if (taosReadDirectoryConfig(cfg, value)) { - taosReadDataDirCfg(value, value2, value3); + taosReadDataDirCfg(value, value2, value3); + ret = true; } + ret = false; break; default: uError("config option:%s, input value:%s, can't be recognized", option, value); - break; + ret = false; + } + if(ret && cfgStatus == TAOS_CFG_CSTATUS_OPTION){ + cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION; } - break; } + return ret; } void taosInitConfigOption(SGlobalCfg cfg) { @@ -437,7 +480,7 @@ bool taosReadGlobalCfg() { if (vlen3 != 0) value3[vlen3] = 0; } - taosReadConfigOption(option, value, value2, value3); + taosReadConfigOption(option, value, value2, value3, TAOS_CFG_CSTATUS_FILE, 0); } fclose(fp); @@ -560,4 +603,4 @@ void taosDumpGlobalCfg() { taosDumpCfg(cfg); } -} +} \ No newline at end of file diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 8fb39cd1702fe670e44f2e0db1639a0f48ab5ab0..e3d022a6b0a4a929b6c06b2c305fb71b6980a865 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -116,6 +116,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES, "duplicated column nam TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH, "Invalid tag length") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, "Invalid column length") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_TAG_NAMES, "duplicated tag names") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON, "Invalid JSON format") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON_TYPE, "Invalid JSON data type") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") diff --git a/src/util/src/tlosertree.c b/src/util/src/tlosertree.c index e793548407ad37e2021fdba7db106db3a48fcaf0..0f104c4b63a36880a79ad564a0f837f9b09e7819 100644 --- a/src/util/src/tlosertree.c +++ b/src/util/src/tlosertree.c @@ -90,12 +90,13 @@ void tLoserTreeAdjust(SLoserTreeInfo* pTree, int32_t idx) { SLoserTreeNode kLeaf = pTree->pNode[idx]; while (parentId > 0) { - if (pTree->pNode[parentId].index == -1) { + SLoserTreeNode* pCur = &pTree->pNode[parentId]; + if (pCur->index == -1) { pTree->pNode[parentId] = kLeaf; return; } - int32_t ret = pTree->comparFn(&pTree->pNode[parentId], &kLeaf, pTree->param); + int32_t ret = pTree->comparFn(pCur, &kLeaf, pTree->param); if (ret < 0) { SLoserTreeNode t = pTree->pNode[parentId]; pTree->pNode[parentId] = kLeaf; diff --git a/src/util/src/tthread.c b/src/util/src/tthread.c new file mode 100644 index 0000000000000000000000000000000000000000..043b2de2f241297d209041294428dde2c55e974e --- /dev/null +++ b/src/util/src/tthread.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "os.h" +#include "tthread.h" +#include "tglobal.h" +#include "taosdef.h" +#include "tutil.h" +#include "tulog.h" +#include "taoserror.h" + +// create new thread +pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param) { + pthread_t* pthread = (pthread_t*)malloc(sizeof(pthread_t)); + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + int32_t ret = pthread_create(pthread, &thattr, __start_routine, param); + pthread_attr_destroy(&thattr); + + if (ret != 0) { + free(pthread); + return NULL; + } + return pthread; +} + +// destory thread +bool taosDestoryThread(pthread_t* pthread) { + if(pthread == NULL) return false; + if(taosThreadRunning(pthread)) { + pthread_cancel(*pthread); + pthread_join(*pthread, NULL); + } + + free(pthread); + return true; +} + +// thread running return true +bool taosThreadRunning(pthread_t* pthread) { + if(pthread == NULL) return false; + int ret = pthread_kill(*pthread, 0); + if(ret == ESRCH) + return false; + if(ret == EINVAL) + return false; + // alive + return true; +} diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index bee5a3a4d7b4a655fc2a468d5169105fc2c1e8cf..b84c3fed6f5055b21e952a79d7ba4fedc0ad306b 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -196,6 +196,7 @@ static SKeyword keywordTable[] = { {"INITIALLY", TK_INITIALLY}, {"INSTEAD", TK_INSTEAD}, {"MATCH", TK_MATCH}, + {"NMATCH", TK_NMATCH}, {"KEY", TK_KEY}, {"OF", TK_OF}, {"RAISE", TK_RAISE}, diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index f826c1aecd336a0eedeb3f02df0a7acc61895bb2..c823880ae2028c4bcfe26dbfc5cd60af62443722 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -560,5 +560,10 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { return vnodeSaveVersion(pVnode); } + // timer thread callback + if(status == TSDB_STATUS_COMMIT_NOBLOCK) { + qSolveCommitNoBlock(pVnode->tsdb, pVnode->qMgmt); + } + return 0; } diff --git a/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs new file mode 100644 index 0000000000000000000000000000000000000000..e6c3a598adc0bc4bcf5ea84953f649b418199555 --- /dev/null +++ b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + enum TDengineDataType + { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes + } + + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOL"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "TINYINT"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SMALLINT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + 
[DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); + } +} diff --git a/tests/connectorTest/C#Test/nanosupport/nanotest.cs b/tests/connectorTest/C#Test/nanosupport/nanotest.cs new file mode 100644 index 0000000000000000000000000000000000000000..b9eaefef8c740f8196a715282c8c28ffd79bbdac --- /dev/null +++ b/tests/connectorTest/C#Test/nanosupport/nanotest.cs @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; +namespace TDengineDriver +{ + class TDengineNanoTest + { + //connect parameters + private string host="localhost"; + private string configDir="/etc/taos"; + private string user="root"; + private string password="taosdata"; + private short port = 0; + + //sql parameters + private string dbName; + private string tbName; + private string precision; + + private bool isInsertData; + private bool isQueryData; + + private long tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineNanoTest tester = new TDengineNanoTest(); + //tester.ReadArgument(args); + + tester.InitTDengine(); + tester.ConnectTDengine(); + tester.execute("reset query cache"); + tester.execute("drop database if exists db"); + tester.execute("create database db precision 'ns'"); + tester.executeQuery("show databases;"); + //tester.checkData(0,16,"ns"); + tester.execute("use db"); + + Console.WriteLine("testing nanosecond support in 1st timestamp"); + tester.execute("create table tb (ts timestamp, speed int)"); + tester.execute("insert into tb values('2021-06-10 0:00:00.100000001', 1);"); + tester.execute("insert into tb values(1623254400150000000, 2);"); + tester.execute("import into tb values(1623254400300000000, 3);"); + tester.execute("import into tb values(1623254400299999999, 4);"); + tester.execute("insert into tb values(1623254400300000001, 5);"); + tester.execute("insert into tb values(1623254400999999999, 7);"); + tester.executeQuery("select * from tb;"); + + Console.WriteLine("expect data is "); + + tester.executeQuery("select * from tb;"); + + // Console.WriteLine("expected is : {0}", width); + // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001"); + // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000"); + // tdSql.checkData(2,0,"2021-06-10 0:00:00.299999999"); + // tdSql.checkData(3,1,3); + // tdSql.checkData(4,1,5); + // tdSql.checkData(5,1,7); + // tdSql.checkRows(6); + + tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000001' and ts < '2021-06-10 0:00:00.160000000';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000000' and ts < '2021-06-10 0:00:00.150000000';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts > 1623254400400000000;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts < '2021-06-10 00:00:00.400000000';"); + Console.WriteLine("expected is : 5 " ); + + tester.executeQuery("select count(*) from tb where ts > now + 400000000b;"); + Console.WriteLine("expected is : 0 " ); + + tester.executeQuery("select count(*) from tb where ts >= '2021-06-10 0:00:00.100000001';"); + Console.WriteLine("expected is : 6 " ); + + tester.executeQuery("select count(*) from tb where ts <= 1623254400300000000;"); + Console.WriteLine("expected is : 4 " ); + + tester.executeQuery("select count(*) from 
tb where ts = '2021-06-10 0:00:00.000000000';"); + Console.WriteLine("expected is : 0 " ); + + tester.executeQuery("select count(*) from tb where ts = 1623254400150000000;"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts = '2021-06-10 0:00:00.100000001';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;"); + Console.WriteLine("expected is : 5 " ); + + tester.executeQuery("select count(*) from tb where ts between '2021-06-10 0:00:00.299999999' and '2021-06-10 0:00:00.300000001';"); + Console.WriteLine("expected is : 3 " ); + + tester.executeQuery("select avg(speed) from tb interval(5000000000b);"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select avg(speed) from tb interval(100000000b)"); + Console.WriteLine("expected is : 4 " ); + + // tdSql.error("select avg(speed) from tb interval(1b);") + // tdSql.error("select avg(speed) from tb interval(999b);") + + tester.executeQuery("select avg(speed) from tb interval(1000b);"); + Console.WriteLine("expected is : 5 rows " ); + + tester.executeQuery("select avg(speed) from tb interval(1u);"); + Console.WriteLine("expected is : 5 rows " ); + + tester.executeQuery("select avg(speed) from tb interval(100000000b) sliding (100000000b);"); + Console.WriteLine("expected is : 4 rows " ); + + tester.executeQuery("select last(*) from tb"); + Console.WriteLine("expected is :1623254400999999999 " ); + + // tdSql.checkData(0,0, "2021-06-10 0:00:00.999999999") + // tdSql.checkData(0,0, 1623254400999999999) + + tester.executeQuery("select first(*) from tb"); + Console.WriteLine("expected is : 1623254400100000001" ); + // tdSql.checkData(0,0, 1623254400100000001); + // tdSql.checkData(0,0, "2021-06-10 0:00:00.100000001"); + + tester.execute("insert into tb values(now + 500000000b, 6);"); + tester.executeQuery("select * from tb;"); + // tdSql.checkRows(7); + + tester.execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);"); + tester.execute("insert into tb2 values('2021-06-10 0:00:00.100000001', 1, '2021-06-11 0:00:00.100000001');"); + tester.execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);"); + tester.execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);"); + tester.execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);"); + tester.execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);"); + tester.execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);"); + + tester.executeQuery("select * from tb2;"); + // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001"); + // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000"); + // tdSql.checkData(2,1,4); + // tdSql.checkData(3,1,3); + // tdSql.checkData(4,2,"2021-06-11 00:00:00.300000001"); + // tdSql.checkData(5,2,"2021-06-13 00:00:00.999999999"); + // tdSql.checkRows(6); + tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 > '2021-06-11 0:00:00.100000000' and ts2 < '2021-06-11 0:00:00.100000002';"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800500000000;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); 
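+ // Note: assuming the test host runs in UTC+8 (the offset the paired literals above imply),
+ // 1623340800000000000 ns corresponds to '2021-06-11 00:00:00.000000000', so each ts2 predicate
+ // in these checks can be written either as a raw nanosecond epoch or as the equivalent
+ // timestamp string and is expected to match the same rows.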
+ tester.executeQuery("select count(*) from tb2 where ts2 < '2021-06-11 0:00:00.400000000';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 > now + 400000000b;"); + Console.WriteLine("expected is : 0 " ); + // tdSql.checkRows(0); + + tester.executeQuery("select count(*) from tb2 where ts2 >= '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.executeQuery("select count(*) from tb2 where ts2 <= 1623340800400000000;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.000000000';"); + Console.WriteLine("expected is : 0 " ); + // tdSql.checkRows(0); + + tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.300000001';"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 = 1623340800300000001;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 between '2021-06-11 0:00:00.299999999' and '2021-06-11 0:00:00.300000001';"); + Console.WriteLine("expected is : 3 " ); + // tdSql.checkData(0,0,3); + + tester.executeQuery("select count(*) from tb2 where ts2 <> 1623513600999999999;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000000';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.executeQuery("select count(*) from tb2 where ts2 != 1623513600999999999;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000000';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.execute("insert into tb2 values(now + 500000000b, 6, now +2d);"); + tester.executeQuery("select * from tb2;"); + Console.WriteLine("expected is : 7 rows" ); + // tdSql.checkRows(7); + + // tdLog.debug("testing ill nanosecond format handling"); + tester.execute("create table tb3 (ts timestamp, speed int);"); + // tdSql.error("insert into tb3 values(16232544001500000, 2);"); + tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456', 2);"); + tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456000';"); + // tdSql.checkRows(1); + Console.WriteLine("expected is : 1 rows " ); + + tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456789000', 2);"); + tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456789';"); + // tdSql.checkRows(1); + Console.WriteLine("expected is : 1 rows " ); + + // check timezone support + Console.WriteLine("nsdb" ); + tester.execute("drop database if exists nsdb;"); + tester.execute("create database nsdb precision 
'ns';"); + tester.execute("use nsdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + Console.WriteLine("expected is : 1623258000123456789 " ); + // tdSql.checkData(0,0,1623258000123456789); + + + + Console.WriteLine("usdb" ); + tester.execute("create database usdb precision 'us';"); + tester.execute("use usdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + + Console.WriteLine("expected is : 1623258000123456 " ); + + Console.WriteLine("msdb" ); + tester.execute("drop database if exists msdb;"); + tester.execute("create database msdb precision 'ms';"); + tester.execute("use msdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + Console.WriteLine("expected is : 1623258000123 " ); + + + + tester.CloseConnection(); + tester.cleanup(); + + + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + Console.WriteLine("init..."); + TDengine.Init(); + Console.WriteLine("get connection starting..."); + } + + public void ConnectTDengine() + { + string db = ""; + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("connection failed: " + this.host); + ExitProgram(); + } + else + { + Console.WriteLine("[ OK ] Connection established."); + } + } + //EXECUTE SQL + public void execute(string sql) + { + DateTime dt1 = DateTime.Now; + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + //EXECUTE QUERY + public void executeQuery(string sql) + { + + DateTime dt1 = DateTime.Now; + long queryRows = 0; + IntPtr res = TDengine.Query(conn, sql); + getPrecision(res); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); + int fieldCount = TDengine.FieldCount(res); + + List metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + 
queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if (TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + Console.WriteLine(""); + + TDengine.FreeResult(res); + + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + Console.WriteLine("connection closed."); + } + } + + static void ExitProgram() + { + System.Environment.Exit(0); + } + + public void cleanup() + { + Console.WriteLine("clean up..."); + System.Environment.Exit(0); + } + + // method to get db precision + public void getPrecision(IntPtr res) + { + int psc=TDengine.ResultPrecision(res); + switch(psc) + { + case 0: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond"); + break; + case 1: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond"); + break; + case 2: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond"); + break; + } + + } + + // public void checkData(int x ,int y , long ts ){ + + // } + + } +} + diff --git a/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js new file mode 100644 index 0000000000000000000000000000000000000000..11812ac84b91d5c639a3b3bd73c8b81838c5cc23 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js @@ -0,0 +1,290 @@ +const taos = require('td2.0-connector'); +var conn = taos.connect({host:"localhost", user:"root", password:"taosdata", config:"/etc/taos",port:6030}) +var c1 = conn.cursor(); + + +function checkData(sql,row,col,data){ + + + console.log(sql) + c1.execute(sql) + var d = c1.fetchall(); + let checkdata = d[row][col]; + if (checkdata == data) { + + console.log('check pass') + } + else{ + console.log('check failed') + console.log('checked is 
:',checkdata) + console.log("expected is :",data) + + + } +} + + +// nano basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists db') +c1.execute('create database db precision "ns";') +c1.execute('use db'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100000001\', 1);') +c1.execute('insert into tb values(1623254400150000000, 2);') +c1.execute('import into tb values(1623254400300000000, 3);') +c1.execute('import into tb values(1623254400299999999, 4);') +c1.execute('insert into tb values(1623254400300000001, 5);') +c1.execute('insert into tb values(1623254400999999999, 7);') +c1.execute('insert into tb values(1623254400123456789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.100000001') +checkData(sql,1,0,'2021-06-10 00:00:00.123456789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999999') +checkData(sql,4,0,'2021-06-10 00:00:00.300000000') +checkData(sql,5,0,'2021-06-10 00:00:00.300000001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999999') +checkData(sql,0,1,1) +checkData(sql,1,1,8) +checkData(sql,2,1,2) +checkData(sql,5,1,5) + + + +// us basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists usdb') +c1.execute('create database usdb precision "us";') +c1.execute('use usdb'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100001\', 1);') +c1.execute('insert into tb values(1623254400150000, 2);') +c1.execute('import into tb values(1623254400300000, 3);') +c1.execute('import into tb values(1623254400299999, 4);') +c1.execute('insert into tb values(1623254400300001, 5);') +c1.execute('insert into tb values(1623254400999999, 7);') +c1.execute('insert into tb values(1623254400123789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') + +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.100001') +checkData(sql,1,0,'2021-06-10 00:00:00.123789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999') +checkData(sql,4,0,'2021-06-10 00:00:00.300000') +checkData(sql,5,0,'2021-06-10 00:00:00.300001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999') +checkData(sql,0,1,1) +checkData(sql,1,1,8) +checkData(sql,2,1,2) +checkData(sql,5,1,5) + +console.log('*******************************************') + +// ms basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists msdb') +c1.execute('create database msdb precision "ms";') +c1.execute('use msdb'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.101\', 1);') +c1.execute('insert into tb values(1623254400150, 2);') +c1.execute('import into tb values(1623254400300, 3);') +c1.execute('import into tb values(1623254400299, 4);') +c1.execute('insert into tb values(1623254400301, 5);') +c1.execute('insert into tb values(1623254400789, 7);') +c1.execute('insert into tb values(1623254400999, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.101') +checkData(sql,1,0,'2021-06-10 00:00:00.150') 
+checkData(sql,2,0,'2021-06-10 00:00:00.299') +checkData(sql,3,0,'2021-06-10 00:00:00.300') +checkData(sql,4,0,'2021-06-10 00:00:00.301') +checkData(sql,5,0,'2021-06-10 00:00:00.789') +checkData(sql,6,0,'2021-06-10 00:00:00.999') +checkData(sql,0,1,1) +checkData(sql,1,1,2) +checkData(sql,2,1,4) +checkData(sql,5,1,7) + +console.log('*******************************************') + +// offfical query result to show +// console.log('this is area about fetch all data') +// var query = c1.query(sql) +// var promise = query.execute(); +// promise.then(function(result) { +// result.pretty(); +// }); + +console.log('*******************************************') +c1.execute('use db') + +sql2 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;' +checkData(sql2,0,0,1) + +sql3 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';' +checkData(sql3,0,0,2) + +sql4 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;' +checkData(sql4,0,0,2) + +sql5 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';' +checkData(sql5,0,0,2) + +sql6 = 'select count(*) from tb where ts > 1623254400400000000;' +checkData(sql6,0,0,1) + +sql7 = 'select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';' +checkData(sql7,0,0,6) + +sql8 = 'select count(*) from tb where ts > now + 400000000b;' +c1.execute(sql8) + +sql9 = 'select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';' +checkData(sql9,0,0,7) + +sql10 = 'select count(*) from tb where ts <= 1623254400300000000;' +checkData(sql10,0,0,5) + +sql11 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';' +c1.execute(sql11) + +sql12 = 'select count(*) from tb where ts = 1623254400150000000;' +checkData(sql12,0,0,1) + +sql13 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';' +checkData(sql13,0,0,1) + +sql14 = 'select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;' +checkData(sql14,0,0,6) + +sql15 = 'select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';' +checkData(sql15,0,0,3) + +sql16 = 'select avg(speed) from tb interval(5000000000b);' +checkData(sql16,0,0,'2021-06-10 00:00:00.000000000') + +sql17 = 'select avg(speed) from tb interval(100000000b)' +checkData(sql17,0,1,3.6666666666666665) +checkData(sql17,1,1,4.000000000) + +checkData(sql17,2,0,'2021-06-10 00:00:00.300000000') +checkData(sql17,3,0,'2021-06-10 00:00:00.900000000') + +console.log("print break ") + +// sql18 = 'select avg(speed) from tb interval(999b)' +// c1.execute(sql18) + +console.log("print break2 ") +sql19 = 'select avg(speed) from tb interval(1u);' +checkData(sql19,2,1,2.000000000) +checkData(sql19,3,0,'2021-06-10 00:00:00.299999000') + +sql20 = 'select avg(speed) from tb interval(100000000b) sliding (100000000b);' +checkData(sql20,2,1,4.000000000) +checkData(sql20,3,0,'2021-06-10 00:00:00.900000000') + +sql21 = 'select last(*) from tb;' +checkData(sql21,0,0,'2021-06-10 00:00:00.999999999') + +sql22 = 'select first(*) from tb;' +checkData(sql22,0,0,'2021-06-10 00:00:00.100000001') + +// timezone support + +console.log('testing nanosecond support in other timestamps') + +c1.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);') +c1.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');') +c1.execute('insert 
into tb2 values(1623254400150000000, 2, 1623340800150000000);') +c1.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);') +c1.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);') +c1.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);') +c1.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);') + +sql23 = 'select * from tb2;' +checkData(sql23,0,0,'2021-06-10 00:00:00.100000001') +checkData(sql23,1,0,'2021-06-10 00:00:00.150000000') +checkData(sql23,2,1,4) +checkData(sql23,3,1,3) +checkData(sql23,4,2,'2021-06-11 00:00:00.300000001') +checkData(sql23,5,2,'2021-06-13 00:00:00.999999999') + +sql24 = 'select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';' +checkData(sql24,0,0,6) + +sql25 = 'select count(*) from tb2 where ts2 <= 1623340800400000000;' +checkData(sql25,0,0,5) + +sql26 = 'select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';' +checkData(sql26,0,0,1) + +sql27 = 'select count(*) from tb2 where ts2 = 1623340800300000001;' +checkData(sql27,0,0,1) + +sql28 = 'select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;' +checkData(sql28,0,0,5) + +sql29 = 'select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';' +checkData(sql29,0,0,3) + +sql30 = 'select count(*) from tb2 where ts2 <> 1623513600999999999;' +checkData(sql30,0,0,5) + +sql31 = 'select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';' +checkData(sql31,0,0,5) + +sql32 = 'select count(*) from tb2 where ts2 != 1623513600999999999;' +checkData(sql32,0,0,5) + +sql33 = 'select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';' +checkData(sql33,0,0,5) + +c1.execute('insert into tb2 values(now + 500000000b, 6, now +2d);') + +sql34 = 'select count(*) from tb2;' +checkData(sql34,0,0,7) + + +// check timezone support + +c1.execute('use db;') +c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10T0:00:00.123456789+07:00" , 1.0);' ) +sql35 = 'select first(*) from stb1;' +checkData(sql35,0,0,'2021-06-10 01:00:00.123456789') + +c1.execute('use usdb;') +c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' ) +sql36 = 'select first(*) from stb1;' +checkData(sql36,0,0,'2021-06-10 01:00:00.123456') + +c1.execute('use msdb;') +c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' ) +sql36 = 'select first(*) from stb1;' +checkData(sql36,0,0,'2021-06-10 01:00:00.123') + + + + + + + diff --git a/tests/connectorTest/nodejsTest/nodetaos/cinterface.js b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js new file mode 100644 index 0000000000000000000000000000000000000000..03d27e5593ccb15d8ff47cd3c3dedba765d14fc1 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js @@ -0,0 +1,587 @@ +/** + * C Interface with TDengine Module + * @module CTaosInterface + */ + +const ref = require('ref-napi'); +const os = require('os'); +const ffi = require('ffi-napi'); +const ArrayType = require('ref-array-napi'); +const Struct = 
require('ref-struct-napi'); +const FieldTypes = require('./constants'); +const errors = require('./error'); +const TaosObjects = require('./taosobjects'); +const { NULL_POINTER } = require('ref-napi'); + +module.exports = CTaosInterface; + +function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let time = data.readInt64LE(currOffset); + currOffset += nbytes; + res.push(new TaosObjects.TaosTimestamp(time, precision)); + } + return res; +} +function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = new Array(data.length); + for (let i = 0; i < data.length; i++) { + if (data[i] == 0) { + res[i] = false; + } + else if (data[i] == 1) { + res[i] = true; + } + else if (data[i] == FieldTypes.C_BOOL_NULL) { + res[i] = null; + } + } + return res; +} +function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readIntLE(currOffset, 1); + res.push(d == FieldTypes.C_TINYINT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readIntLE(currOffset, 2); + res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readInt32LE(currOffset); + res.push(d == FieldTypes.C_INT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readInt64LE(currOffset); + res.push(d == FieldTypes.C_BIGINT_NULL ? null : BigInt(d)); + currOffset += nbytes; + } + return res; +} +function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = parseFloat(data.readFloatLE(currOffset).toFixed(5)); + res.push(isNaN(d) ? null : d); + currOffset += nbytes; + } + return res; +} +function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = parseFloat(data.readDoubleLE(currOffset).toFixed(16)); + res.push(isNaN(d) ? 
null : d); + currOffset += nbytes; + } + return res; +} + +function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + res.push(dataEntry.toString("utf-8")); + currOffset += nbytes; + } + return res; +} + +// Object with all the relevant converters from pblock data to javascript readable data +let convertFunctions = { + [FieldTypes.C_BOOL]: convertBool, + [FieldTypes.C_TINYINT]: convertTinyint, + [FieldTypes.C_SMALLINT]: convertSmallint, + [FieldTypes.C_INT]: convertInt, + [FieldTypes.C_BIGINT]: convertBigint, + [FieldTypes.C_FLOAT]: convertFloat, + [FieldTypes.C_DOUBLE]: convertDouble, + [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_TIMESTAMP]: convertTimestamp, + [FieldTypes.C_NCHAR]: convertNchar +} + +// Define TaosField structure +var char_arr = ArrayType(ref.types.char); +var TaosField = Struct({ + 'name': char_arr, +}); +TaosField.fields.name.type.size = 65; +TaosField.defineProperty('type', ref.types.char); +TaosField.defineProperty('bytes', ref.types.short); + + +/** + * + * @param {Object} config - Configuration options for the interface + * @return {CTaosInterface} + * @class CTaosInterface + * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to + * access this class directly and use it unless you understand what these functions do. + */ +function CTaosInterface(config = null, pass = false) { + ref.types.char_ptr = ref.refType(ref.types.char); + ref.types.void_ptr = ref.refType(ref.types.void); + ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); + /*Declare a bunch of functions first*/ + /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. 
The connection._conn buffer is supplied for pointers to TAOS * */ + + if ('win32' == os.platform()) { + taoslibname = 'taos'; + } else { + taoslibname = 'libtaos'; + } + this.libtaos = ffi.Library(taoslibname, { + 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]], + 'taos_init': [ref.types.void, []], + //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) + 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]], + //void taos_close(TAOS *taos) + 'taos_close': [ref.types.void, [ref.types.void_ptr]], + //int *taos_fetch_lengths(TAOS_RES *res); + 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]], + //int taos_query(TAOS *taos, char *sqlstr) + 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]], + //int taos_affected_rows(TAOS_RES *res) + 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]], + //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) + 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]], + //int taos_num_fields(TAOS_RES *res); + 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]], + //TAOS_ROW taos_fetch_row(TAOS_RES *res) + //TAOS_ROW is void **, but we set the return type as a reference instead to get the row + 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]], + 'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], + //int taos_result_precision(TAOS_RES *res) + 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]], + //void taos_free_result(TAOS_RES *res) + 'taos_free_result': [ref.types.void, [ref.types.void_ptr]], + //int taos_field_count(TAOS *taos) + 'taos_field_count': [ref.types.int, [ref.types.void_ptr]], + //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) + 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]], + //int taos_errno(TAOS *taos) + 'taos_errno': [ref.types.int, [ref.types.void_ptr]], + //char *taos_errstr(TAOS *taos) + 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]], + //void taos_stop_query(TAOS_RES *res); + 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]], + //char *taos_get_server_info(TAOS *taos); + 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]], + //char *taos_get_client_info(); + 'taos_get_client_info': [ref.types.char_ptr, []], + + // ASYNC + // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) + 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]], + // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); + 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]], + + // Subscription + //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) + 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], + // TAOS_RES *taos_consume(TAOS_SUB *tsub) + 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]], + //void taos_unsubscribe(TAOS_SUB *tsub); + 'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]], + + // Continuous Query + //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, 
TAOS_ROW row), + // int64_t stime, void *param, void (*callback)(void *)); + 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]], + //void taos_close_stream(TAOS_STREAM *tstr); + 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]] + + }); + if (pass == false) { + if (config == null) { + this._config = ref.alloc(ref.types.char_ptr, ref.NULL); + } + else { + try { + this._config = ref.allocCString(config); + } + catch (err) { + throw "Attribute Error: config is expected as a str"; + } + } + if (config != null) { + this.libtaos.taos_options(3, this._config); + } + this.libtaos.taos_init(); + } + return this; +} +CTaosInterface.prototype.config = function config() { + return this._config; +} +CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) { + let _host, _user, _password, _db, _port; + try { + _host = host != null ? ref.allocCString(host) : ref.NULL; + } + catch (err) { + throw "Attribute Error: host is expected as a str"; + } + try { + _user = ref.allocCString(user) + } + catch (err) { + throw "Attribute Error: user is expected as a str"; + } + try { + _password = ref.allocCString(password); + } + catch (err) { + throw "Attribute Error: password is expected as a str"; + } + try { + _db = db != null ? ref.allocCString(db) : ref.NULL; + } + catch (err) { + throw "Attribute Error: db is expected as a str"; + } + try { + _port = ref.alloc(ref.types.int, port); + } + catch (err) { + throw TypeError("port is expected as an int") + } + let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port); + if (ref.isNull(connection)) { + throw new errors.TDError('Failed to connect to TDengine'); + } + else { + console.log('Successfully connected to TDengine'); + } + return connection; +} +CTaosInterface.prototype.close = function close(connection) { + this.libtaos.taos_close(connection); + console.log("Connection is closed"); +} +CTaosInterface.prototype.query = function query(connection, sql) { + return this.libtaos.taos_query(connection, ref.allocCString(sql)); +} +CTaosInterface.prototype.affectedRows = function affectedRows(result) { + return this.libtaos.taos_affected_rows(result); +} +CTaosInterface.prototype.useResult = function useResult(result) { + + let fields = []; + let pfields = this.fetchFields(result); + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 65, i)), + type: pfields[i + 65], + bytes: pfields[i + 66] + }) + } + } + return fields; +} +CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { + let pblock = ref.NULL_POINTER; + let num_of_rows = this.libtaos.taos_fetch_block(result, pblock); + if (ref.isNull(pblock.deref()) == true) { + return { block: null, num_of_rows: 0 }; + } + + var fieldL = this.libtaos.taos_fetch_lengths(result); + let precision = this.libtaos.taos_result_precision(result); + + var fieldlens = []; + + if (ref.isNull(fieldL) == false) { + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 4, i * 4); + let len = plen.readInt32LE(0); + fieldlens.push(len); + } + } + + let blocks = new Array(fields.length); + blocks.fill(null); + num_of_rows = Math.abs(num_of_rows); + let offset = 0; + let ptr 
= pblock.deref(); + + for (let i = 0; i < fields.length; i++) { + pdata = ref.reinterpret(ptr, 8, i * 8); + if (ref.isNull(pdata.readPointer())) { + blocks[i] = new Array(); + } else { + pdata = ref.ref(pdata.readPointer()); + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision); + } + } + return { blocks: blocks, num_of_rows } +} +CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) { + let row = this.libtaos.taos_fetch_row(result); + return row; +} +CTaosInterface.prototype.freeResult = function freeResult(result) { + this.libtaos.taos_free_result(result); + result = null; +} +/** Number of fields returned in this result handle, must use with async */ +CTaosInterface.prototype.numFields = function numFields(result) { + return this.libtaos.taos_num_fields(result); +} +// Fetch fields count by connection, the latest query +CTaosInterface.prototype.fieldsCount = function fieldsCount(result) { + return this.libtaos.taos_field_count(result); +} +CTaosInterface.prototype.fetchFields = function fetchFields(result) { + return this.libtaos.taos_fetch_fields(result); +} +CTaosInterface.prototype.errno = function errno(result) { + return this.libtaos.taos_errno(result); +} +CTaosInterface.prototype.errStr = function errStr(result) { + return ref.readCString(this.libtaos.taos_errstr(result)); +} +// Async +CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) { + // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param) + callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback); + this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param); + return param; +} +/** Asynchrnously fetches the next block of rows. Wraps callback and transfers a 4th argument to the cursor, the row data as blocks in javascript form + * Note: This isn't a recursive function, in order to fetch all data either use the TDengine cursor object, TaosQuery object, or implement a recrusive + * function yourself using the libtaos.taos_fetch_rows_a function + */ +CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, param = ref.ref(ref.NULL)) { + // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); + var cti = this; + // wrap callback with a function so interface can access the numOfRows value, needed in order to properly process the binary data + let asyncCallbackWrapper = function (param2, result2, numOfRows2) { + // Data preparation to pass to cursor. Could be bottleneck in query execution callback times. 
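+ // This wrapper pulls a single row with taos_fetch_row, reads the per-column lengths and
+ // the result precision, converts every column with the converter registered for its type,
+ // and only then invokes the user callback with the converted blocks. Note that the null
+ // check further below reads `pdata`, which is only ever assigned (as an implicit global)
+ // inside fetchBlock(), so it appears to depend on a prior fetchBlock() call.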
+ let row = cti.libtaos.taos_fetch_row(result2); + let fields = cti.fetchFields_a(result2); + + let precision = cti.libtaos.taos_result_precision(result2); + let blocks = new Array(fields.length); + blocks.fill(null); + numOfRows2 = Math.abs(numOfRows2); + let offset = 0; + var fieldL = cti.libtaos.taos_fetch_lengths(result); + var fieldlens = []; + if (ref.isNull(fieldL) == false) { + + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 8, i * 8); + let len = ref.get(plen, 0, ref.types.int32); + fieldlens.push(len); + } + } + if (numOfRows2 > 0) { + for (let i = 0; i < fields.length; i++) { + if (ref.isNull(pdata.readPointer())) { + blocks[i] = new Array(); + } else { + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + let prow = ref.reinterpret(row, 8, i * 8); + prow = prow.readPointer(); + prow = ref.ref(prow); + blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision); + //offset += fields[i]['bytes'] * numOfRows2; + } + } + } + callback(param2, result2, numOfRows2, blocks); + } + asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper); + this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param); + return param; +} +// Fetch field meta data by result handle +CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) { + let pfields = this.fetchFields(result); + let pfieldscount = this.numFields(result); + let fields = []; + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 64 = name //65 = type, 66 - 67 = bytes + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 65, i)), + type: pfields[i + 65], + bytes: pfields[i + 66] + }) + } + } + return fields; +} +// Stop a query by result handle +CTaosInterface.prototype.stopQuery = function stopQuery(result) { + if (result != null) { + this.libtaos.taos_stop_query(result); + } + else { + throw new errors.ProgrammingError("No result handle passed to stop query"); + } +} +CTaosInterface.prototype.getServerInfo = function getServerInfo(connection) { + return ref.readCString(this.libtaos.taos_get_server_info(connection)); +} +CTaosInterface.prototype.getClientInfo = function getClientInfo() { + return ref.readCString(this.libtaos.taos_get_client_info()); +} + +// Subscription +CTaosInterface.prototype.subscribe = function subscribe(connection, restart, topic, sql, interval) { + let topicOrig = topic; + let sqlOrig = sql; + try { + sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL); + } + catch (err) { + throw "Attribute Error: sql is expected as a str"; + } + try { + topic = topic != null ? 
ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL); + } + catch (err) { + throw TypeError("topic is expected as a str"); + } + + restart = ref.alloc(ref.types.int, restart); + + let subscription = this.libtaos.taos_subscribe(connection, restart, topic, sql, null, null, interval); + if (ref.isNull(subscription)) { + throw new errors.TDError('Failed to subscribe to TDengine | Database: ' + dbOrig + ', Table: ' + tableOrig); + } + else { + console.log('Successfully subscribed to TDengine - Topic: ' + topicOrig); + } + return subscription; +} + +CTaosInterface.prototype.consume = function consume(subscription) { + let result = this.libtaos.taos_consume(subscription); + let fields = []; + let pfields = this.fetchFields(result); + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 64, i)), + bytes: pfields[i + 64], + type: pfields[i + 66] + }) + } + } + + let data = []; + while (true) { + let { blocks, num_of_rows } = this.fetchBlock(result, fields); + if (num_of_rows == 0) { + break; + } + for (let i = 0; i < num_of_rows; i++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let j = 0; j < fields.length; j++) { + rowBlock[j] = blocks[j][i]; + } + data[data.length - 1] = (rowBlock); + } + } + return { data: data, fields: fields, result: result }; +} +CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) { + //void taos_unsubscribe(TAOS_SUB *tsub); + this.libtaos.taos_unsubscribe(subscription); +} + +// Continuous Query +CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) { + try { + sql = ref.allocCString(sql); + } + catch (err) { + throw "Attribute Error: sql string is expected as a str"; + } + var cti = this; + let asyncCallbackWrapper = function (param2, result2, row) { + let fields = cti.fetchFields_a(result2); + let precision = cti.libtaos.taos_result_precision(result2); + let blocks = new Array(fields.length); + blocks.fill(null); + let numOfRows2 = 1; + let offset = 0; + if (numOfRows2 > 0) { + for (let i = 0; i < fields.length; i++) { + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision); + offset += fields[i]['bytes'] * numOfRows2; + } + } + callback(param2, result2, blocks, fields); + } + asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper); + asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback); + let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper); + if (ref.isNull(streamHandle)) { + throw new errors.TDError('Failed to open a stream with TDengine'); + return false; + } + else { + console.log("Succesfully opened stream"); + return streamHandle; + } +} +CTaosInterface.prototype.closeStream = function closeStream(stream) { + this.libtaos.taos_close_stream(stream); + console.log("Closed stream"); +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/connection.js b/tests/connectorTest/nodejsTest/nodetaos/connection.js new file mode 
100644 index 0000000000000000000000000000000000000000..08186f87053ad0ed0982ec8941f0cf38c4ad0467 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/connection.js @@ -0,0 +1,84 @@ +const TDengineCursor = require('./cursor') +const CTaosInterface = require('./cinterface') +module.exports = TDengineConnection; + +/** + * TDengine Connection Class + * @param {object} options - Options for configuring the connection with TDengine + * @return {TDengineConnection} + * @class TDengineConnection + * @constructor + * @example + * //Initialize a new connection + * var conn = new TDengineConnection({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) + * + */ +function TDengineConnection(options) { + this._conn = null; + this._host = null; + this._user = "root"; //The default user + this._password = "taosdata"; //The default password + this._database = null; + this._port = 0; + this._config = null; + this._chandle = null; + this._configConn(options) + return this; +} +/** + * Configure the connection to TDengine + * @private + * @memberof TDengineConnection + */ +TDengineConnection.prototype._configConn = function _configConn(options) { + if (options['host']) { + this._host = options['host']; + } + if (options['user']) { + this._user = options['user']; + } + if (options['password']) { + this._password = options['password']; + } + if (options['database']) { + this._database = options['database']; + } + if (options['port']) { + this._port = options['port']; + } + if (options['config']) { + this._config = options['config']; + } + this._chandle = new CTaosInterface(this._config); + this._conn = this._chandle.connect(this._host, this._user, this._password, this._database, this._port); +} +/** Close the connection to TDengine */ +TDengineConnection.prototype.close = function close() { + this._chandle.close(this._conn); +} +/** + * Initialize a new cursor to interact with TDengine with + * @return {TDengineCursor} + */ +TDengineConnection.prototype.cursor = function cursor() { + //Pass the connection object to the cursor + return new TDengineCursor(this); +} +TDengineConnection.prototype.commit = function commit() { + return this; +} +TDengineConnection.prototype.rollback = function rollback() { + return this; +} +/** + * Clear the results from connector + * @private + */ +/* + TDengineConnection.prototype._clearResultSet = function _clearResultSet() { + var result = this._chandle.useResult(this._conn).result; + if (result) { + this._chandle.freeResult(result) + } +} +*/ diff --git a/tests/connectorTest/nodejsTest/nodetaos/constants.js b/tests/connectorTest/nodejsTest/nodetaos/constants.js new file mode 100644 index 0000000000000000000000000000000000000000..cd6a0c9fbaff51e7f0ecd3ab06907b7b1fb7dcb1 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/constants.js @@ -0,0 +1,76 @@ +/** + * Contains the the definitions/values assigned to various field types + * @module FieldTypes + */ +/** + * TDengine Field Types and their type codes + * @typedef {Object} FieldTypes + * @global + * @property {number} C_NULL - Null + * @property {number} C_BOOL - Boolean. Note, 0x02 is the C_BOOL_NULL value. + * @property {number} C_TINYINT - Tiny Int, values in the range [-2^7+1, 2^7-1]. Note, -2^7 has been used as the C_TINYINT_NULL value + * @property {number} C_SMALLINT - Small Int, values in the range [-2^15+1, 2^15-1]. Note, -2^15 has been used as the C_SMALLINT_NULL value + * @property {number} C_INT - Int, values in the range [-2^31+1, 2^31-1]. 
Note, -2^31 has been used as the C_INT_NULL value + * @property {number} C_BIGINT - Big Int, values in the range [-2^59, 2^59]. + * @property {number} C_FLOAT - Float, values in the range [-3.4E38, 3.4E38], accurate up to 6-7 decimal places. + * @property {number} C_DOUBLE - Double, values in the range [-1.7E308, 1.7E308], accurate up to 15-16 decimal places. + * @property {number} C_BINARY - Binary, encoded in utf-8. + * @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured in number of milliseconds passed after + 1970-01-01 08:00:00.000 GMT. + * @property {number} C_NCHAR - NChar field type encoded in ASCII, a wide string. + * + * + * + * @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result). + * @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result). + */ +module.exports = { + C_NULL : 0, + C_BOOL : 1, + C_TINYINT : 2, + C_SMALLINT : 3, + C_INT : 4, + C_BIGINT : 5, + C_FLOAT : 6, + C_DOUBLE : 7, + C_BINARY : 8, + C_TIMESTAMP : 9, + C_NCHAR : 10, + // NULL value definition + // NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL : 2, + C_TINYINT_NULL : -128, + C_SMALLINT_NULL : -32768, + C_INT_NULL : -2147483648, + C_BIGINT_NULL : -9223372036854775808, + C_FLOAT_NULL : 2146435072, + C_DOUBLE_NULL : -9223370937343148032, + C_NCHAR_NULL : 4294967295, + C_BINARY_NULL : 255, + C_TIMESTAMP_MILLI : 0, + C_TIMESTAMP_MICRO : 1, + getType, +} + +const typeCodesToName = { + 0 : 'Null', + 1 : 'Boolean', + 2 : 'Tiny Int', + 3 : 'Small Int', + 4 : 'Int', + 5 : 'Big Int', + 6 : 'Float', + 7 : 'Double', + 8 : 'Binary', + 9 : 'Timestamp', + 10 : 'Nchar', +} + +/** + * @function + * @param {number} typecode - The code to get the name of the type for + * @return {string} Name of the field type + */ +function getType(typecode) { + return typeCodesToName[typecode]; +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/cursor.js b/tests/connectorTest/nodejsTest/nodetaos/cursor.js new file mode 100644 index 0000000000000000000000000000000000000000..f879d89d487eae9290fd9fc70259699f27937928 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/cursor.js @@ -0,0 +1,476 @@ +const ref = require('ref-napi'); +require('./globalfunc.js') +const CTaosInterface = require('./cinterface') +const errors = require('./error') +const TaosQuery = require('./taosquery') +const { PerformanceObserver, performance } = require('perf_hooks'); +module.exports = TDengineCursor; + +/** + * @typedef {Object} Buffer - A Node.js buffer. Please refer to {@link https://nodejs.org/api/buffer.html} for more details + * @global + */ + +/** + * @class TDengineCursor + * @classdesc The TDengine Cursor works directly with the C Interface which works with TDengine. It refrains from + * returning parsed data and majority of functions return the raw data such as cursor.fetchall() as compared to the TaosQuery class which + * has functions that "prettify" the data and add more functionality and can be used through cursor.query("your query"). Instead of + * promises, the class and its functions use callbacks. + * @param {TDengineConnection} - The TDengine Connection this cursor uses to interact with TDengine + * @property {data} - Latest retrieved data from query execution. 
It is an empty array by default + * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved + * @since 1.0.0 + */ +function TDengineCursor(connection = null) { + //All parameters are store for sync queries only. + this._rowcount = -1; + this._connection = null; + this._result = null; + this._fields = null; + this.data = []; + this.fields = null; + if (connection != null) { + this._connection = connection + this._chandle = connection._chandle //pass through, just need library loaded. + } + else { + throw new errors.ProgrammingError("A TDengineConnection object is required to be passed to the TDengineCursor"); + } + +} +/** + * Get the row counts of the latest query + * @since 1.0.0 + * @return {number} Rowcount + */ +TDengineCursor.prototype.rowcount = function rowcount() { + return this._rowcount; +} +/** + * Close the cursor by setting its connection to null and freeing results from the connection and resetting the results it has stored + * @return {boolean} Whether or not the cursor was succesfully closed + * @since 1.0.0 + */ +TDengineCursor.prototype.close = function close() { + if (this._connection == null) { + return false; + } + this._connection._clearResultSet(); + this._reset_result(); + this._connection = null; + return true; +} +/** + * Create a TaosQuery object to perform a query to TDengine and retrieve data. + * @param {string} operation - The operation string to perform a query on + * @param {boolean} execute - Whether or not to immedietely perform the query. Default is false. + * @return {TaosQuery | Promise} A TaosQuery object + * @example + * var query = cursor.query("select count(*) from meterinfo.meters"); + * query.execute(); + * @since 1.0.6 + */ +TDengineCursor.prototype.query = function query(operation, execute = false) { + return new TaosQuery(operation, this, execute); +} + +/** + * Execute a query. Also stores all the field meta data returned from the query into cursor.fields. It is preferable to use cursor.query() to create + * queries and execute them instead of using the cursor object directly. + * @param {string} operation - The query operation to execute in the taos shell + * @param {Object} options - Execution options object. quiet : true turns off logging from queries + * @param {boolean} options.quiet - True if you want to surpress logging such as "Query OK, 1 row(s) ..." 
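+ * @example
+ * // minimal sketch, assuming an existing database `db` with a table `tb (ts timestamp, speed int)`
+ * cursor.execute('use db');
+ * var affected = cursor.execute('insert into tb values (now, 1)'); // returns the affected row count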
+ * @param {function} callback - A callback function to execute after the query is made to TDengine + * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query + * @since 1.0.0 + */ +TDengineCursor.prototype.execute = function execute(operation, options, callback) { + if (operation == undefined) { + throw new errors.ProgrammingError('No operation passed as argument'); + return null; + } + + if (typeof options == 'function') { + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + + this._reset_result(); + + let stmt = operation; + let time = 0; + let res; + if (options['quiet'] != true) { + const obs = new PerformanceObserver((items) => { + time = items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + this._result = this._chandle.query(this._connection._conn, stmt); + performance.mark('B'); + performance.measure('query', 'A', 'B'); + } + else { + this._result = this._chandle.query(this._connection._conn, stmt); + } + res = this._chandle.errno(this._result); + if (res == 0) { + let fieldCount = this._chandle.fieldsCount(this._result); + if (fieldCount == 0) { + let affectedRowCount = this._chandle.affectedRows(this._result); + let response = this._createAffectedResponse(affectedRowCount, time) + if (options['quiet'] != true) { + console.log(response); + } + wrapCB(callback); + return affectedRowCount; //return num of affected rows, common with insert, use statements + } + else { + this._fields = this._chandle.useResult(this._result); + this.fields = this._fields; + wrapCB(callback); + + return this._result; //return a pointer to the result + } + } + else { + throw new errors.ProgrammingError(this._chandle.errStr(this._result)) + } + +} +TDengineCursor.prototype._createAffectedResponse = function (num, time) { + return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)"; +} +TDengineCursor.prototype._createSetResponse = function (num, time) { + return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)"; +} +TDengineCursor.prototype.executemany = function executemany() { + +} +TDengineCursor.prototype.fetchone = function fetchone() { + +} +TDengineCursor.prototype.fetchmany = function fetchmany() { + +} +/** + * Fetches all results from a query and also stores results into cursor.data. It is preferable to use cursor.query() to create + * queries and execute them instead of using the cursor object directly. + * @param {function} callback - callback function executing on the complete fetched data + * @return {Array} The resultant array, with entries corresponding to each retreived row from the query results, sorted in + * order by the field name ordering in the table. + * @since 1.0.0 + * @example + * cursor.execute('select * from db.table'); + * var data = cursor.fetchall(function(results) { + * results.forEach(row => console.log(row)); + * }) + */ +TDengineCursor.prototype.fetchall = function fetchall(options, callback) { + if (this._result == null || this._fields == null) { + throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. 
First execute a query first"); + } + + let num_of_rows = this._chandle.affectedRows(this._result); + let data = new Array(num_of_rows); + + this._rowcount = 0; + + let time = 0; + const obs = new PerformanceObserver((items) => { + time += items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + while (true) { + let blockAndRows = this._chandle.fetchBlock(this._result, this._fields); + // console.log(blockAndRows); + // break; + let block = blockAndRows.blocks; + let num_of_rows = blockAndRows.num_of_rows; + if (num_of_rows == 0) { + break; + } + this._rowcount += num_of_rows; + let numoffields = this._fields.length; + for (let i = 0; i < num_of_rows; i++) { + // data.push([]); + + let rowBlock = new Array(numoffields); + for (let j = 0; j < numoffields; j++) { + rowBlock[j] = block[j][i]; + } + data[this._rowcount - num_of_rows + i] = (rowBlock); + // data.push(rowBlock); + } + + } + + performance.mark('B'); + performance.measure('query', 'A', 'B'); + let response = this._createSetResponse(this._rowcount, time) + console.log(response); + + // this._connection._clearResultSet(); + let fields = this.fields; + this._reset_result(); + this.data = data; + this.fields = fields; + + wrapCB(callback, data); + + return data; +} +/** + * Asynchrnously execute a query to TDengine. NOTE, insertion requests must be done in sync if on the same table. + * @param {string} operation - The query operation to execute in the taos shell + * @param {Object} options - Execution options object. quiet : true turns off logging from queries + * @param {boolean} options.quiet - True if you want to surpress logging such as "Query OK, 1 row(s) ..." + * @param {function} callback - A callback function to execute after the query is made to TDengine + * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query + * @since 1.0.0 + */ +TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) { + if (operation == undefined) { + throw new errors.ProgrammingError('No operation passed as argument'); + return null; + } + if (typeof options == 'function') { + //we expect the parameter after callback to be param + param = callback; + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + if (typeof callback != 'function') { + throw new errors.ProgrammingError("No callback function passed to execute_a function"); + } + // Async wrapper for callback; + var cr = this; + + let asyncCallbackWrapper = function (param2, res2, resCode) { + if (typeof callback == 'function') { + callback(param2, res2, resCode); + } + + if (resCode >= 0) { + // let fieldCount = cr._chandle.numFields(res2); + // if (fieldCount == 0) { + // //cr._chandle.freeResult(res2); + // return res2; + // } + // else { + // return res2; + // } + return res2; + + } + else { + throw new errors.ProgrammingError("Error occuring with use of execute_a async function. Status code was returned with failure"); + } + } + + let stmt = operation; + let time = 0; + + // Use ref module to write to buffer in cursor.js instead of taosquery to maintain a difference in levels. 
Have taosquery stay high level + // through letting it pass an object as param + var buf = ref.alloc('Object'); + ref.writeObject(buf, 0, param); + const obs = new PerformanceObserver((items) => { + time = items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + this._chandle.query_a(this._connection._conn, stmt, asyncCallbackWrapper, buf); + performance.mark('B'); + performance.measure('query', 'A', 'B'); + return param; + + +} +/** + * Fetches all results from an async query. It is preferable to use cursor.query_a() to create + * async queries and execute them instead of using the cursor object directly. + * @param {Object} options - An options object containing options for this function + * @param {function} callback - callback function that is callbacked on the COMPLETE fetched data (it is calledback only once!). + * Must be of form function (param, result, rowCount, rowData) + * @param {Object} param - A parameter that is also passed to the main callback function. Important! Param must be an object, and the key "data" cannot be used + * @return {{param:Object, result:Buffer}} An object with the passed parameters object and the buffer instance that is a pointer to the result handle. + * @since 1.2.0 + * @example + * cursor.execute('select * from db.table'); + * var data = cursor.fetchall(function(results) { + * results.forEach(row => console.log(row)); + * }) + */ +TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) { + if (typeof options == 'function') { + //we expect the parameter after callback to be param + param = callback; + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + if (typeof callback != 'function') { + throw new errors.ProgrammingError('No callback function passed to fetchall_a function') + } + if (param.data) { + throw new errors.ProgrammingError("You aren't allowed to set the key 'data' for the parameters object"); + } + let buf = ref.alloc('Object'); + param.data = []; + var cr = this; + + // This callback wrapper accumulates the data from the fetch_rows_a function from the cinterface. It is accumulated by passing the param2 + // object which holds accumulated data in the data key. + let asyncCallbackWrapper = function asyncCallbackWrapper(param2, result2, numOfRows2, rowData) { + param2 = ref.readObject(param2); //return the object back from the pointer + if (numOfRows2 > 0 && rowData.length != 0) { + // Keep fetching until now rows left. + let buf2 = ref.alloc('Object'); + param2.data.push(rowData); + ref.writeObject(buf2, 0, param2); + cr._chandle.fetch_rows_a(result2, asyncCallbackWrapper, buf2); + } + else { + let finalData = param2.data; + let fields = cr._chandle.fetchFields_a(result2); + let data = []; + for (let i = 0; i < finalData.length; i++) { + let num_of_rows = finalData[i][0].length; //fetched block number i; + let block = finalData[i]; + for (let j = 0; j < num_of_rows; j++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let k = 0; k < fields.length; k++) { + rowBlock[k] = block[k][j]; + } + data[data.length - 1] = rowBlock; + } + } + cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks! 
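+ // hand the caller the fully accumulated result: `data` was reshaped above from the
+ // per-column blocks collected on each fetch into one array entry per row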
+ callback(param2, result2, numOfRows2, { data: data, fields: fields }); + + } + } + ref.writeObject(buf, 0, param); + param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param + return { param: param, result: result }; +} +/** + * Stop a query given the result handle. + * @param {Buffer} result - The buffer that acts as the result handle + * @since 1.3.0 + */ +TDengineCursor.prototype.stopQuery = function stopQuery(result) { + this._chandle.stopQuery(result); +} +TDengineCursor.prototype._reset_result = function _reset_result() { + this._rowcount = -1; + if (this._result != null) { + this._chandle.freeResult(this._result); + } + this._result = null; + this._fields = null; + this.data = []; + this.fields = null; +} +/** + * Get server info such as version number + * @return {string} + * @since 1.3.0 + */ +TDengineCursor.prototype.getServerInfo = function getServerInfo() { + return this._chandle.getServerInfo(this._connection._conn); +} +/** + * Get client info such as version number + * @return {string} + * @since 1.3.0 + */ +TDengineCursor.prototype.getClientInfo = function getClientInfo() { + return this._chandle.getClientInfo(); +} +/** + * Subscribe to a table from a database in TDengine. + * @param {Object} config - A configuration object containing the configuration options for the subscription + * @param {string} config.restart - whether or not to continue a subscription if it already exits, otherwise start from beginning + * @param {string} config.topic - The unique identifier of a subscription + * @param {string} config.sql - A sql statement for data query + * @param {string} config.interval - The pulling interval + * @return {Buffer} A buffer pointing to the subscription session handle + * @since 1.3.0 + */ +TDengineCursor.prototype.subscribe = function subscribe(config) { + let restart = config.restart ? 1 : 0; + return this._chandle.subscribe(this._connection._conn, restart, config.topic, config.sql, config.interval); +}; +/** + * An infinite loop that consumes the latest data and calls a callback function that is provided. + * @param {Buffer} subscription - A buffer object pointing to the subscription session handle + * @param {function} callback - The callback function that takes the row data, field/column meta data, and the subscription session handle as input + * @since 1.3.0 + */ +TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) { + while (true) { + let { data, fields, result } = this._chandle.consume(subscription); + callback(data, fields, result); + } +} +/** + * Unsubscribe the provided buffer object pointing to the subscription session handle + * @param {Buffer} subscription - A buffer object pointing to the subscription session handle that is to be unsubscribed + * @since 1.3.0 + */ +TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) { + this._chandle.unsubscribe(subscription); +} +/** + * Open a stream with TDengine to run the sql query periodically in the background + * @param {string} sql - The query to run + * @param {function} callback - The callback function to run after each query, accepting inputs as param, result handle, data, fields meta data + * @param {number} stime - The time of the stream starts in the form of epoch milliseconds. If 0 is given, the start time is set as the current time. + * @param {function} stoppingCallback - The callback function to run when the continuous query stops. 
It takes no inputs + * @param {object} param - A parameter that is passed to the main callback function + * @return {Buffer} A buffer pointing to the stream handle + * @since 1.3.0 + */ +TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) { + let buf = ref.alloc('Object'); + ref.writeObject(buf, 0, param); + + let asyncCallbackWrapper = function (param2, result2, blocks, fields) { + let data = []; + let num_of_rows = blocks[0].length; + for (let j = 0; j < num_of_rows; j++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let k = 0; k < fields.length; k++) { + rowBlock[k] = blocks[k][j]; + } + data[data.length - 1] = rowBlock; + } + callback(param2, result2, blocks, fields); + } + return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf); +} +/** + * Close a stream + * @param {Buffer} - A buffer pointing to the handle of the stream to be closed + * @since 1.3.0 + */ +TDengineCursor.prototype.closeStream = function closeStream(stream) { + this._chandle.closeStream(stream); +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/error.js b/tests/connectorTest/nodejsTest/nodetaos/error.js new file mode 100644 index 0000000000000000000000000000000000000000..8ab91a50c7d81a4675246617e0969ee8c81c514e --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/error.js @@ -0,0 +1,96 @@ + +/** + * TDengine Error Class + * @ignore + */ +class TDError extends Error { + constructor(args) { + super(args) + this.name = "TDError"; + } +} +/** Exception raised for important warnings like data truncations while inserting. + * @ignore + */ +class Warning extends Error { + constructor(args) { + super(args) + this.name = "Warning"; + } +} +/** Exception raised for errors that are related to the database interface rather than the database itself. + * @ignore + */ +class InterfaceError extends TDError { + constructor(args) { + super(args) + this.name = "TDError.InterfaceError"; + } +} +/** Exception raised for errors that are related to the database. + * @ignore + */ +class DatabaseError extends TDError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError"; + } +} +/** Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + * @ignore + */ +class DataError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.DataError"; + } +} +/** Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + * @ignore + */ +class OperationalError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.OperationalError"; + } +} +/** Exception raised when the relational integrity of the database is affected. + * @ignore + */ +class IntegrityError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.IntegrityError"; + } +} +/** Exception raised when the database encounters an internal error. + * @ignore + */ +class InternalError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.InternalError"; + } +} +/** Exception raised for programming errors. 
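+ * (raised in this module for API misuse, e.g. executing on a cursor that has no connection or passing no operation)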
+ * @ignore + */ +class ProgrammingError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.ProgrammingError"; + } +} +/** Exception raised in case a method or database API was used which is not supported by the database. + * @ignore + */ +class NotSupportedError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.NotSupportedError"; + } +} + +module.exports = { + TDError, Warning, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError +}; diff --git a/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js new file mode 100644 index 0000000000000000000000000000000000000000..cf7344c868ee94831eba47ff55369a684e34b02f --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js @@ -0,0 +1,14 @@ +/* Wrap a callback, reduce code amount */ +function wrapCB(callback, input) { + if (typeof callback === 'function') { + callback(input); + } + return; +} +global.wrapCB = wrapCB; +function toTaosTSString(date) { + date = new Date(date); + let tsArr = date.toISOString().split("T") + return tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1); +} +global.toTaosTSString = toTaosTSString; diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js new file mode 100644 index 0000000000000000000000000000000000000000..3bc0fe0aca060a32daa7a5cebd2dbfb99ac29a7c --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js @@ -0,0 +1,152 @@ +const FieldTypes = require('./constants'); +const util = require('util'); +/** + * Various objects such as TaosRow and TaosColumn that help make parsing data easier + * @module TaosObjects + * + */ + +/** + * The TaosRow object. Contains the data from a retrieved row from a database and functions that parse the data. + * @typedef {Object} TaosRow - A row of data retrieved from a table. + * @global + * @example + * var trow = new TaosRow(row); + * console.log(trow.data); + */ +function TaosRow(row) { + this.data = row; + this.length = row.length; + return this; +} + +/** + * @typedef {Object} TaosField - A field/column's metadata from a table. + * @global + * @example + * var tfield = new TaosField(field); + * console.log(tfield.name); + */ + +function TaosField(field) { + this._field = field; + this.name = field.name; + this.type = FieldTypes.getType(field.type); + return this; +} + +/** + * A TaosTimestamp object, which is the standard date object with added functionality + * @global + * @memberof TaosObjects + * @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000 + */ +class TaosTimestamp extends Date { + constructor(date, precision = 0) { + if (precision === 1) { + super(Math.floor(date / 1000)); + this.precisionExtras = date % 1000; + } else if (precision === 2) { + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected + super(parseInt(BigInt(date) / 1000000n)); + // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) + this.precisionExtras = parseInt(BigInt(date) % 1000000n); + } else { + super(parseInt(date)); + } + this.precision = precision; + } + + /** + * TDengine raw timestamp. 
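+ * Rebuilt from the millisecond-resolution base Date plus `precisionExtras`:
+ * base * 1000 + extras for microsecond precision, base * 1000000 + extras for nanosecond precision.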
+ * @returns raw taos timestamp (int64) + */ + taosTimestamp() { + if (this.precision == 1) { + return (this * 1000 + this.precisionExtras); + } else if (this.precision == 2) { + return (this * 1000000 + this.precisionExtras); + } else { + return Math.floor(this); + } + } + + /** + * Gets the microseconds of a Date. + * @return {Int} A microseconds integer + */ + getMicroseconds() { + if (this.precision == 1) { + return this.getMilliseconds() * 1000 + this.precisionExtras; + } else if (this.precision == 2) { + return this.getMilliseconds() * 1000 + this.precisionExtras / 1000; + } else { + return 0; + } + } + /** + * Gets the nanoseconds of a TaosTimestamp. + * @return {Int} A nanoseconds integer + */ + getNanoseconds() { + if (this.precision == 1) { + return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000; + } else if (this.precision == 2) { + return this.getMilliseconds() * 1000000 + this.precisionExtras; + } else { + return 0; + } + } + + /** + * @returns {String} a string for timestamp string format + */ + _precisionExtra() { + if (this.precision == 1) { + return String(this.precisionExtras).padStart(3, '0'); + } else if (this.precision == 2) { + return String(this.precisionExtras).padStart(6, '0'); + } else { + return ''; + } + } + /** + * @function Returns the date into a string usable by TDengine + * @return {string} A Taos Timestamp String + */ + toTaosString() { + var tzo = -this.getTimezoneOffset(), + dif = tzo >= 0 ? '+' : '-', + pad = function (num) { + var norm = Math.floor(Math.abs(num)); + return (norm < 10 ? '0' : '') + norm; + }, + pad2 = function (num) { + var norm = Math.floor(Math.abs(num)); + if (norm < 10) return '00' + norm; + if (norm < 100) return '0' + norm; + if (norm < 1000) return norm; + }; + return this.getFullYear() + + '-' + pad(this.getMonth() + 1) + + '-' + pad(this.getDate()) + + ' ' + pad(this.getHours()) + + ':' + pad(this.getMinutes()) + + ':' + pad(this.getSeconds()) + + '.' + pad2(this.getMilliseconds()) + + '' + this._precisionExtra(); + } + + /** + * Custom console.log + * @returns {String} string format for debug + */ + [util.inspect.custom](depth, opts) { + return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts); + } + toString() { + return this.toTaosString(); + } +} + +module.exports = { TaosRow, TaosField, TaosTimestamp } diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosquery.js b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js new file mode 100644 index 0000000000000000000000000000000000000000..eeede3ff6885e27c1d1c569a7a410f88109c9acd --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js @@ -0,0 +1,112 @@ +var TaosResult = require('./taosresult') +require('./globalfunc.js') +module.exports = TaosQuery; + + +/** + * @class TaosQuery + * @classdesc The TaosQuery class is one level above the TDengine Cursor in that it makes sure to generally return promises from functions, and wrap + * all data with objects such as wrapping a row of data with Taos Row. This is meant to enable an higher level API that allows additional + * functionality and save time whilst also making it easier to debug and enter less problems with the use of promises. + * @param {string} query - Query to construct object from + * @param {TDengineCursor} cursor - The cursor from which this query will execute from + * @param {boolean} execute - Whether or not to immedietely execute the query synchronously and fetch all results. Default is false. 
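+ * @example
+ * // usage sketch, assuming `cursor` is an existing TDengineCursor (see the readme below):
+ * var query = cursor.query('show databases;');
+ * query.execute().then(function (result) { result.pretty(); });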
+ * @property {string} query - The current query in string format the TaosQuery object represents + * @return {TaosQuery} + * @since 1.0.6 + */ +function TaosQuery(query = "", cursor = null, execute = false) { + this.query = query; + this._cursor = cursor; + if (execute == true) { + return this.execute(); + } + return this; +} + +/** + * Executes the query object and returns a Promise + * @memberof TaosQuery + * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error + * @since 1.0.6 + */ +TaosQuery.prototype.execute = async function execute() { + var taosQuery = this; //store the current instance of taosQuery to avoid async issues? + var executionPromise = new Promise(function(resolve, reject) { + let data = []; + let fields = []; + let result; + try { + taosQuery._cursor.execute(taosQuery.query); + if (taosQuery._cursor._fields) fields = taosQuery._cursor._fields; + if (taosQuery._cursor._result != null) data = taosQuery._cursor.fetchall(); + result = new TaosResult(data, fields) + } + catch(err) { + reject(err); + } + resolve(result) + + }); + return executionPromise; +} + +/** + * Executes the query object asynchronously and returns a Promise. Completes query to completion. + * @memberof TaosQuery + * @param {Object} options - Execution options + * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error + * @since 1.2.0 + */ +TaosQuery.prototype.execute_a = async function execute_a(options = {}) { + var executionPromise = new Promise( (resolve, reject) => { + + }); + var fres; + var frej; + var fetchPromise = new Promise( (resolve, reject) => { + fres = resolve; + frej = reject; + }); + let asyncCallbackFetchall = async function(param, res, numOfRows, blocks) { + if (numOfRows > 0) { + // Likely a query like insert + fres(); + } + else { + fres(new TaosResult(blocks.data, blocks.fields)); + } + } + let asyncCallback = async function(param, res, code) { + //upon success, we fetchall results + this._cursor.fetchall_a(res, options, asyncCallbackFetchall, {}); + } + this._cursor.execute_a(this.query, asyncCallback.bind(this), {}); + return fetchPromise; +} + +/** + * Bind arguments to the query and automatically parses them into the right format + * @param {array | ...args} args - A number of arguments to bind to each ? in the query + * @return {TaosQuery} + * @example + * // An example of binding a javascript date and a number to a query + * var query = cursor.query("select count(*) from meterinfo.meters where ts <= ? and areaid = ?").bind(new Date(), 3); + * var promise1 = query.execute(); + * promise1.then(function(result) { + * result.pretty(); // Log the prettified version of the results. 
+ * }); + * @since 1.0.6 + */ +TaosQuery.prototype.bind = function bind(f, ...args) { + if (typeof f == 'object' && f.constructor.name != 'Array') args.unshift(f); //param is not an array object + else if (typeof f != 'object') args.unshift(f); + else { args = f; } + args.forEach(function(arg) { + if (arg.constructor.name == 'TaosTimestamp') arg = "\"" + arg.toTaosString() + "\""; + else if (arg.constructor.name == 'Date') arg = "\"" + toTaosTSString(arg) + "\""; + else if (typeof arg == 'string') arg = "\"" + arg + "\""; + this.query = this.query.replace(/\?/,arg); + }, this); + return this; +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosresult.js b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js new file mode 100644 index 0000000000000000000000000000000000000000..4138ebbec6e1b792691d17a25b7c18d35b6a922a --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js @@ -0,0 +1,85 @@ +require('./globalfunc.js') +const TaosObjects = require('./taosobjects'); +const TaosRow = TaosObjects.TaosRow; +const TaosField = TaosObjects.TaosField; + +module.exports = TaosResult; +/** + * @class TaosResult + * @classdesc A TaosResult class consts of the row data and the fields metadata, all wrapped under various objects for higher functionality. + * @param {Array} data - Array of result rows + * @param {Array} fields - Array of field meta data + * @property {Array} data - Array of TaosRows forming the result data (this does not include field meta data) + * @property {Array} fields - Array of TaosFields forming the fields meta data array. + * @return {TaosResult} + * @since 1.0.6 + */ +function TaosResult(data, fields) { + this.data = data.map(row => new TaosRow(row)); + this.rowcount = this.data.length; + this.fields = fields.map(field => new TaosField(field)); +} +/** + * Pretty print data and the fields meta data as if you were using the taos shell + * @memberof TaosResult + * @function pretty + * @since 1.0.6 + */ + +TaosResult.prototype.pretty = function pretty() { + let fieldsStr = ""; + let sizing = []; + this.fields.forEach((field,i) => { + if (field._field.type == 8 || field._field.type == 10){ + sizing.push(Math.max(field.name.length, field._field.bytes)); + } + else { + sizing.push(Math.max(field.name.length, suggestedMinWidths[field._field.type])); + } + fieldsStr += fillEmpty(Math.floor(sizing[i]/2 - field.name.length / 2)) + field.name + fillEmpty(Math.ceil(sizing[i]/2 - field.name.length / 2)) + " | "; + }); + var sumLengths = sizing.reduce((a,b)=> a+=b,(0)) + sizing.length * 3; + + console.log("\n" + fieldsStr); + console.log(printN("=",sumLengths)); + this.data.forEach(row => { + let rowStr = ""; + row.data.forEach((entry, i) => { + if (this.fields[i]._field.type == 9) { + entry = entry.toTaosString(); + } else { + entry = entry == null ? 
'null' : entry.toString(); + } + rowStr += entry + rowStr += fillEmpty(sizing[i] - entry.length) + " | "; + }); + console.log(rowStr); + }); +} +const suggestedMinWidths = { + 0: 4, + 1: 4, + 2: 4, + 3: 6, + 4: 11, + 5: 12, + 6: 24, + 7: 24, + 8: 10, + 9: 25, + 10: 10, +} +function printN(s, n) { + let f = ""; + for (let i = 0; i < n; i ++) { + f += s; + } + return f; +} +function fillEmpty(n) { + let str = ""; + for (let i = 0; i < n; i++) { + str += " "; + } + return str; +} diff --git a/tests/connectorTest/nodejsTest/readme.md b/tests/connectorTest/nodejsTest/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..26a28afbdd514ad97e969302e7d790f6240bb770 --- /dev/null +++ b/tests/connectorTest/nodejsTest/readme.md @@ -0,0 +1,161 @@ +# TDengine Node.js connector +[![minzip](https://img.shields.io/bundlephobia/minzip/td2.0-connector.svg)](https://github.com/taosdata/TDengine/tree/master/src/connector/nodejs) [![NPM](https://img.shields.io/npm/l/td2.0-connector.svg)](https://github.com/taosdata/TDengine/#what-is-tdengine) + +This is the Node.js library that lets you connect to [TDengine](https://www.github.com/taosdata/tdengine) 2.0 version. It is built so that you can use as much of it as you want or as little of it as you want through providing an extensive API. If you want the raw data in the form of an array of arrays for the row data retrieved from a table, you can do that. If you want to wrap that data with objects that allow you easily manipulate and display data such as using a prettifier function, you can do that! + +## Installation + +To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/) + +```cmd +npm install td2.0-connector +``` + +To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp) + +### On Linux + +- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) +- `make` +- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org) +- `node` (between `v10.x` and `v11.x`, other version has some dependency compatibility problems) + +### On macOS + +- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS) + +- Xcode + + - You also need to install the + + ``` + Command Line Tools + ``` + + via Xcode. You can find this under the menu + + ``` + Xcode -> Preferences -> Locations + ``` + + (or by running + + ``` + xcode-select --install + ``` + + in your Terminal) + + - This step will install `gcc` and the related toolchain containing `make` + +### On Windows + +#### Option 1 + +Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator). 
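+
+For example, from an elevated PowerShell or CMD.exe (this simply repeats the one-liner above in copy-paste form):
+
+```cmd
+npm install --global --production windows-build-tools
+```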
+ +#### Option 2 + +Install tools and configuration manually: + +- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload) +- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.) +- Launch cmd, `npm config set msvs_version 2017` + +If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips. + +To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64". + +## Usage + +The following is a short summary of the basic usage of the connector, the full api and documentation can be found [here](http://docs.taosdata.com/node) + +### Connection + +To use the connector, first require the library ```td2.0-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The required connection option is ```host```, other options if not set, will be the default values as shown below. + +A cursor also needs to be initialized in order to interact with TDengine from Node.js. + +```javascript +const taos = require('td2.0-connector'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) +var cursor = conn.cursor(); // Initializing a new cursor +``` + +Close a connection + +```javascript +conn.close(); +``` + +### Queries + +We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object. + +```javascript +var query = cursor.query('show databases;') +``` + +We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object, which contains the raw data and additional functionalities such as pretty printing the results. + +```javascript +var promise = query.execute(); +promise.then(function(result) { + result.pretty(); //logs the results to the console as if you were in the taos shell +}); +``` + +You can also query by binding parameters to a query by filling in the question marks in a string as so. The query will automatically parse what was binded and convert it to the proper format for use with TDengine +```javascript +var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5); +query.execute().then(function(result) { + result.pretty(); +}) +``` + +The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery. +```javascript +var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true) +promise.then(function(result) { + result.pretty(); +}) +``` + +If you want to execute queries without objects being wrapped around the data, use ```cursor.execute()``` directly and ```cursor.fetchall()``` to retrieve data if there is any. 
+```javascript +cursor.execute('select count(*), avg(v1), min(v2) from meterinfo.meters where ts >= \"2019-07-20 00:00:00.000\";'); +var data = cursor.fetchall(); +console.log(cursor.fields); // Latest query's Field metadata is stored in cursor.fields +console.log(cursor.data); // Latest query's result data is stored in cursor.data, also returned by fetchall. +``` + +### Async functionality + +Async queries can be performed using the same functions such as `cursor.execute`, `TaosQuery.query`, but now with `_a` appended to them. + +Say you want to execute an two async query on two separate tables, using `cursor.query`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object. + +```javascript +var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a() +var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a(); +promise1.then(function(result) { + result.pretty(); +}) +promise2.then(function(result) { + result.pretty(); +}) +``` + +## Example + +An example of using the NodeJS connector to create a table with weather data and create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (The preferred method for using the connector) + +An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) + +## Contributing to TDengine + +Please follow the [contribution guidelines](https://github.com/taosdata/TDengine/blob/master/CONTRIBUTING.md) to contribute to the project. 
+ +## License + +[GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html) diff --git a/tests/connectorTest/nodejsTest/tdengine.js b/tests/connectorTest/nodejsTest/tdengine.js new file mode 100644 index 0000000000000000000000000000000000000000..047c744a4fc90c6306e851eaa529a7f9f578fe12 --- /dev/null +++ b/tests/connectorTest/nodejsTest/tdengine.js @@ -0,0 +1,4 @@ +var TDengineConnection = require('./nodetaos/connection.js') +module.exports.connect = function (connection={}) { + return new TDengineConnection(connection); +} diff --git a/tests/connectorTest/nodejsTest/test/performance.js b/tests/connectorTest/nodejsTest/test/performance.js new file mode 100644 index 0000000000000000000000000000000000000000..ea197f034435e28edd67df8d5f4b141f410fed81 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/performance.js @@ -0,0 +1,89 @@ +function memoryUsageData() { + let s = process.memoryUsage() + for (key in s) { + s[key] = (s[key]/1000000).toFixed(3) + "MB"; + } + return s; +} +console.log("initial mem usage:", memoryUsageData()); + +const { PerformanceObserver, performance } = require('perf_hooks'); +const taos = require('../tdengine'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}); +var c1 = conn.cursor(); + +// Initialize env +c1.execute('create database if not exists td_connector_test;'); +c1.execute('use td_connector_test;') +c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));'); +c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));') + + +// Insertion into single table Performance Test +var dataPrepTime = 0; +var insertTime = 0; +var insertTime5000 = 0; +var avgInsert5ktime = 0; +const obs = new PerformanceObserver((items) => { + let entry = items.getEntries()[0]; + + if (entry.name == 'Data Prep') { + dataPrepTime += entry.duration; + } + else if (entry.name == 'Insert'){ + insertTime += entry.duration + } + else { + console.log(entry.name + ': ' + (entry.duration/1000).toFixed(8) + 's'); + } + performance.clearMarks(); +}); +obs.observe({ entryTypes: ['measure'] }); + +function R(l,r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} +function insertN(n) { + for (let i = 0; i < n; i++) { + performance.mark('A3'); + let insertData = ["now + " + i + "m", // Timestamp + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt + parseFloat( R(-3.4E38, 3.4E38) ), // Float + parseFloat( R(-1.7E308, 1.7E308) ), // Double + "\"Long Binary\"", // Binary + parseInt( R(-32767, 32767) ), // Small Int + parseInt( R(-127, 127) ), // Tiny Int + randomBool(), + "\"Nchars 一些中文字幕\""]; // Bool + let query = 'insert into td_connector_test.all_types values(' + insertData.join(',') + ' );'; + performance.mark('B3'); + performance.measure('Data Prep', 'A3', 'B3'); + performance.mark('A2'); + c1.execute(query, {quiet:true}); + performance.mark('B2'); + performance.measure('Insert', 'A2', 'B2'); + if ( i % 5000 == 4999) { + console.log("Insert # " + (i+1)); + console.log('Insert 5k records: ' + ((insertTime - insertTime5000)/1000).toFixed(8) + 's'); + insertTime5000 = insertTime; + avgInsert5ktime = (avgInsert5ktime/1000 * Math.floor(i / 5000) + 
insertTime5000/1000) / Math.ceil( i / 5000); + console.log('DataPrepTime So Far: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time So Far: ' + (insertTime/1000).toFixed(8) + 's | Avg. Insert 5k time: ' + avgInsert5ktime.toFixed(8)); + + + } + } +} +performance.mark('insert 1E5') +insertN(1E5); +performance.mark('insert 1E5 2') +performance.measure('Insert With Logs', 'insert 1E5', 'insert 1E5 2'); +console.log('DataPrepTime: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time: ' + (insertTime/1000).toFixed(8) + 's'); +dataPrepTime = 0; insertTime = 0; +//'insert into td_connector_test.all_types values (now, null,null,null,null,null,null,null,null,null);' diff --git a/tests/connectorTest/nodejsTest/test/test.js b/tests/connectorTest/nodejsTest/test/test.js new file mode 100644 index 0000000000000000000000000000000000000000..caf05955da4c960ebedc872f400c17d18be767dd --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/test.js @@ -0,0 +1,170 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1) + "\""; +} +function R(l,r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +c1.execute('create database if not exists td_connector_test;'); +c1.execute('use td_connector_test;') +c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));'); +c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));') + +// Shell Test : The following uses the cursor to imitate the taos shell + +// Insert +for (let i = 0; i < 10000; i++) { + let insertData = ["now+" + i + "s", // Timestamp + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt + parseFloat( R(-3.4E38, 3.4E38) ), // Float + parseFloat( R(-1.7E30, 1.7E30) ), // Double + "\"Long Binary\"", // Binary + parseInt( R(-32767, 32767) ), // Small Int + parseInt( R(-127, 127) ), // Tiny Int + randomBool(), + "\"Nchars\""]; // Bool + c1.execute('insert into td_connector_test.all_types values(' + insertData.join(',') + ' );', {quiet:true}); + if (i % 1000 == 0) { + console.log("Insert # " , i); + } +} + +// Select +console.log('select * from td_connector_test.all_types limit 3 offset 100;'); +c1.execute('select * from td_connector_test.all_types limit 2 offset 100;'); + +var d = c1.fetchall(); +console.log(c1.fields); +console.log(d); + +// Functions +console.log('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;') +c1.execute('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;'); +var d = c1.fetchall(); +console.log(c1.fields); +console.log(d); + +// Immediate Execution like the Shell + +c1.query('select count(*), stddev(_double), min(_tinyint) from all_types where _tinyint > 50 and _int < 0;', true).then(function(result){ + result.pretty(); +}) + +c1.query('select _tinyint, _bool from all_types where _tinyint > 50 and _int < 0 limit 50;', 
true).then(function(result){ + result.pretty(); +}) + +c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types;', true).then(function(result){ + result.pretty(); +}) +c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types interval(1m) limit 100;', true).then(function(result){ + result.pretty(); +}) + +// Binding arguments, and then using promise +var q = c1.query('select _nchar from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100) +console.log(q.query); +q.execute().then(function(r) { + r.pretty(); +}); + + +// test query null value +c1.execute("create table if not exists td_connector_test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64))"); +c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)"); +c1.execute("insert into t1(ts, temperature) values(now, 22.22)"); +c1.execute("insert into t1(ts, humidity) values(now, 33)"); +c1.query('select * from test.t1', true).then(function (result) { + result.pretty(); +}); + +var q = c1.query('select * from td_connector_test.weather'); +console.log(q.query); +q.execute().then(function(r) { + r.pretty(); +}); + +function sleep(sleepTime) { + for(var start = +new Date; +new Date - start <= sleepTime; ) { } +} + +sleep(10000); + +// Raw Async Testing (Callbacks, not promises) +function cb2(param, result, rowCount, rd) { + console.log('CB2 Callbacked!'); + console.log("RES *", result); + console.log("Async fetched", rowCount, " rows"); + console.log("Passed Param: ", param); + console.log("Fields ", rd.fields); + console.log("Data ", rd.data); +} +function cb1(param,result,code) { + console.log('CB1 Callbacked!'); + console.log("RES * ", result); + console.log("Status: ", code); + console.log("Passed Param ", param); + c1.fetchall_a(result, cb2, param); +} + +c1.execute_a("describe td_connector_test.all_types;", cb1, {myparam:3.141}); + +function cb4(param, result, rowCount, rd) { + console.log('CB4 Callbacked!'); + console.log("RES *", result); + console.log("Async fetched", rowCount, "rows"); + console.log("Passed Param: ", param); + console.log("Fields", rd.fields); + console.log("Data", rd.data); +} +// Without directly calling fetchall_a +var thisRes; +function cb3(param,result,code) { + console.log('CB3 Callbacked!'); + console.log("RES *", result); + console.log("Status:", code); + console.log("Passed Param", param); + thisRes = result; +} +//Test calling execute and fetchall seperately and not through callbacks +var param = c1.execute_a("describe td_connector_test.all_types;", cb3, {e:2.718}); +console.log("Passed Param outside of callback: ", param); +console.log(param); +setTimeout(function(){ + c1.fetchall_a(thisRes, cb4, param); +},100); + + +// Async through promises +var aq = c1.query('select count(*) from td_connector_test.all_types;',false); +aq.execute_a().then(function(data) { + data.pretty(); +}); + +c1.query('describe td_connector_test.stabletest').execute_a().then(function(r){ + r.pretty() +}); + +setTimeout(function(){ + c1.query('drop database td_connector_test;'); +},200); + +setTimeout(function(){ + conn.close(); +},2000); diff --git a/tests/connectorTest/nodejsTest/test/testMicroseconds.js b/tests/connectorTest/nodejsTest/test/testMicroseconds.js new file mode 100644 index 0000000000000000000000000000000000000000..cc65b3d919f92b3b4d7e0e216c6c8ac64a294d7f --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/testMicroseconds.js @@ -0,0 +1,49 @@ +const taos = 
require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} +function R(l, r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +const dbname = 'nodejs_test_us'; +c1.execute('create database if not exists ' + dbname + ' precision "us"'); +c1.execute('use ' + dbname) +c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +c1.execute('insert into tstest values(1625801548423914, 0)'); +// Select +console.log('select * from tstest'); +c1.execute('select * from tstest'); + +var d = c1.fetchall(); +console.log(c1.fields); +let ts = d[0][0]; +console.log(ts); + +if (ts.taosTimestamp() != 1625801548423914) { + throw "microseconds not match!"; +} +if (ts.getMicroseconds() % 1000 !== 914) { + throw "micronsecond precision error"; +} +setTimeout(function () { + c1.query('drop database nodejs_us_test;'); +}, 200); + +setTimeout(function () { + conn.close(); +}, 2000); diff --git a/tests/connectorTest/nodejsTest/test/testNanoseconds.js b/tests/connectorTest/nodejsTest/test/testNanoseconds.js new file mode 100644 index 0000000000000000000000000000000000000000..85a7600b01f2c908f22e621488f22678083149ea --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/testNanoseconds.js @@ -0,0 +1,49 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} +function R(l, r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +const dbname = 'nodejs_test_ns'; +c1.execute('create database if not exists ' + dbname + ' precision "ns"'); +c1.execute('use ' + dbname) +c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +c1.execute('insert into tstest values(1625801548423914405, 0)'); +// Select +console.log('select * from tstest'); +c1.execute('select * from tstest'); + +var d = c1.fetchall(); +console.log(c1.fields); +let ts = d[0][0]; +console.log(ts); + +if (ts.taosTimestamp() != 1625801548423914405) { + throw "nanosecond not match!"; +} +if (ts.getNanoseconds() % 1000000 !== 914405) { + throw "nanosecond precision error"; +} +setTimeout(function () { + c1.query('drop database nodejs_ns_test;'); +}, 200); + +setTimeout(function () { + conn.close(); +}, 2000); diff --git a/tests/connectorTest/nodejsTest/test/testSubscribe.js b/tests/connectorTest/nodejsTest/test/testSubscribe.js new file mode 100644 index 0000000000000000000000000000000000000000..30fb3f425683f0113873534f2b67255db811edcc --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/testSubscribe.js @@ -0,0 +1,16 @@ +const taos = require('../tdengine'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10}); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; +c1.execute('use td_connector_test'); +let sub = c1.subscribe({ + restart: true, + sql: "select AVG(_int) from 
td_connector_test.all_Types;", + topic: 'all_Types', + interval: 1000 +}); + +c1.consumeData(sub, (data, fields) => { + console.log(data); +}); \ No newline at end of file diff --git a/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a4bc73aef3e19bc56e817325acd62d21156d67 --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py @@ -0,0 +1,111 @@ +import pyodbc +import argparse +import sys + +parser = argparse.ArgumentParser(description='Access TDengine via ODBC.') +parser.add_argument('--DSN', help='DSN to use') +parser.add_argument('--UID', help='UID to use') +parser.add_argument('--PWD', help='PWD to use') +parser.add_argument('--Server', help='Server to use') +parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use') + +args = parser.parse_args() + +a = 'DSN=%s'%args.DSN if args.DSN else None +b = 'UID=%s'%args.UID if args.UID else None +c = 'PWD=%s'%args.PWD if args.PWD else None +d = 'Server=%s'%args.Server if args.Server else None +conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None +conn_str = conn_str if conn_str else args.C +if not conn_str: + parser.print_help(file=sys.stderr) + exit() + +print('connecting: [%s]' % conn_str) +cnxn = pyodbc.connect(conn_str, autocommit=True) +cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129") +##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute(""" +INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?) 
+""", +"2020-12-12 00:00:00", +'true', +'-127', +'-32767', +'-2147483647', +'-9223372036854775807', +'-1.23e10', +'-11.23e6', +'abcdefghij'.encode('utf-8'), +"人啊大发测试及abc") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)") +cursor.close() + +params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'), + ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'), + ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'), + ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +print('py:...................') +cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params) +print('py:...................') +cursor.close() + +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", 4) +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() +## +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", '5') +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() + diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.go b/tests/connectorTest/odbcTest/nanosupport/odbc.go new file mode 100644 index 0000000000000000000000000000000000000000..4d9c760c4e87a4a899051edc74692ecca8a19d15 --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/odbc.go @@ -0,0 +1,84 @@ +package main + +import ( + "context" + "database/sql" + "flag" + "log" + "os" + "os/signal" + "time" + _ "github.com/alexbrainman/odbc" +) + +var pool *sql.DB // Database connection pool. + +func main() { + id := flag.Int64("id", 32768, "person ID to find") + dsn := flag.String("dsn", os.Getenv("DSN"), "connection data source name") + flag.Parse() + + if len(*dsn) == 0 { + log.Fatal("missing dsn flag") + } + if *id == 0 { + log.Fatal("missing person ID") + } + var err error + + // Opening a driver typically will not attempt to connect to the database. + pool, err = sql.Open("odbc", *dsn) + if err != nil { + // This will not be a connection error, but a DSN parse error or + // another initialization error. + log.Fatal("unable to use data source name", err) + } + defer pool.Close() + + pool.SetConnMaxLifetime(0) + pool.SetMaxIdleConns(3) + pool.SetMaxOpenConns(3) + + ctx, stop := context.WithCancel(context.Background()) + defer stop() + + appSignal := make(chan os.Signal, 3) + signal.Notify(appSignal, os.Interrupt) + + go func() { + select { + case <-appSignal: + stop() + } + }() + + Ping(ctx) + + Query(ctx, *id) +} + +// Ping the database to verify DSN provided by the user is valid and the +// server accessible. If the ping fails exit the program with an error. 
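+// A one-second timeout is applied via context.WithTimeout below, so an
+// unreachable server fails fast instead of blocking indefinitely.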
+func Ping(ctx context.Context) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + if err := pool.PingContext(ctx); err != nil { + log.Fatalf("unable to connect to database: %v", err) + } +} + +// Query the database for the information requested and prints the results. +// If the query fails exit the program with an error. +func Query(ctx context.Context, id int64) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var name string + err := pool.QueryRowContext(ctx, "select name from m.t").Scan(&name) + if err != nil { + log.Fatal("unable to execute search query", err) + } + log.Println("name=", name) +} + diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.py b/tests/connectorTest/odbcTest/nanosupport/odbc.py new file mode 100644 index 0000000000000000000000000000000000000000..cee0cf1a13f6360790de368637e2b6a05de3564f --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/odbc.py @@ -0,0 +1,115 @@ +import pyodbc +import argparse +import sys + +parser = argparse.ArgumentParser(description='Access TDengine via ODBC.') +parser.add_argument('--DSN', help='DSN to use') +parser.add_argument('--UID', help='UID to use') +parser.add_argument('--PWD', help='PWD to use') +parser.add_argument('--Server', help='Server to use') +parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use') + +args = parser.parse_args() + +a = 'DSN=%s'%args.DSN if args.DSN else None +b = 'UID=%s'%args.UID if args.UID else None +c = 'PWD=%s'%args.PWD if args.PWD else None +d = 'Server=%s'%args.Server if args.Server else None +conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None +conn_str = conn_str if conn_str else args.C +if not conn_str: + parser.print_help(file=sys.stderr) + exit() + +print('connecting: [%s]' % conn_str) +cnxn = pyodbc.connect(conn_str, autocommit=True) +cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129") +##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute(""" +INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?) 
+""", +"2020-12-12 00:00:00", +'true', +'-127', +'-32767', +'-2147483647', +'-9223372036854775807', +'-1.23e10', +'-11.23e6', +'abcdefghij'.encode('utf-8'), +"人啊大发测试及abc") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("select * from db.v") +cursor.close() + +params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'), + ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'), + ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'), + ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +print('py:...................') +cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params) +print('py:...................') +cursor.close() + +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", 4) +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() +## +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", '5') +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() + diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index fed00c147b87621c70d60ea206b06f1b0f3e8d8f..8cf0356721f8ffd568e87fa4a77c86eb0f90a62b 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.31 + 2.0.34 diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java index d4ea5f919d2882e4f82b817380172eff20d7c611..5bc23403087578c0791b0a5e6fca74a47aad8184 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java @@ -7,6 +7,9 @@ public class JdbcDemo { private static String host; private static final String dbName = "test"; private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + private Connection connection; public static void main(String[] args) { @@ -30,10 +33,9 @@ public class JdbcDemo { } private void init() { - final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; // get connection try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty("charset", "UTF-8"); properties.setProperty("locale", "en_US.UTF-8"); @@ -42,8 +44,7 @@ public class JdbcDemo { connection = DriverManager.getConnection(url, properties); if (connection != null) System.out.println("[ OK ] Connection established."); 
- } catch (ClassNotFoundException | SQLException e) { - System.out.println("[ ERROR! ] Connection establish failed."); + } catch (SQLException e) { e.printStackTrace(); } } @@ -74,7 +75,7 @@ public class JdbcDemo { } private void select() { - final String sql = "select * from "+ dbName + "." + tbName; + final String sql = "select * from " + dbName + "." + tbName; executeQuery(sql); } @@ -89,8 +90,6 @@ public class JdbcDemo { } } - /************************************************************************/ - private void executeQuery(String sql) { long start = System.currentTimeMillis(); try (Statement statement = connection.createStatement()) { @@ -117,7 +116,6 @@ public class JdbcDemo { } } - private void printSql(String sql, boolean succeed, long cost) { System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); } @@ -132,7 +130,6 @@ public class JdbcDemo { long end = System.currentTimeMillis(); printSql(sql, false, (end - start)); e.printStackTrace(); - } } @@ -141,5 +138,4 @@ public class JdbcDemo { System.exit(0); } - -} \ No newline at end of file +} diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java index 5bf980f6d84e53438573812aa9f07d8d463f08c3..d89476b8ca718dab24202e2320e842366533a763 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java @@ -4,14 +4,15 @@ import java.sql.*; import java.util.Properties; public class JdbcRestfulDemo { - private static final String host = "127.0.0.1"; + private static final String host = "localhost"; + private static final String dbname = "test"; + private static final String user = "root"; + private static final String password = "taosdata"; public static void main(String[] args) { try { - // load JDBC-restful driver - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); // use port 6041 in url when use JDBC-restful - String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password; Properties properties = new Properties(); properties.setProperty("charset", "UTF-8"); @@ -21,12 +22,12 @@ public class JdbcRestfulDemo { Connection conn = DriverManager.getConnection(url, properties); Statement stmt = conn.createStatement(); - stmt.execute("drop database if exists restful_test"); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - stmt.execute("create table restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))"); - stmt.executeUpdate("insert into t1 using restful_test.weather tags('北京') values(now, 18.2)"); - ResultSet rs = stmt.executeQuery("select * from restful_test.weather"); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table " + dbname + ".weather(ts timestamp, temperature float) tags(location nchar(64))"); + stmt.executeUpdate("insert into t1 using " + dbname + ".weather tags('北京') values(now, 18.2)"); + ResultSet rs = stmt.executeQuery("select * from " + dbname + ".weather"); ResultSetMetaData meta = rs.getMetaData(); while (rs.next()) { for (int i = 1; i <= meta.getColumnCount(); i++) { @@ -38,8 +39,6 @@ public class 
JdbcRestfulDemo { rs.close(); stmt.close(); conn.close(); - } catch (ClassNotFoundException e) { - e.printStackTrace(); } catch (SQLException e) { e.printStackTrace(); } diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java index def4c649027034028d222bfedb71e37d82b99380..4c499b0b3abb518b48b222eca9bbbcb388bd2008 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java @@ -34,9 +34,8 @@ public class SubscribeDemo { System.out.println(usage); return; } - /*********************************************************************************************/ + try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml index 6c83718896cc2e5716f599ba08212d3dc8292133..9126813b67e71691692109920f891a6fb4cc5ab5 100644 --- a/tests/examples/JDBC/springbootdemo/pom.xml +++ b/tests/examples/JDBC/springbootdemo/pom.xml @@ -60,12 +60,15 @@ + + org.springframework.boot + spring-boot-starter-aop + + com.taosdata.jdbc taos-jdbcdriver - 2.0.28 - - + 2.0.34 diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java index fa10f3b0929e4c25c1379f489f73fc12ad9c1917..53edaa5796cccc7e4a4f274048c83a9ca7bbc7bb 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java @@ -4,7 +4,7 @@ import org.mybatis.spring.annotation.MapperScan; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; -@MapperScan(basePackages = {"com.taosdata.example.springbootdemo.dao"}) +@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"}) @SpringBootApplication public class SpringbootdemoApplication { diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java index cf14f5d84ace6348f38709ac3d3668ee8d2a0797..ed720fe6c02dd3a7eba6e645ea1e76d704c04d0c 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java @@ -15,35 +15,21 @@ public class WeatherController { @Autowired private WeatherService weatherService; - /** - * create database and table - * - * @return - */ + @GetMapping("/lastOne") + public Weather lastOne() { + return weatherService.lastOne(); + } + @GetMapping("/init") public int init() { return weatherService.init(); } - /** - * Pagination Query - * - * @param limit - * @param offset - * @return - */ @GetMapping("/{limit}/{offset}") public List queryWeather(@PathVariable Long limit, 
@PathVariable Long offset) { return weatherService.query(limit, offset); } - /** - * upload single weather info - * - * @param temperature - * @param humidity - * @return - */ @PostMapping("/{temperature}/{humidity}") public int saveWeather(@PathVariable float temperature, @PathVariable float humidity) { return weatherService.save(temperature, humidity); diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java index ad6733558a9d548be196cf8c9c0c63dc96227b39..d9202b45b4cc3dddf8e5a082ac339c1f88d4ec01 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java @@ -8,6 +8,8 @@ import java.util.Map; public interface WeatherMapper { + Map lastOne(); + void dropDB(); void createDB(); diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml index 2d3e0540650f35c1018992795ac33fb6cb7c4837..91938ca24e3cf9c3e0f2895cf40f214d484c55d5 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml @@ -9,20 +9,48 @@ + + - drop database if exists test + drop + database if exists test - create database if not exists test + create + database if not exists test - create table if not exists test.weather(ts timestamp, temperature float, humidity float) tags(location nchar(64), groupId int) + create table if not exists test.weather + ( + ts + timestamp, + temperature + float, + humidity + float, + note + binary + ( + 64 + )) tags + ( + location nchar + ( + 64 + ), groupId int) - create table if not exists test.t#{groupId} using test.weather tags(#{location}, #{groupId}) + create table if not exists test.t#{groupId} using test.weather tags + ( + #{location}, + #{groupId} + ) - insert into test.t#{groupId} (ts, temperature, humidity) values (#{ts}, ${temperature}, ${humidity}) + insert into test.t#{groupId} (ts, temperature, humidity, note) + values (#{ts}, ${temperature}, ${humidity}, #{note}) - - - + + + \ No newline at end of file diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java index c11b9a6f50655788d1e35eb9607a101d2d06c872..e4238127bd32b0f6ad21a514f3a1f07f6069b6d5 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java @@ -11,6 +11,7 @@ public class Weather { private Float temperature; private Float humidity; private String location; + private String note; private int groupId; public Weather() { @@ -61,4 +62,12 @@ public class Weather { public void setGroupId(int groupId) { this.groupId = groupId; } + + public String getNote() { + return note; + } + + public void setNote(String note) { + this.note = note; + } } diff --git 
a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java index 26d09c7d128015739cdb0a87956affa4910b4b4e..2264b200afc3e0c2b7dd8e496e607649f940581d 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java @@ -29,6 +29,7 @@ public class WeatherService { Weather weather = new Weather(new Timestamp(ts + (thirtySec * i)), 30 * random.nextFloat(), random.nextInt(100)); weather.setLocation(locations[random.nextInt(locations.length)]); weather.setGroupId(i % locations.length); + weather.setNote("note-" + i); weatherMapper.createTable(weather); count += weatherMapper.insert(weather); } @@ -58,4 +59,21 @@ public class WeatherService { public List avg() { return weatherMapper.avg(); } + + public Weather lastOne() { + Map result = weatherMapper.lastOne(); + + long ts = (long) result.get("ts"); + float temperature = (float) result.get("temperature"); + float humidity = (float) result.get("humidity"); + String note = (String) result.get("note"); + int groupId = (int) result.get("groupid"); + String location = (String) result.get("location"); + + Weather weather = new Weather(new Timestamp(ts), temperature, humidity); + weather.setNote(note); + weather.setGroupId(groupId); + weather.setLocation(location); + return weather; + } } diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java new file mode 100644 index 0000000000000000000000000000000000000000..80dad1bd7d669ba6b912c7e5fa816c29b7e37c87 --- /dev/null +++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java @@ -0,0 +1,36 @@ +package com.taosdata.example.springbootdemo.util; + +import org.aspectj.lang.ProceedingJoinPoint; +import org.aspectj.lang.annotation.Around; +import org.aspectj.lang.annotation.Aspect; +import org.springframework.stereotype.Component; + +import java.sql.Timestamp; +import java.util.Map; + +@Aspect +@Component +public class TaosAspect { + + @Around("execution(java.util.Map com.taosdata.example.springbootdemo.dao.*.*(..))") + public Object handleType(ProceedingJoinPoint joinPoint) { + Map result = null; + try { + result = (Map) joinPoint.proceed(); + for (String key : result.keySet()) { + Object obj = result.get(key); + if (obj instanceof byte[]) { + obj = new String((byte[]) obj); + result.put(key, obj); + } + if (obj instanceof Timestamp) { + obj = ((Timestamp) obj).getTime(); + result.put(key, obj); + } + } + } catch (Throwable e) { + e.printStackTrace(); + } + return result; + } +} diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties index 4d7e64d10576388827502a459df9e68da2721dbb..06daa81bbb06450d99ab3f6e640c9795c0ad5d2e 100644 --- a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties +++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties @@ -1,22 +1,20 @@ # datasource config - JDBC-JNI #spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver 
-#spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 +#spring.datasource.url=jdbc:TAOS://localhost:6030/?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 #spring.datasource.username=root #spring.datasource.password=taosdata - # datasource config - JDBC-RESTful spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver -spring.datasource.url=jdbc:TAOS-RS://master:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 +spring.datasource.url=jdbc:TAOS-RS://localhsot:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 spring.datasource.username=root spring.datasource.password=taosdata - spring.datasource.druid.initial-size=5 spring.datasource.druid.min-idle=5 spring.datasource.druid.max-active=5 spring.datasource.druid.max-wait=30000 spring.datasource.druid.validation-query=select server_status(); - +spring.aop.auto=true +spring.aop.proxy-target-class=true #mybatis mybatis.mapper-locations=classpath:mapper/*.xml - logging.level.com.taosdata.jdbc.springbootdemo.dao=debug diff --git a/tests/examples/c/-g b/tests/examples/c/-g new file mode 100755 index 0000000000000000000000000000000000000000..3909909e8fe531a7b6d35ca315b8277e7270bb02 Binary files /dev/null and b/tests/examples/c/-g differ diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index 621950a834c515962f35e000279bc91e4c25b5e0..03123afb3584ea94417c88e55edd9f8e232b0fe9 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -2,6 +2,7 @@ // to compile: gcc -o apitest apitest.c -ltaos #include "taoserror.h" +#include "cJSON.h" #include #include @@ -1020,7 +1021,7 @@ int32_t verify_schema_less(TAOS* taos) { void verify_telnet_insert(TAOS* taos) { TAOS_RES *result; - result = taos_query(taos, "drop database if exists test;"); + result = taos_query(taos, "drop database if exists db;"); taos_free_result(result); usleep(100000); result = taos_query(taos, "create database db precision 'ms';"); @@ -1032,13 +1033,13 @@ void verify_telnet_insert(TAOS* taos) { /* metric */ char* lines0[] = { - "stb0_0 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", - "stb0_1 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", - "stb0_2 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + "stb0_0 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"", + "stb0_1 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"", + "stb0_2 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"", }; code = taos_insert_telnet_lines(taos, lines0, 3); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines0 code: %d, %s.\n", code, tstrerror(code)); } /* timestamp */ @@ -1052,18 +1053,18 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines1, 6); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines1 code: %d, %s.\n", code, tstrerror(code)); } /* metric value */ - //tinyin + //tinyint char* lines2_0[] = { "stb2_0 1626006833651ms -127i8 host=\"host0\"", "stb2_0 1626006833652ms 127i8 host=\"host0\"" }; code = taos_insert_telnet_lines(taos, lines2_0, 2); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_0 code: %d, %s.\n", code, tstrerror(code)); } //smallint @@ -1073,7 +1074,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_1, 2); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_1 code: %d, %s.\n", code, tstrerror(code)); } //int @@ -1083,7 
+1084,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_2, 2); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_2 code: %d, %s.\n", code, tstrerror(code)); } //bigint @@ -1093,7 +1094,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_3, 2); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_3 code: %d, %s.\n", code, tstrerror(code)); } //float @@ -1112,7 +1113,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_4, 11); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_4 code: %d, %s.\n", code, tstrerror(code)); } //double @@ -1130,7 +1131,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_5, 10); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_5 code: %d, %s.\n", code, tstrerror(code)); } //bool @@ -1148,7 +1149,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_6, 10); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_6 code: %d, %s.\n", code, tstrerror(code)); } //binary @@ -1159,7 +1160,7 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_7, 3); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_7 code: %d, %s.\n", code, tstrerror(code)); } //nchar @@ -1169,34 +1170,767 @@ void verify_telnet_insert(TAOS* taos) { }; code = taos_insert_telnet_lines(taos, lines2_8, 2); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines2_8 code: %d, %s.\n", code, tstrerror(code)); } /* tags */ //tag value types char* lines3_0[] = { - "stb3_0 1626006833610ms 1 t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=3.4E38f32,t6=1.7E308f64,t7=true,t8=\"binary_val_1\",t9=L\"标签值1\"", - "stb3_0 1626006833610ms 2 t1=-127i8,t2=-32767i16,t3=-2147483647i32,t4=-9223372036854775807i64,t5=-3.4E38f32,t6=-1.7E308f64,t7=false,t8=\"binary_val_2\",t9=L\"标签值2\"" + "stb3_0 1626006833610ms 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"", + "stb3_0 1626006833610ms 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\"" }; code = taos_insert_telnet_lines(taos, lines3_0, 2); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines3_0 code: %d, %s.\n", code, tstrerror(code)); } //tag ID as child table name char* lines3_1[] = { - "stb3_1 1626006833610ms 1 id=\"child_table1\",host=\"host1\"", - "stb3_1 1626006833610ms 2 host=\"host2\",iD=\"child_table2\"", - "stb3_1 1626006833610ms 3 ID=\"child_table3\",host=\"host3\"" + "stb3_1 1626006833610ms 1 id=\"child_table1\" host=\"host1\"", + "stb3_1 1626006833610ms 2 host=\"host2\" iD=\"child_table2\"", + "stb3_1 1626006833610ms 3 ID=\"child_table3\" host=\"host3\"" }; code = taos_insert_telnet_lines(taos, lines3_1, 3); if (code) { - printf("code: %d, %s.\n", code, tstrerror(code)); + printf("lines3_1 code: %d, %s.\n", code, tstrerror(code)); } return; } +void verify_json_insert(TAOS* taos) { + TAOS_RES *result; + + result = taos_query(taos, "drop database if exists db;"); + taos_free_result(result); + usleep(100000); + result = taos_query(taos, "create database db precision 'ms';"); + taos_free_result(result); + usleep(100000); 
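/*
 * Each case below builds an OpenTSDB-style JSON payload (metric, timestamp,
 * value, tags) and hands it to taos_insert_json_payload(), printing the error
 * code on failure. A minimal sketch of a wrapper for that repeated
 * submit-and-report pattern is shown here; submit_json_payload is a
 * hypothetical helper name, not an existing API:
 *
 *   static void submit_json_payload(TAOS* taos, const char* label, char* payload) {
 *     int32_t code = taos_insert_json_payload(taos, payload);
 *     if (code) {
 *       printf("%s code: %d, %s.\n", label, code, tstrerror(code));
 *     }
 *   }
 *
 * With it, the first case would reduce to: submit_json_payload(taos, "payload_0", message);
 */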
+ + (void)taos_select_db(taos, "db"); + int32_t code = 0; + + char *message = + "{ \ + \"metric\":\"cpu_load_0\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 55.5, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface1\": \"eth0\", \ + \"Id\": \"tb0\" \ + } \ + }"; + + code = taos_insert_json_payload(taos, message); + if (code) { + printf("payload_0 code: %d, %s.\n", code, tstrerror(code)); + } + + char *message1 = + "[ \ + { \ + \"metric\":\"cpu_load_1\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 55.5, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface\": \"eth1\", \ + \"Id\": \"tb1\" \ + } \ + }, \ + { \ + \"metric\":\"cpu_load_2\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 55.5, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface\": \"eth2\", \ + \"Id\": \"tb2\" \ + } \ + } \ + ]"; + + code = taos_insert_json_payload(taos, message1); + if (code) { + printf("payload_1 code: %d, %s.\n", code, tstrerror(code)); + } + + char *message2 = + "[ \ + { \ + \"metric\":\"cpu_load_3\", \ + \"timestamp\": \ + { \ + \"value\": 1626006833610123, \ + \"type\": \"us\" \ + }, \ + \"value\": \ + { \ + \"value\": 55, \ + \"type\": \"int\" \ + }, \ + \"tags\": \ + { \ + \"host\": \ + { \ + \"value\": \"ubuntu\", \ + \"type\": \"binary\" \ + }, \ + \"interface\": \ + { \ + \"value\": \"eth3\", \ + \"type\": \"nchar\" \ + }, \ + \"ID\": \"tb3\", \ + \"port\": \ + { \ + \"value\": 4040, \ + \"type\": \"int\" \ + } \ + } \ + }, \ + { \ + \"metric\":\"cpu_load_4\", \ + \"timestamp\": 1626006833610123, \ + \"value\": 66.6, \ + \"tags\": \ + { \ + \"host\": \"ubuntu\", \ + \"interface\": \"eth4\", \ + \"Id\": \"tb4\" \ + } \ + } \ + ]"; + code = taos_insert_json_payload(taos, message2); + if (code) { + printf("payload_2 code: %d, %s.\n", code, tstrerror(code)); + } + + + cJSON *payload, *tags; + char *payload_str; + + /* Default format */ + //number + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_0"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //true + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_1"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddTrueToObject(payload, "value"); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_1 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //false + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_2"); + cJSON_AddNumberToObject(payload, "timestamp", 
1626006833610123); + cJSON_AddFalseToObject(payload, "value"); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_2 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //string + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_3"); + cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123); + cJSON_AddStringToObject(payload, "value", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_3 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //timestamp 0 -> current time + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_4"); + cJSON_AddNumberToObject(payload, "timestamp", 0); + cJSON_AddNumberToObject(payload, "value", 123); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_4 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //ID + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb0_5"); + cJSON_AddNumberToObject(payload, "timestamp", 0); + cJSON_AddNumberToObject(payload, "value", 123); + tags = cJSON_CreateObject(); + cJSON_AddStringToObject(tags, "ID", "tb0_5"); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddStringToObject(tags, "iD", "tb000"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddStringToObject(tags, "id", "tb555"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload0_5 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + /* Nested format */ + //timestamp + cJSON *timestamp; + //seconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_0"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + 
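/*
 * The t1-t4 tag object assembled here is rebuilt verbatim for every payload in
 * verify_json_insert(). A small helper could keep each case focused on the one
 * field it varies; make_default_tags is a hypothetical name, and this sketch
 * only relies on cJSON calls already used in this file:
 *
 *   static cJSON* make_default_tags(void) {
 *     cJSON* t = cJSON_CreateObject();
 *     cJSON_AddTrueToObject(t, "t1");
 *     cJSON_AddFalseToObject(t, "t2");
 *     cJSON_AddNumberToObject(t, "t3", 10);
 *     cJSON_AddStringToObject(t, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
 *     return t;
 *   }
 */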
cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //milleseconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_1"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833610); + cJSON_AddStringToObject(timestamp, "type", "ms"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_1 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //microseconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_2"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833610123); + cJSON_AddStringToObject(timestamp, "type", "us"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_2 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //nanoseconds + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_3"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833610123321); + cJSON_AddStringToObject(timestamp, "type", "ns"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_3 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //now + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb1_4"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 0); + cJSON_AddStringToObject(timestamp, "type", "ns"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + cJSON_AddNumberToObject(payload, "value", 10); + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + 
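/*
 * In the nested form used for stb1_0 through stb1_4, the timestamp is itself a
 * {"value": ..., "type": ...} object: "s", "ms", "us" and "ns" select the
 * precision, and a value of 0 (as in this stb1_4 case) resolves to the current
 * time, mirroring the plain-number stb0_4 case above.
 */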
cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload1_4 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //metric value + cJSON *metric_val; + //bool + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_0"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddTrueToObject(metric_val, "value"); + cJSON_AddStringToObject(metric_val, "type", "bool"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //tinyint + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_1"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 127); + cJSON_AddStringToObject(metric_val, "type", "tinyint"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_1 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //smallint + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_2"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 32767); + cJSON_AddStringToObject(metric_val, "type", "smallint"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_2 code: %d, %s.\n", 
code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //int + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_3"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 2147483647); + cJSON_AddStringToObject(metric_val, "type", "int"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_3 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //bigint + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_4"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 9223372036854775807); + cJSON_AddStringToObject(metric_val, "type", "bigint"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_4 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //float + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_5"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 11.12345); + cJSON_AddStringToObject(metric_val, "type", "float"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_5 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //double + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_6"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = 
cJSON_CreateObject(); + cJSON_AddNumberToObject(metric_val, "value", 22.123456789); + cJSON_AddStringToObject(metric_val, "type", "double"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_6 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //binary + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_7"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddStringToObject(metric_val, "value", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddStringToObject(metric_val, "type", "binary"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_7 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //nchar + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb2_8"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddStringToObject(metric_val, "value", "你好"); + cJSON_AddStringToObject(metric_val, "type", "nchar"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + cJSON_AddTrueToObject(tags, "t1"); + cJSON_AddFalseToObject(tags, "t2"); + cJSON_AddNumberToObject(tags, "t3", 10); + cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"); + cJSON_AddItemToObject(payload, "tags", tags); + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload2_8 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); + + //tag value + cJSON *tag; + + payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "metric", "stb3_0"); + + timestamp = cJSON_CreateObject(); + cJSON_AddNumberToObject(timestamp, "value", 1626006833); + cJSON_AddStringToObject(timestamp, "type", "s"); + cJSON_AddItemToObject(payload, "timestamp", timestamp); + + metric_val = cJSON_CreateObject(); + cJSON_AddStringToObject(metric_val, "value", "hello"); + cJSON_AddStringToObject(metric_val, "type", "nchar"); + cJSON_AddItemToObject(payload, "value", metric_val); + + tags = cJSON_CreateObject(); + + tag = cJSON_CreateObject(); + cJSON_AddTrueToObject(tag, "value"); + cJSON_AddStringToObject(tag, "type", "bool"); + 
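/*
 * Every tag in this stb3_0 case is a typed {"value", "type"} object like the
 * one being built here; t1 through t10 walk the tag types from bool to nchar.
 * A hypothetical helper (add_typed_number_tag is not an existing API) could
 * wrap the repeated create/fill/attach steps for the numeric tags:
 *
 *   static void add_typed_number_tag(cJSON* tags, const char* name,
 *                                    double value, const char* type) {
 *     cJSON* tag = cJSON_CreateObject();
 *     cJSON_AddNumberToObject(tag, "value", value);
 *     cJSON_AddStringToObject(tag, "type", type);
 *     cJSON_AddItemToObject(tags, name, tag);
 *   }
 *
 *   // e.g. add_typed_number_tag(tags, "t5", 2147483647, "int");
 */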
cJSON_AddItemToObject(tags, "t1", tag); + + tag = cJSON_CreateObject(); + cJSON_AddFalseToObject(tag, "value"); + cJSON_AddStringToObject(tag, "type", "bool"); + cJSON_AddItemToObject(tags, "t2", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 127); + cJSON_AddStringToObject(tag, "type", "tinyint"); + cJSON_AddItemToObject(tags, "t3", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 32767); + cJSON_AddStringToObject(tag, "type", "smallint"); + cJSON_AddItemToObject(tags, "t4", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 2147483647); + cJSON_AddStringToObject(tag, "type", "int"); + cJSON_AddItemToObject(tags, "t5", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 9223372036854775807); + cJSON_AddStringToObject(tag, "type", "bigint"); + cJSON_AddItemToObject(tags, "t6", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 11.12345); + cJSON_AddStringToObject(tag, "type", "float"); + cJSON_AddItemToObject(tags, "t7", tag); + + tag = cJSON_CreateObject(); + cJSON_AddNumberToObject(tag, "value", 22.1234567890); + cJSON_AddStringToObject(tag, "type", "double"); + cJSON_AddItemToObject(tags, "t8", tag); + + tag = cJSON_CreateObject(); + cJSON_AddStringToObject(tag, "value", "binary_val"); + cJSON_AddStringToObject(tag, "type", "binary"); + cJSON_AddItemToObject(tags, "t9", tag); + + tag = cJSON_CreateObject(); + cJSON_AddStringToObject(tag, "value", "你好"); + cJSON_AddStringToObject(tag, "type", "nchar"); + cJSON_AddItemToObject(tags, "t10", tag); + + cJSON_AddItemToObject(payload, "tags", tags); + + payload_str = cJSON_Print(payload); + //printf("%s\n", payload_str); + + code = taos_insert_json_payload(taos, payload_str); + if (code) { + printf("payload3_0 code: %d, %s.\n", code, tstrerror(code)); + } + free(payload_str); + cJSON_Delete(payload); +} + int main(int argc, char *argv[]) { const char* host = "127.0.0.1"; const char* user = "root"; @@ -1220,6 +1954,9 @@ int main(int argc, char *argv[]) { printf("************ verify telnet-insert *************\n"); verify_telnet_insert(taos); + printf("************ verify json-insert *************\n"); + verify_json_insert(taos); + printf("************ verify query *************\n"); verify_query(taos); diff --git a/tests/examples/c/clientcfgtest-taosd.c b/tests/examples/c/clientcfgtest-taosd.c new file mode 100644 index 0000000000000000000000000000000000000000..fbfbd8935a34481c23e806bbe461882ed9a10437 --- /dev/null +++ b/tests/examples/c/clientcfgtest-taosd.c @@ -0,0 +1,33 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tconfig.h" +#include "tglobal.h" +#include "tulog.h" +#include "tsocket.h" +#include "tutil.h" +extern SGlobalCfg *taosGetConfigOption(const char *option) ; +int main( int argc, char *argv[]){ + + printf("start to test\n"); + + //case1: + //Test config to wrong type + const char config1[128] = "{\"cache\":\"4\"}";//input the parameter which want to be configured + taos_set_config(config1); //configure the parameter + + SGlobalCfg *cfg1 ; + + cfg1 = taosGetConfigOption("cache");//check the option result + if(cfg1->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config cache to '4'success!\n"); + else + printf("config cache failure!\n"); + return 0 ; + +} diff --git a/tests/examples/c/clientcfgtest-wrongjson.c b/tests/examples/c/clientcfgtest-wrongjson.c new file mode 100644 index 
0000000000000000000000000000000000000000..eecb5dae6d27c213731afdea005af3fc265dd47f --- /dev/null +++ b/tests/examples/c/clientcfgtest-wrongjson.c @@ -0,0 +1,62 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tconfig.h" +#include "tglobal.h" +#include "tulog.h" +#include "tsocket.h" +#include "tutil.h" +extern SGlobalCfg *taosGetConfigOption(const char *option) ; +int main( int argc, char *argv[]){ + + printf("start to test\n"); + + //case1: + //Test config with wrong JSON + //The result is failure + const char config1[128] = "{\"firstEp\":\"BCC-2:6030\",\"debugFlag\":\135\"}";//input the parameter which want to be configured + taos_set_config(config1); //configure the parameter + + SGlobalCfg *cfg1 ; + cfg1 = taosGetConfigOption("firstEp");//check the option result + if(cfg1->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config firstEp 'BCC-2:6030'success!\n"); + else + printf("config firstEp failure!\n"); + SGlobalCfg *cfg2 ; + cfg2 = taosGetConfigOption("debugFlag");//check the option result + if(cfg1->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config debugFlag '135'success!\n"); + else + printf("config debugFlag failure!\n"); + + + //case2: + //repair the JSON and try again + //The result is success + const char config2[128] = "{\"firstEp\":\"BCC-2:6030\",\"debugFlag\":\"135\"}";//input the parameter which want to be configured + taos_set_config(config2); //configure the parameter + + SGlobalCfg *cfg3 ; + + cfg3 = taosGetConfigOption("firstEp");//check the option result + if(cfg3->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config firstEp 'BCC-2:6030'success!\n"); + else + printf("config firstEp failure!\n"); + + SGlobalCfg *cfg4 ; + + cfg4 = taosGetConfigOption("debugFlag");//check the option result + if(cfg4->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config debugFlag '135'success!\n"); + else + printf("config debugFlag failure!\n"); + return 0 ; + +} diff --git a/tests/examples/c/clientcfgtest-wrongtype.c b/tests/examples/c/clientcfgtest-wrongtype.c new file mode 100644 index 0000000000000000000000000000000000000000..d88cbeebe8e5114ed4836e77b9494de1cc54aba8 --- /dev/null +++ b/tests/examples/c/clientcfgtest-wrongtype.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tconfig.h" +#include "tglobal.h" +#include "tulog.h" +#include "tsocket.h" +#include "tutil.h" +extern SGlobalCfg *taosGetConfigOption(const char *option) ; +int main( int argc, char *argv[]){ + + printf("start to test\n"); + + //case1: + //Test config to wrong type + //The result is failure + const char config1[128] = "{\"debugFlag\":\"9999999999999999999999999\"}";//input the parameter which want to be configured + taos_set_config(config1); //configure the parameter + + SGlobalCfg *cfg1 ; + + cfg1 = taosGetConfigOption("debugFlag");//check the option result + if(cfg1->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config debugFlag '9999999999999999999999999\n"); + else + printf("config debugFlag failure!\n"); + + //case2: + //Try again with right parameter + //The result is failure + const char config2[128] = "{\"debugFlag\":\"135\"}";//input the parameter which want to be configured + taos_set_config(config2); //configure the parameter + + SGlobalCfg *cfg2 ; + + cfg2 = 
taosGetConfigOption("debugFlag");//check the option result + if(cfg2->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config debugflag '135'success!\n"); + else + printf("config debugflag failure!\n"); + return 0 ; + +} diff --git a/tests/examples/c/clientcfgtest-wrongvalue.c b/tests/examples/c/clientcfgtest-wrongvalue.c new file mode 100644 index 0000000000000000000000000000000000000000..f0d44a47f62696d14844ea12276b74da7d0ff408 --- /dev/null +++ b/tests/examples/c/clientcfgtest-wrongvalue.c @@ -0,0 +1,46 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tconfig.h" +#include "tglobal.h" +#include "tulog.h" +#include "tsocket.h" +#include "tutil.h" +extern SGlobalCfg *taosGetConfigOption(const char *option) ; +int main( int argc, char *argv[]){ + + printf("start to test\n"); + + //case1: + //Test config to wrong type + const char config1[128] = "{\"rpcTimer\":\"0\"}";//input the parameter which want to be configured + taos_set_config(config1); //configure the parameter + + SGlobalCfg *cfg1 ; + + cfg1 = taosGetConfigOption("rpcTimer");//check the option result + if(cfg1->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config rpcTimer to '0'success!\n"); + else + printf("config rpcTimer failure!\n"); + + //case2: + //Try again with right parameter + const char config2[128] = "{\"rpcTimer\":\"400\"}";//input the parameter which want to be configured + taos_set_config(config2); //configure the parameter + + SGlobalCfg *cfg2 ; + + cfg2 = taosGetConfigOption("rpcTimer");//check the option result + if(cfg2->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config rpcTimer '400'success!\n"); + else + printf("config rpcTimer failure!\n"); + return 0 ; + +} diff --git a/tests/examples/c/clientcfgtest.c b/tests/examples/c/clientcfgtest.c new file mode 100644 index 0000000000000000000000000000000000000000..5f8f51cdb1156a25544273fc6419f65b86ea4ecc --- /dev/null +++ b/tests/examples/c/clientcfgtest.c @@ -0,0 +1,55 @@ +#include +#include +#include +#include +#include +#include "os.h" +#include "taosdef.h" +#include "taoserror.h" +#include "tconfig.h" +#include "tglobal.h" +#include "tulog.h" +#include "tsocket.h" +#include "tutil.h" +extern SGlobalCfg *taosGetConfigOption(const char *option) ; +int main( int argc, char *argv[]){ + + printf("start to test\n"); + + //case1: + //Test config firstEp success + const char config1[128] = "{\"firstEp\":\"BCC-2:6030\",\"debugFlag\":\"135\"}";//input the parameter which want to be configured + taos_set_config(config1); //configure the parameter + + SGlobalCfg *cfg1 ; + + cfg1 = taosGetConfigOption("firstEp");//check the option result + if(cfg1->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config firstEp 'BCC-2:6030'success!\n"); + else + printf("config firstEp failure!\n"); + + + SGlobalCfg *cfg2 ; + + cfg2 = taosGetConfigOption("debugFlag");//check the option result + if(cfg2->cfgStatus == 3) //If cfgStatus is 3,it means configure is success + printf("config debugFlag '135' success!\n"); + else + printf("config debugFlag failure!\n"); + //case2: + //Test config only useful at the first time + //The result is failure + const char config2[128] = "{\"fqdn\":\"BCC-3\"}";//input the parameter which want to be configured + taos_set_config(config2); //configure the parameter + + SGlobalCfg *cfg3 ; + + cfg2 = taosGetConfigOption("fqdn");//check the option result + if(cfg2->cfgStatus == 
3) //If cfgStatus is 3,it means configure is success + printf("config fqdn to 'BCC-3'success!\n"); + else + printf("config fqdn failure!\n"); + return 0 ; + +} diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile index 304623c27af27cd23a301af134647fb3b9746d64..f364eb76fc34ab0975c00dcae2b8348e58b38517 100644 --- a/tests/examples/c/makefile +++ b/tests/examples/c/makefile @@ -6,8 +6,8 @@ TARGET=exe LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \ -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \ - -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 - + -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \ + -I../../../deps/cJson/inc all: $(TARGET) exe: @@ -17,6 +17,12 @@ exe: gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS) gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS) gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS) + gcc $(CFLAGS) ./clientcfgtest.c -o $(ROOT)clientcfgtest $(LFLAGS) + gcc $(CFLAGS) ./clientcfgtest-wrongtype.c -o $(ROOT)clientcfgtest-wrongtype $(LFLAGS) + gcc $(CFLAGS) ./clientcfgtest-wrongjson.c -o $(ROOT)clientcfgtest-wrongjson $(LFLAGS) + gcc $(CFLAGS) ./clientcfgtest-wrongvalue.c -o $(ROOT)clientcfgtest-wrongvalue $(LFLAGS) + gcc $(CFLAGS) ./clientcfgtest-taosd.c -o $(ROOT)clientcfgtest-taosd $(LFLAGS) + clean: rm $(ROOT)asyncdemo @@ -26,3 +32,9 @@ clean: rm $(ROOT)stream rm $(ROOT)subscribe rm $(ROOT)apitest + rm $(ROOT)clientcfgtest + rm $(ROOT)clientcfgtest-wrongtype + rm $(ROOT)clientcfgtest-wrongjson + rm $(ROOT)clientcfgtest-wrongvalue + rm $(ROOT)clientcfgtest-taosd + diff --git a/tests/gotest/batchtest.bat b/tests/gotest/batchtest.bat index efd8961bb0be2eb6f20e291114b92b00469b984f..2a96ee31eb6211dbc5f300fbb2f3d62c03df3061 100755 --- a/tests/gotest/batchtest.bat +++ b/tests/gotest/batchtest.bat @@ -1,3 +1,4 @@ + @echo off echo ==== start Go connector test cases test ==== cd /d %~dp0 @@ -18,3 +19,10 @@ rem case002.bat :: cd case002 :: case002.bat + + +rem cd nanosupport +rem nanoCase.bat + +:: cd nanosupport +:: nanoCase.bat \ No newline at end of file diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh index 8f5a7fe8f032134e55c9d9675361590ed6d5b19b..503d77b226885b10e3874a3e0718789bed34b200 100755 --- a/tests/gotest/batchtest.sh +++ b/tests/gotest/batchtest.sh @@ -19,3 +19,4 @@ go env -w GOPROXY=https://goproxy.io,direct bash ./case001/case001.sh $severIp $serverPort bash ./case002/case002.sh $severIp $serverPort #bash ./case003/case003.sh $severIp $serverPort +bash ./nanosupport/nanoCase.sh $severIp $serverPort diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go index 9d35888f313461a2ce90c7a6ed4ef2791229866c..29bc92f2a0668b3f576145d5bd6d08ed37c82f1b 100644 --- a/tests/gotest/case001/case001.go +++ b/tests/gotest/case001/case001.go @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ - package main import ( diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 94e5bb44e03a1f7d2704752fcf9c080abcb4f23f..831e9f83ac482c0a2c668e2ad0d16c4bf59f19aa 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,8 +15,7 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest > /dev/null 2>&1 -go mod tidy > /dev/null 2>&1 -go build > /dev/null 2>&1 +go mod init demotest +go build sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/gotest/case002/case002.bat b/tests/gotest/case002/case002.bat index ebec576e724ccb14319dd380c9783a783ac0db62..385677acae826e248a410472bfc7a022ff3003ab 100644 --- a/tests/gotest/case002/case002.bat +++ b/tests/gotest/case002/case002.bat @@ -1,5 +1,5 @@ @echo off -echo ==== start run cases001.go +echo ==== start run cases002.go del go.* go mod init demotest diff --git a/tests/gotest/case002/case002.go b/tests/gotest/case002/case002.go index c69da04cb271c24e33953ca8fdfea71c67349b4f..e2ba5ea28ee4f92cfbdca27c78d47268a387c693 100644 --- a/tests/gotest/case002/case002.go +++ b/tests/gotest/case002/case002.go @@ -43,10 +43,9 @@ func main() { os.Exit(1) } defer db.Close() - db.Exec("drop if exists database test") - db.Exec("create if not exists database test") + db.Exec("drop database if exists test") + db.Exec("create database if not exists test ") db.Exec("use test") - db.Exec("drop if exists database test") db.Exec("create table test (ts timestamp ,level int)") for i := 0; i < 10; i++ { sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i) diff --git a/tests/gotest/case002/case002.sh b/tests/gotest/case002/case002.sh index 94e5bb44e03a1f7d2704752fcf9c080abcb4f23f..d98337cce7cfeb51ec9305226b20abdd7b360a46 100644 --- a/tests/gotest/case002/case002.sh +++ b/tests/gotest/case002/case002.sh @@ -1,6 +1,6 @@ #!/bin/bash -echo "==== start run cases001.go" +echo "==== start run cases002.go" set +e #set -x diff --git a/tests/gotest/nanosupport/connector/executor.go b/tests/gotest/nanosupport/connector/executor.go new file mode 100644 index 0000000000000000000000000000000000000000..218ea29af3b34a8cfb5ab56585eeb07bc467d209 --- /dev/null +++ b/tests/gotest/nanosupport/connector/executor.go @@ -0,0 +1,208 @@ +package connector + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/taosdata/go-utils/log" + "github.com/taosdata/go-utils/tdengine/config" + "github.com/taosdata/go-utils/tdengine/connector" + tdengineExecutor "github.com/taosdata/go-utils/tdengine/executor" +) + +type Executor struct { + executor *tdengineExecutor.Executor + ctx context.Context +} + +var Logger = log.NewLogger("taos test") + +func NewExecutor(conf *config.TDengineGo, db string, showSql bool) (*Executor, error) { + tdengineConnector, err := connector.NewTDengineConnector("go", conf) + if err != nil { + return nil, err + } + executor := tdengineExecutor.NewExecutor(tdengineConnector, db, showSql, Logger) + return &Executor{ + executor: executor, + ctx: context.Background(), + }, nil +} + +func (e *Executor) Execute(sql string) (int64, error) { + return e.executor.DoExec(e.ctx, sql) +} +func (e *Executor) Query(sql string) (*connector.Data, error) { + fmt.Println("query :", sql) + return e.executor.DoQuery(e.ctx, sql) +} +func (e *Executor) CheckData(row, col int, value interface{}, data *connector.Data) (bool, error) { + if data == nil { + return false, fmt.Errorf("data is nil") + } + if col >= len(data.Head) { + return false, fmt.Errorf("col out of 
data") + } + if row >= len(data.Data) { + return false, fmt.Errorf("row out of data") + } + dataValue := data.Data[row][col] + + if dataValue == nil && value != nil { + return false, fmt.Errorf("dataValue is nil but value is not nil") + } + if dataValue == nil && value == nil { + return true, nil + } + if reflect.TypeOf(dataValue) != reflect.TypeOf(value) { + return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue)) + } + switch value.(type) { + case time.Time: + t, _ := dataValue.(time.Time) + if value.(time.Time).Nanosecond() != t.Nanosecond() { + return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond()) + } + case string: + if value.(string) != dataValue.(string) { + return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string)) + } + case int8: + if value.(int8) != dataValue.(int8) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8)) + } + case int16: + if value.(int16) != dataValue.(int16) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16)) + } + case int32: + if value.(int32) != dataValue.(int32) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32)) + } + case int64: + if value.(int64) != dataValue.(int64) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64)) + } + case float32: + if value.(float32) != dataValue.(float32) { + return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + } + case float64: + if value.(float64) != dataValue.(float64) { + return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + } + case bool: + if value.(bool) != dataValue.(bool) { + return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool)) + } + default: + return false, fmt.Errorf("unsupport type %v", reflect.TypeOf(value)) + } + return true, nil +} + +func (e *Executor) CheckData2(row, col int, value interface{}, data *connector.Data) { + + match, err := e.CheckData(row, col, value, data) + fmt.Println("expect data is :", value) + fmt.Println("go got data is :", data.Data[row][col]) + if err != nil { + fmt.Println(err) + } + if !match { + fmt.Println(" data not match") + + } + + /* + fmt.Println(value) + if data == nil { + // return false, fmt.Errorf("data is nil") + // fmt.Println("check failed") + } + if col >= len(data.Head) { + // return false, fmt.Errorf("col out of data") + // fmt.Println("check failed") + } + if row >= len(data.Data) { + // return false, fmt.Errorf("row out of data") + // fmt.Println("check failed") + } + dataValue := data.Data[row][col] + + if dataValue == nil && value != nil { + // return false, fmt.Errorf("dataValue is nil but value is not nil") + // fmt.Println("check failed") + } + if dataValue == nil && value == nil { + // return true, nil + fmt.Println("check pass") + } + if reflect.TypeOf(dataValue) != reflect.TypeOf(value) { + // return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue)) + fmt.Println("check failed") + } + switch value.(type) { + case time.Time: + t, _ := dataValue.(time.Time) + if value.(time.Time).Nanosecond() != t.Nanosecond() { + // return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond()) + // fmt.Println("check failed") 
+ } + case string: + if value.(string) != dataValue.(string) { + // return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string)) + // fmt.Println("check failed") + } + case int8: + if value.(int8) != dataValue.(int8) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8)) + // fmt.Println("check failed") + } + case int16: + if value.(int16) != dataValue.(int16) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16)) + // fmt.Println("check failed") + } + case int32: + if value.(int32) != dataValue.(int32) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32)) + // fmt.Println("check failed") + } + case int64: + if value.(int64) != dataValue.(int64) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64)) + // fmt.Println("check failed") + } + case float32: + if value.(float32) != dataValue.(float32) { + // return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + // fmt.Println("check failed") + } + case float64: + if value.(float64) != dataValue.(float64) { + // return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + // fmt.Println("check failed") + } + case bool: + if value.(bool) != dataValue.(bool) { + // return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool)) + // fmt.Println("check failed") + } + default: + // return false, fmt.Errorf("unsupport type %v", reflect.TypeOf(value)) + // fmt.Println("check failed") + } + // return true, nil + // fmt.Println("check pass") + */ +} + +func (e *Executor) CheckRow(count int, data *connector.Data) { + + if len(data.Data) != count { + fmt.Println("check failed !") + } +} diff --git a/tests/gotest/nanosupport/nanoCase.bat b/tests/gotest/nanosupport/nanoCase.bat new file mode 100644 index 0000000000000000000000000000000000000000..86bddd5b02c5399d5b8d70bd08020e96a7d1c0e5 --- /dev/null +++ b/tests/gotest/nanosupport/nanoCase.bat @@ -0,0 +1,9 @@ +@echo off +echo ==== start run nanosupport.go + +del go.* +go mod init nano +go mod tidy +go build +nano.exe -h %1 -p %2 +cd .. 
diff --git a/tests/gotest/nanosupport/nanoCase.sh b/tests/gotest/nanosupport/nanoCase.sh new file mode 100644 index 0000000000000000000000000000000000000000..bec8929f14c0a56e7c4074efa39d1e1e881fb12e --- /dev/null +++ b/tests/gotest/nanosupport/nanoCase.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "==== start run nanosupport.go " + +set +e +#set -x + +script_dir="$(dirname $(readlink -f $0))" +#echo "pwd: $script_dir, para0: $0" + +#execName=$0 +#execName=`echo ${execName##*/}` +#goName=`echo ${execName%.*}` + +###### step 3: start build +cd $script_dir +rm -f go.* +go mod init nano +go mod tidy +go build +sleep 10s +./nano -h $1 -p $2 diff --git a/tests/gotest/nanosupport/nanosupport.go b/tests/gotest/nanosupport/nanosupport.go new file mode 100644 index 0000000000000000000000000000000000000000..e2f24a73c0a6db3c94b90879c73d0f05e2476307 --- /dev/null +++ b/tests/gotest/nanosupport/nanosupport.go @@ -0,0 +1,269 @@ +package main + +import ( + "fmt" + "log" + "nano/connector" + "time" + + "github.com/taosdata/go-utils/tdengine/config" +) + +func main() { + e, err := connector.NewExecutor(&config.TDengineGo{ + Address: "root:taosdata@/tcp(127.0.0.1:6030)/", + MaxIdle: 20, + MaxOpen: 30, + MaxLifetime: 30, + }, "db", false) + if err != nil { + panic(err) + } + prepareData(e) + data, err := e.Query("select * from tb") + if err != nil { + panic(err) + } + + layout := "2006-01-02 15:04:05.999999999" + t0, _ := time.Parse(layout, "2021-06-10 00:00:00.100000001") + t1, _ := time.Parse(layout, "2021-06-10 00:00:00.150000000") + t2, _ := time.Parse(layout, "2021-06-10 00:00:00.299999999") + t3, _ := time.Parse(layout, "2021-06-10 00:00:00.300000000") + t4, _ := time.Parse(layout, "2021-06-10 00:00:00.300000001") + t5, _ := time.Parse(layout, "2021-06-10 00:00:00.999999999") + + e.CheckData2(0, 0, t0, data) + e.CheckData2(1, 0, t1, data) + e.CheckData2(2, 0, t2, data) + e.CheckData2(3, 0, t3, data) + e.CheckData2(4, 0, t4, data) + e.CheckData2(5, 0, t5, data) + e.CheckData2(3, 1, int32(3), data) + e.CheckData2(4, 1, int32(5), data) + e.CheckData2(5, 1, int32(7), data) + + fmt.Println(" start check nano support!") + + data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000001\" and ts < \"2021-06-10 0:00:00.160000000\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000000\" and ts < \"2021-06-10 0:00:00.150000000\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > 1623254400400000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb where ts < \"2021-06-10 00:00:00.400000000\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb where ts < now + 400000000b;") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb where ts >= \"2021-06-10 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb where ts <= 1623254400300000000;") + e.CheckData2(0, 0, int64(4), data) + + data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.000000000\";") + + data, _ = e.Query("select count(*) from tb where ts = 
1623254400150000000;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb where ts between \"2021-06-10 0:00:00.299999999\" and \"2021-06-10 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(3), data) + + data, _ = e.Query("select avg(speed) from tb interval(5000000000b);") + e.CheckRow(1, data) + + data, _ = e.Query("select avg(speed) from tb interval(100000000b)") + e.CheckRow(4, data) + + data, _ = e.Query("select avg(speed) from tb interval(1000b);") + e.CheckRow(5, data) + + data, _ = e.Query("select avg(speed) from tb interval(1u);") + e.CheckRow(5, data) + + data, _ = e.Query("select avg(speed) from tb interval(100000000b) sliding (100000000b);") + e.CheckRow(4, data) + + data, _ = e.Query("select last(*) from tb") + tt, _ := time.Parse(layout, "2021-06-10 0:00:00.999999999") + e.CheckData2(0, 0, tt, data) + + data, _ = e.Query("select first(*) from tb") + tt1, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001") + e.CheckData2(0, 0, tt1, data) + + e.Execute("insert into tb values(now + 500000000b, 6);") + data, _ = e.Query("select * from tb;") + e.CheckRow(7, data) + + e.Execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);") + e.Execute("insert into tb2 values(\"2021-06-10 0:00:00.100000001\", 1, \"2021-06-11 0:00:00.100000001\");") + e.Execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);") + e.Execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);") + e.Execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);") + e.Execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);") + e.Execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);") + + data, _ = e.Query("select * from tb2;") + tt2, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001") + tt3, _ := time.Parse(layout, "2021-06-10 0:00:00.150000000") + + e.CheckData2(0, 0, tt2, data) + e.CheckData2(1, 0, tt3, data) + e.CheckData2(2, 1, int32(4), data) + e.CheckData2(3, 1, int32(3), data) + tt4, _ := time.Parse(layout, "2021-06-11 00:00:00.300000001") + e.CheckData2(4, 2, tt4, data) + e.CheckRow(6, data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > \"2021-06-11 0:00:00.100000000\" and ts2 < \"2021-06-11 0:00:00.100000002\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800500000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb2 where ts2 < \"2021-06-11 0:00:00.400000000\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 < now + 400000000b;") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 >= \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <= 1623340800400000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.000000000\";") + + data, _ = e.Query("select count(*) from tb2 where ts2 = 
\"2021-06-11 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 = 1623340800300000001;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 between \"2021-06-11 0:00:00.299999999\" and \"2021-06-11 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(3), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> 1623513600999999999;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000000\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != 1623513600999999999;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000000\";") + e.CheckData2(0, 0, int64(6), data) + + e.Execute("insert into tb2 values(now + 500000000b, 6, now +2d);") + data, _ = e.Query("select * from tb2;") + e.CheckRow(7, data) + + e.Execute("create table tb3 (ts timestamp, speed int);") + _, err = e.Execute("insert into tb3 values(16232544001500000, 2);") + if err != nil { + fmt.Println("check pass! ") + } + + e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456\", 2);") + data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456000\";") + e.CheckRow(1, data) + + e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456789000\", 2);") + data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456789\";") + e.CheckRow(1, data) + + // check timezone support + + e.Execute("drop database if exists nsdb;") + e.Execute("create database nsdb precision 'ns';") + e.Execute("use nsdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + + ttt, _ := time.Parse(layout, "2021-06-10 01:00:00.123456789") + e.CheckData2(0, 0, ttt, data) + + e.Execute("create database usdb precision 'us';") + e.Execute("use usdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + ttt2, _ := time.Parse(layout, "2021-06-10 01:00:00.123456") + e.CheckData2(0, 0, ttt2, data) + + e.Execute("drop database if exists msdb;") + e.Execute("create database msdb precision 'ms';") + e.Execute("use msdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + ttt3, _ := time.Parse(layout, "2021-06-10 01:00:00.123") + e.CheckData2(0, 0, ttt3, data) + fmt.Println("all test done!") + +} + +func prepareData(e *connector.Executor) { + sqlList := []string{ + "reset query 
cache;", + "drop database if exists db;", + "create database db;", + "use db;", + "reset query cache;", + "drop database if exists db;", + "create database db precision 'ns';", + "show databases;", + "use db;", + "create table tb (ts timestamp, speed int);", + "insert into tb values('2021-06-10 0:00:00.100000001', 1);", + "insert into tb values(1623254400150000000, 2);", + "import into tb values(1623254400300000000, 3);", + "import into tb values(1623254400299999999, 4);", + "insert into tb values(1623254400300000001, 5);", + "insert into tb values(1623254400999999999, 7);", + } + for _, sql := range sqlList { + err := executeSql(e, sql) + if err != nil { + log.Fatalf("prepare data error:%v, sql:%s", err, sql) + } + } +} + +func executeSql(e *connector.Executor, sql string) error { + _, err := e.Execute(sql) + if err != nil { + return err + } + return nil +} diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 68b64fd4e0c4f09ff0b8e96d7802b954b774fbc5..05b2d45ce434d0990d7c143863b9ca268a7d6a26 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -21,7 +21,8 @@ fi today=`date +"%Y%m%d"` WORK_DIR=/root/pxiao -PERFORMANCE_TEST_REPORT=$WORK_DIR/TDengine/tests/performance-report-$branch-$type-$today.log +name=`echo $branch | cut -d '/' -f2` +PERFORMANCE_TEST_REPORT=$WORK_DIR/TDinternal/community/tests/performance-report-$name-$type-$today.log # Coloured Echoes # function red_echo { echo -e "\033[31m$@\033[0m"; } # @@ -54,11 +55,12 @@ function stopTaosd { } function buildTDengine { - echoInfo "Build TDengine" - cd $WORK_DIR/TDengine + echoInfo "Build TDinternal" + cd $WORK_DIR/TDinternal git remote update > /dev/null git reset --hard HEAD + git fetch git checkout $branch REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` LOCAL_COMMIT=`git rev-parse --short @` @@ -69,13 +71,22 @@ function buildTDengine { echo "repo up-to-date" fi + cd community + git reset --hard HEAD + cd .. + echo "git submodule update --init --recursive" + git submodule update --init --recursive + git pull > /dev/null 2>&1 - if [ $type = "jemalloc" ];then - echo "git submodule update --init --recursive" - git submodule update --init --recursive - fi + + cd community + git remote update > /dev/null + git reset --hard HEAD + git fetch + git checkout $branch + REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` LOCAL_COMMIT=`git rev-parse --short @` - cd debug + cd ../debug rm -rf * if [ $type = "jemalloc" ];then echo "cmake .. -DJEMALLOC_ENABLED=true > /dev/null" @@ -83,6 +94,10 @@ function buildTDengine { else cmake .. 
> /dev/null fi + #cp $WORK_DIR/taosdemoPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/tools/ + #cp $WORK_DIR/insertFromCSVPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/insert/ + #cp $WORK_DIR/queryPerformance.py $WORK_DIR/TDinternal/community/tests/pytest/query/ + rm -rf $WORK_DIR/TDinternal/community/tests/pytest/query/operator.py make > /dev/null 2>&1 make install > /dev/null 2>&1 echo "Build TDengine on remote server" @@ -91,24 +106,24 @@ function buildTDengine { function runQueryPerfTest { [ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT - nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 & + nohup $WORK_DIR/TDinternal/debug/build/bin/taosd -c /etc/perf/ > /dev/null 2>&1 & echoInfo "Wait TDengine to start" sleep 60 echoInfo "Run Performance Test" - cd $WORK_DIR/TDengine/tests/pytest + cd $WORK_DIR/TDinternal/community/tests/pytest - python3 query/queryPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + python3 query/queryPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -d perf2 | tee -a $PERFORMANCE_TEST_REPORT python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT - echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 10 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT - echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 10 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 10 | tee -a $PERFORMANCE_TEST_REPORT } @@ -121,7 +136,7 @@ function sendReport { sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT` - echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${branch} ${jemalloc} commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${branch} ${type} commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ (cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \ /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!" 
} diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index ec44a85d5b29c0471db64b0362126804ae73adec..344ad5dde5f9fc58b760691b94f112e9b458f1d7 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -18109,3 +18109,72 @@ fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall } +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_and_cache_attr + fun:lib_getattr + fun:PyObject_GetAttr + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:PyEval_EvalCode + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_and_cache_attr + fun:lib_getattr + fun:PyObject_GetAttr + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:_my_Py_InitModule + fun:b_init_cffi_1_0_external_module + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__constant_time + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:lib_build_cpython_func.isra.87 + fun:lib_build_and_cache_attr + fun:lib_getattr + fun:PyObject_GetAttr + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} \ No newline at end of file diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 06ec3c6bfabfe4d9c378c9d17dda944990f624a8..050f1fd060e5ef455881769f39a60e6f59169a53 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -181,7 +181,10 @@ python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoIns python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanosubscribe.py python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestInsertTime_step.py -python3 test.py -f tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py +python3 test.py -f tools/taosdumpTestNanoSupport.py + +# +python3 ./test.py -f tsdb/tsdbComp.py # update python3 ./test.py -f update/allow_update.py @@ -267,7 +270,7 @@ python3 ./test.py -f query/queryStateWindow.py # python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py python3 ./test.py -f query/queryCnameDisplay.py -python3 ./test.py -f query/operator_cost.py +# python3 ./test.py -f query/operator_cost.py # python3 ./test.py -f query/long_where_query.py python3 test.py -f query/nestedQuery/queryWithSpread.py @@ -356,6 +359,9 @@ python3 ./test.py -f functions/queryTestCases.py python3 ./test.py -f functions/function_stateWindow.py python3 ./test.py -f functions/function_derivative.py python3 ./test.py -f functions/function_irate.py +python3 ./test.py -f functions/function_ceil.py +python3 ./test.py -f functions/function_floor.py +python3 ./test.py -f functions/function_round.py python3 ./test.py -f 
insert/unsignedInt.py python3 ./test.py -f insert/unsignedBigint.py diff --git a/tests/pytest/functions/function_ceil.py b/tests/pytest/functions/function_ceil.py new file mode 100644 index 0000000000000000000000000000000000000000..9197b0eec45a2154c2345a5b2fc469e54b1e41f9 --- /dev/null +++ b/tests/pytest/functions/function_ceil.py @@ -0,0 +1,1518 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def randomInt(self): + return random.randint(-2147483647, 2147483647) + + def randomUInt(self): + return random.randint(0, 4294967294) + + def randomBigint(self): + return random.randint(-2**63 + 1, 2**63 - 1) + + def randomUBigint(self): + return random.randint(0, 18446744073709551614) + + def randomDouble(self): + return random.random() + + def randomNchar(self): + return random.choice('abcdefghijklmnopqrstuvwxyz') + + def randomSmallint(self): + return random.randint(-32767, 32767) + + def randomUSmallint(self): + return random.randint(0, 65534) + + def randomTinyint(self): + return random.randint(-127, 127) + + def randomUTinyint(self): + return random.randint(0, 254) + + def run(self): + select_command = [ + "ceil(ts)", + "ceil(timestamp_col)", + "ceil(int_col)", + "ceil(bigint_col)", + "ceil(float_col)", + "ceil(double_col)", + "ceil(binary_col)", + "ceil(smallint_col)", + "ceil(tinyint_col)", + "ceil(bool_col)", + "ceil(nchar_col)", + "ceil(uint_col)", + "ceil(ubigint_col)", + "ceil(usmallint_col)", + "ceil(utinyint_col)", + "ceil(timestamp_tag)", + "ceil(int_tag)", + "ceil(bigint_tag)", + "ceil(float_tag)", + "ceil(double_tag)", + "ceil(binary_tag)", + "ceil(smallint_tag)", + "ceil(tinyint_tag)", + "ceil(bool_tag)", + "ceil(nchar_tag)", + "ceil(uint_tag)", + "ceil(ubigint_tag)", + "ceil(usmallint_tag)", + "ceil(utinyint_tag)", + "count(ceil(int_col))", + "count(ceil(bigint_col))", + "count(ceil(float_col))", + "count(ceil(double_col))", + "count(ceil(smallint_col))", + "count(ceil(tinyint_col))", + "count(ceil(uint_col))", + "count(ceil(ubigint_col))", + "count(ceil(usmallint_col))", + "count(ceil(utinyint_col))", + "avg(ceil(int_col))", + "avg(ceil(bigint_col))", + "avg(ceil(float_col))", + "avg(ceil(double_col))", + "avg(ceil(smallint_col))", + "avg(ceil(tinyint_col))", + "avg(ceil(uint_col))", + "avg(ceil(ubigint_col))", + "avg(ceil(usmallint_col))", + "avg(ceil(utinyint_col))", + "twa(ceil(int_col))", + "twa(ceil(bigint_col))", + "twa(ceil(float_col))", + "twa(ceil(double_col))", + "twa(ceil(smallint_col))", + "twa(ceil(tinyint_col))", + "twa(ceil(uint_col))", + "twa(ceil(ubigint_col))", + "twa(ceil(usmallint_col))", + "twa(ceil(utinyint_col))", + "sum(ceil(int_col))", + "sum(ceil(bigint_col))", + "sum(ceil(float_col))", + "sum(ceil(double_col))", + "sum(ceil(smallint_col))", + "sum(ceil(tinyint_col))", + "sum(ceil(uint_col))", + "sum(ceil(ubigint_col))", + 
"sum(ceil(usmallint_col))", + "sum(ceil(utinyint_col))", + "stddev(ceil(int_col))", + "stddev(ceil(bigint_col))", + "stddev(ceil(float_col))", + "stddev(ceil(double_col))", + "stddev(ceil(smallint_col))", + "stddev(ceil(tinyint_col))", + "stddev(ceil(uint_col))", + "stddev(ceil(ubigint_col))", + "stddev(ceil(usmallint_col))", + "stddev(ceil(utinyint_col))", + "irate(ceil(int_col))", + "irate(ceil(bigint_col))", + "irate(ceil(float_col))", + "irate(ceil(double_col))", + "irate(ceil(smallint_col))", + "irate(ceil(tinyint_col))", + "irate(ceil(uint_col))", + "irate(ceil(ubigint_col))", + "irate(ceil(usmallint_col))", + "irate(ceil(utinyint_col))", + "leastsquares(ceil(int_col), 1, 1)", + "leastsquares(ceil(bigint_col), 1, 1)", + "leastsquares(ceil(float_col), 1, 1)", + "leastsquares(ceil(double_col), 1, 1)", + "leastsquares(ceil(smallint_col), 1, 1)", + "leastsquares(ceil(tinyint_col), 1, 1)", + "leastsquares(ceil(uint_col), 1, 1)", + "leastsquares(ceil(ubigint_col), 1, 1)", + "leastsquares(ceil(usmallint_col), 1, 1)", + "leastsquares(ceil(utinyint_col), 1, 1)", + "min(ceil(int_col))", + "min(ceil(bigint_col))", + "min(ceil(float_col))", + "min(ceil(double_col))", + "min(ceil(smallint_col))", + "min(ceil(tinyint_col))", + "min(ceil(uint_col))", + "min(ceil(ubigint_col))", + "min(ceil(usmallint_col))", + "min(ceil(utinyint_col))", + "max(ceil(int_col))", + "max(ceil(bigint_col))", + "max(ceil(float_col))", + "max(ceil(double_col))", + "max(ceil(smallint_col))", + "max(ceil(tinyint_col))", + "max(ceil(uint_col))", + "max(ceil(ubigint_col))", + "max(ceil(usmallint_col))", + "max(ceil(utinyint_col))", + "first(ceil(int_col))", + "first(ceil(bigint_col))", + "first(ceil(float_col))", + "first(ceil(double_col))", + "first(ceil(smallint_col))", + "first(ceil(tinyint_col))", + "first(ceil(uint_col))", + "first(ceil(ubigint_col))", + "first(ceil(usmallint_col))", + "first(ceil(utinyint_col))", + "last(ceil(int_col))", + "last(ceil(bigint_col))", + "last(ceil(float_col))", + "last(ceil(double_col))", + "last(ceil(smallint_col))", + "last(ceil(tinyint_col))", + "last(ceil(uint_col))", + "last(ceil(ubigint_col))", + "last(ceil(usmallint_col))", + "last(ceil(utinyint_col))", + "top(ceil(int_col), 1)", + "top(ceil(bigint_col), 1)", + "top(ceil(float_col), 1)", + "top(ceil(double_col), 1)", + "top(ceil(smallint_col), 1)", + "top(ceil(tinyint_col), 1)", + "top(ceil(uint_col), 1)", + "top(ceil(ubigint_col), 1)", + "top(ceil(usmallint_col), 1)", + "top(ceil(utinyint_col), 1)", + "bottom(ceil(int_col), 1)", + "bottom(ceil(bigint_col), 1)", + "bottom(ceil(float_col), 1)", + "bottom(ceil(double_col), 1)", + "bottom(ceil(smallint_col), 1)", + "bottom(ceil(tinyint_col), 1)", + "bottom(ceil(uint_col), 1)", + "bottom(ceil(ubigint_col), 1)", + "bottom(ceil(usmallint_col), 1)", + "bottom(ceil(utinyint_col), 1)", + "percentile(ceil(int_col), 20)", + "percentile(ceil(bigint_col), 20)", + "percentile(ceil(float_col), 20)", + "percentile(ceil(double_col), 20)", + "percentile(ceil(smallint_col), 20)", + "percentile(ceil(tinyint_col), 20)", + "percentile(ceil(uint_col), 20)", + "percentile(ceil(ubigint_col), 20)", + "percentile(ceil(usmallint_col), 20)", + "percentile(ceil(utinyint_col), 20)", + "apercentile(ceil(int_col), 20)", + "apercentile(ceil(bigint_col), 20)", + "apercentile(ceil(float_col), 20)", + "apercentile(ceil(double_col), 20)", + "apercentile(ceil(smallint_col), 20)", + "apercentile(ceil(tinyint_col), 20)", + "apercentile(ceil(uint_col), 20)", + "apercentile(ceil(ubigint_col), 20)", + 
"apercentile(ceil(usmallint_col), 20)", + "apercentile(ceil(utinyint_col), 20)", + "last_row(ceil(int_col))", + "last_row(ceil(bigint_col))", + "last_row(ceil(float_col))", + "last_row(ceil(double_col))", + "last_row(ceil(smallint_col))", + "last_row(ceil(tinyint_col))", + "last_row(ceil(uint_col))", + "last_row(ceil(ubigint_col))", + "last_row(ceil(usmallint_col))", + "last_row(ceil(utinyint_col))", + "interp(ceil(int_col))", + "interp(ceil(bigint_col))", + "interp(ceil(float_col))", + "interp(ceil(double_col))", + "interp(ceil(smallint_col))", + "interp(ceil(tinyint_col))", + "interp(ceil(uint_col))", + "interp(ceil(ubigint_col))", + "interp(ceil(usmallint_col))", + "interp(ceil(utinyint_col))", + "diff(ceil(int_col))", + "diff(ceil(bigint_col))", + "diff(ceil(float_col))", + "diff(ceil(double_col))", + "diff(ceil(smallint_col))", + "diff(ceil(tinyint_col))", + "diff(ceil(uint_col))", + "diff(ceil(ubigint_col))", + "diff(ceil(usmallint_col))", + "diff(ceil(utinyint_col))", + "spread(ceil(int_col))", + "spread(ceil(bigint_col))", + "spread(ceil(float_col))", + "spread(ceil(double_col))", + "spread(ceil(smallint_col))", + "spread(ceil(tinyint_col))", + "spread(ceil(uint_col))", + "spread(ceil(ubigint_col))", + "spread(ceil(usmallint_col))", + "spread(ceil(utinyint_col))", + "derivative(ceil(int_col), 1s, 0)", + "derivative(ceil(bigint_col), 1s, 0)", + "derivative(ceil(float_col), 1s, 0)", + "derivative(ceil(double_col), 1s, 0)", + "derivative(ceil(smallint_col), 1s, 0)", + "derivative(ceil(tinyint_col), 1s, 0)", + "derivative(ceil(uint_col), 1s, 0)", + "derivative(ceil(ubigint_col), 1s, 0)", + "derivative(ceil(usmallint_col), 1s, 0)", + "derivative(ceil(utinyint_col), 1s, 0)", + "ceil(int_col) - ceil(int_col)", + "ceil(bigint_col) - ceil(bigint_col)", + "ceil(float_col) - ceil(float_col)", + "ceil(double_col) - ceil(double_col)", + "ceil(smallint_col) - ceil(smallint_col)", + "ceil(tinyint_col) - ceil(tinyint_col)", + "ceil(uint_col) - ceil(uint_col)", + "ceil(ubigint_col) - ceil(ubigint_col)", + "ceil(usmallint_col) - ceil(usmallint_col)", + "ceil(utinyint_col) - ceil(utinyint_col)", + "ceil(int_col) / ceil(int_col)", + "ceil(bigint_col) / ceil(bigint_col)", + "ceil(float_col) / ceil(float_col)", + "ceil(double_col) / ceil(double_col)", + "ceil(smallint_col) / ceil(smallint_col)", + "ceil(tinyint_col) / ceil(tinyint_col)", + "ceil(uint_col) / ceil(uint_col)", + "ceil(ubigint_col) / ceil(ubigint_col)", + "ceil(usmallint_col) / ceil(usmallint_col)", + "ceil(utinyint_col) / ceil(utinyint_col)", + "ceil(int_col) * ceil(int_col)", + "ceil(bigint_col) * ceil(bigint_col)", + "ceil(float_col) * ceil(float_col)", + "ceil(double_col) * ceil(double_col)", + "ceil(smallint_col) * ceil(smallint_col)", + "ceil(tinyint_col) * ceil(tinyint_col)", + "ceil(uint_col) * ceil(uint_col)", + "ceil(ubigint_col) * ceil(ubigint_col)", + "ceil(usmallint_col) * ceil(usmallint_col)", + "ceil(utinyint_col) * ceil(utinyint_col)", + "ceil(count(ts))", + "ceil(count(timestamp_col))", + "ceil(count(int_col))", + "ceil(count(bigint_col))", + "ceil(count(float_col))", + "ceil(count(double_col))", + "ceil(count(binary_col))", + "ceil(count(smallint_col))", + "ceil(count(tinyint_col))", + "ceil(count(bool_col))", + "ceil(count(nchar_col))", + "ceil(count(uint_col))", + "ceil(count(ubigint_col))", + "ceil(count(usmallint_col))", + "ceil(count(utinyint_col))", + "ceil(count(timestamp_tag))", + "ceil(count(int_tag))", + "ceil(count(bigint_tag))", + "ceil(count(float_tag))", + "ceil(count(double_tag))", + 
"ceil(count(binary_tag))", + "ceil(count(smallint_tag))", + "ceil(count(tinyint_tag))", + "ceil(count(bool_tag))", + "ceil(count(nchar_tag))", + "ceil(count(uint_tag))", + "ceil(count(ubigint_tag))", + "ceil(count(usmallint_tag))", + "ceil(count(utinyint_tag))", + "ceil(avg(ts))", + "ceil(avg(timestamp_col))", + "ceil(avg(int_col))", + "ceil(avg(bigint_col))", + "ceil(avg(float_col))", + "ceil(avg(double_col))", + "ceil(avg(binary_col))", + "ceil(avg(smallint_col))", + "ceil(avg(tinyint_col))", + "ceil(avg(bool_col))", + "ceil(avg(nchar_col))", + "ceil(avg(uint_col))", + "ceil(avg(ubigint_col))", + "ceil(avg(usmallint_col))", + "ceil(avg(utinyint_col))", + "ceil(avg(timestamp_tag))", + "ceil(avg(int_tag))", + "ceil(avg(bigint_tag))", + "ceil(avg(float_tag))", + "ceil(avg(double_tag))", + "ceil(avg(binary_tag))", + "ceil(avg(smallint_tag))", + "ceil(avg(tinyint_tag))", + "ceil(avg(bool_tag))", + "ceil(avg(nchar_tag))", + "ceil(avg(uint_tag))", + "ceil(avg(ubigint_tag))", + "ceil(avg(usmallint_tag))", + "ceil(avg(utinyint_tag))", + "ceil(twa(ts))", + "ceil(twa(timestamp_col))", + "ceil(twa(int_col))", + "ceil(twa(bigint_col))", + "ceil(twa(float_col))", + "ceil(twa(double_col))", + "ceil(twa(binary_col))", + "ceil(twa(smallint_col))", + "ceil(twa(tinyint_col))", + "ceil(twa(bool_col))", + "ceil(twa(nchar_col))", + "ceil(twa(uint_col))", + "ceil(twa(ubigint_col))", + "ceil(twa(usmallint_col))", + "ceil(twa(utinyint_col))", + "ceil(twa(timestamp_tag))", + "ceil(twa(int_tag))", + "ceil(twa(bigint_tag))", + "ceil(twa(float_tag))", + "ceil(twa(double_tag))", + "ceil(twa(binary_tag))", + "ceil(twa(smallint_tag))", + "ceil(twa(tinyint_tag))", + "ceil(twa(bool_tag))", + "ceil(twa(nchar_tag))", + "ceil(twa(uint_tag))", + "ceil(twa(ubigint_tag))", + "ceil(twa(usmallint_tag))", + "ceil(twa(utinyint_tag))", + "ceil(sum(ts))", + "ceil(sum(timestamp_col))", + "ceil(sum(int_col))", + "ceil(sum(bigint_col))", + "ceil(sum(float_col))", + "ceil(sum(double_col))", + "ceil(sum(binary_col))", + "ceil(sum(smallint_col))", + "ceil(sum(tinyint_col))", + "ceil(sum(bool_col))", + "ceil(sum(nchar_col))", + "ceil(sum(uint_col))", + "ceil(sum(ubigint_col))", + "ceil(sum(usmallint_col))", + "ceil(sum(utinyint_col))", + "ceil(sum(timestamp_tag))", + "ceil(sum(int_tag))", + "ceil(sum(bigint_tag))", + "ceil(sum(float_tag))", + "ceil(sum(double_tag))", + "ceil(sum(binary_tag))", + "ceil(sum(smallint_tag))", + "ceil(sum(tinyint_tag))", + "ceil(sum(bool_tag))", + "ceil(sum(nchar_tag))", + "ceil(sum(uint_tag))", + "ceil(sum(ubigint_tag))", + "ceil(sum(usmallint_tag))", + "ceil(sum(utinyint_tag))", + "ceil(stddev(ts))", + "ceil(stddev(timestamp_col))", + "ceil(stddev(int_col))", + "ceil(stddev(bigint_col))", + "ceil(stddev(float_col))", + "ceil(stddev(double_col))", + "ceil(stddev(binary_col))", + "ceil(stddev(smallint_col))", + "ceil(stddev(tinyint_col))", + "ceil(stddev(bool_col))", + "ceil(stddev(nchar_col))", + "ceil(stddev(uint_col))", + "ceil(stddev(ubigint_col))", + "ceil(stddev(usmallint_col))", + "ceil(stddev(utinyint_col))", + "ceil(stddev(timestamp_tag))", + "ceil(stddev(int_tag))", + "ceil(stddev(bigint_tag))", + "ceil(stddev(float_tag))", + "ceil(stddev(double_tag))", + "ceil(stddev(binary_tag))", + "ceil(stddev(smallint_tag))", + "ceil(stddev(tinyint_tag))", + "ceil(stddev(bool_tag))", + "ceil(stddev(nchar_tag))", + "ceil(stddev(uint_tag))", + "ceil(stddev(ubigint_tag))", + "ceil(stddev(usmallint_tag))", + "ceil(stddev(utinyint_tag))", + "ceil(leastsquares(ts, 1, 1))", + "ceil(leastsquares(timestamp_col, 1, 1))", + 
"ceil(leastsquares(int_col, 1, 1))", + "ceil(leastsquares(bigint_col, 1, 1))", + "ceil(leastsquares(float_col, 1, 1))", + "ceil(leastsquares(double_col, 1, 1))", + "ceil(leastsquares(binary_col, 1, 1))", + "ceil(leastsquares(smallint_col, 1, 1))", + "ceil(leastsquares(tinyint_col, 1, 1))", + "ceil(leastsquares(bool_col, 1, 1))", + "ceil(leastsquares(nchar_col, 1, 1))", + "ceil(leastsquares(uint_col, 1, 1))", + "ceil(leastsquares(ubigint_col, 1, 1))", + "ceil(leastsquares(usmallint_col, 1, 1))", + "ceil(leastsquares(utinyint_col, 1, 1))", + "ceil(leastsquares(timestamp_tag, 1, 1))", + "ceil(leastsquares(int_tag, 1, 1))", + "ceil(leastsquares(bigint_tag, 1, 1))", + "ceil(leastsquares(float_tag, 1, 1))", + "ceil(leastsquares(double_tag, 1, 1))", + "ceil(leastsquares(binary_tag, 1, 1))", + "ceil(leastsquares(smallint_tag, 1, 1))", + "ceil(leastsquares(tinyint_tag, 1, 1))", + "ceil(leastsquares(bool_tag, 1, 1))", + "ceil(leastsquares(nchar_tag, 1, 1))", + "ceil(leastsquares(uint_tag, 1, 1))", + "ceil(leastsquares(ubigint_tag, 1, 1))", + "ceil(leastsquares(usmallint_tag, 1, 1))", + "ceil(leastsquares(utinyint_tag, 1, 1))", + "ceil(irate(ts))", + "ceil(irate(timestamp_col))", + "ceil(irate(int_col))", + "ceil(irate(bigint_col))", + "ceil(irate(float_col))", + "ceil(irate(double_col))", + "ceil(irate(binary_col))", + "ceil(irate(smallint_col))", + "ceil(irate(tinyint_col))", + "ceil(irate(bool_col))", + "ceil(irate(nchar_col))", + "ceil(irate(uint_col))", + "ceil(irate(ubigint_col))", + "ceil(irate(usmallint_col))", + "ceil(irate(utinyint_col))", + "ceil(irate(timestamp_tag))", + "ceil(irate(int_tag))", + "ceil(irate(bigint_tag))", + "ceil(irate(float_tag))", + "ceil(irate(double_tag))", + "ceil(irate(binary_tag))", + "ceil(irate(smallint_tag))", + "ceil(irate(tinyint_tag))", + "ceil(irate(bool_tag))", + "ceil(irate(nchar_tag))", + "ceil(irate(uint_tag))", + "ceil(irate(ubigint_tag))", + "ceil(irate(usmallint_tag))", + "ceil(irate(utinyint_tag))", + "ceil(min(ts))", + "ceil(min(timestamp_col))", + "ceil(min(int_col))", + "ceil(min(bigint_col))", + "ceil(min(float_col))", + "ceil(min(double_col))", + "ceil(min(binary_col))", + "ceil(min(smallint_col))", + "ceil(min(tinyint_col))", + "ceil(min(bool_col))", + "ceil(min(nchar_col))", + "ceil(min(uint_col))", + "ceil(min(ubigint_col))", + "ceil(min(usmallint_col))", + "ceil(min(utinyint_col))", + "ceil(min(timestamp_tag))", + "ceil(min(int_tag))", + "ceil(min(bigint_tag))", + "ceil(min(float_tag))", + "ceil(min(double_tag))", + "ceil(min(binary_tag))", + "ceil(min(smallint_tag))", + "ceil(min(tinyint_tag))", + "ceil(min(bool_tag))", + "ceil(min(nchar_tag))", + "ceil(min(uint_tag))", + "ceil(min(ubigint_tag))", + "ceil(min(usmallint_tag))", + "ceil(min(utinyint_tag))", + "ceil(max(ts))", + "ceil(max(timestamp_col))", + "ceil(max(int_col))", + "ceil(max(bigint_col))", + "ceil(max(float_col))", + "ceil(max(double_col))", + "ceil(max(binary_col))", + "ceil(max(smallint_col))", + "ceil(max(tinyint_col))", + "ceil(max(bool_col))", + "ceil(max(nchar_col))", + "ceil(max(uint_col))", + "ceil(max(ubigint_col))", + "ceil(max(usmallint_col))", + "ceil(max(utinyint_col))", + "ceil(max(timestamp_tag))", + "ceil(max(int_tag))", + "ceil(max(bigint_tag))", + "ceil(max(float_tag))", + "ceil(max(double_tag))", + "ceil(max(binary_tag))", + "ceil(max(smallint_tag))", + "ceil(max(tinyint_tag))", + "ceil(max(bool_tag))", + "ceil(max(nchar_tag))", + "ceil(max(uint_tag))", + "ceil(max(ubigint_tag))", + "ceil(max(usmallint_tag))", + "ceil(max(utinyint_tag))", + 
"ceil(first(ts))", + "ceil(first(timestamp_col))", + "ceil(first(int_col))", + "ceil(first(bigint_col))", + "ceil(first(float_col))", + "ceil(first(double_col))", + "ceil(first(binary_col))", + "ceil(first(smallint_col))", + "ceil(first(tinyint_col))", + "ceil(first(bool_col))", + "ceil(first(nchar_col))", + "ceil(first(uint_col))", + "ceil(first(ubigint_col))", + "ceil(first(usmallint_col))", + "ceil(first(utinyint_col))", + "ceil(first(timestamp_tag))", + "ceil(first(int_tag))", + "ceil(first(bigint_tag))", + "ceil(first(float_tag))", + "ceil(first(double_tag))", + "ceil(first(binary_tag))", + "ceil(first(smallint_tag))", + "ceil(first(tinyint_tag))", + "ceil(first(bool_tag))", + "ceil(first(nchar_tag))", + "ceil(first(uint_tag))", + "ceil(first(ubigint_tag))", + "ceil(first(usmallint_tag))", + "ceil(first(utinyint_tag))", + "ceil(last(ts))", + "ceil(last(timestamp_col))", + "ceil(last(int_col))", + "ceil(last(bigint_col))", + "ceil(last(float_col))", + "ceil(last(double_col))", + "ceil(last(binary_col))", + "ceil(last(smallint_col))", + "ceil(last(tinyint_col))", + "ceil(last(bool_col))", + "ceil(last(nchar_col))", + "ceil(last(uint_col))", + "ceil(last(ubigint_col))", + "ceil(last(usmallint_col))", + "ceil(last(utinyint_col))", + "ceil(last(timestamp_tag))", + "ceil(last(int_tag))", + "ceil(last(bigint_tag))", + "ceil(last(float_tag))", + "ceil(last(double_tag))", + "ceil(last(binary_tag))", + "ceil(last(smallint_tag))", + "ceil(last(tinyint_tag))", + "ceil(last(bool_tag))", + "ceil(last(nchar_tag))", + "ceil(last(uint_tag))", + "ceil(last(ubigint_tag))", + "ceil(last(usmallint_tag))", + "ceil(last(utinyint_tag))", + "ceil(top(ts, 1))", + "ceil(top(timestamp_col, 1))", + "ceil(top(int_col, 1))", + "ceil(top(bigint_col, 1))", + "ceil(top(float_col, 1))", + "ceil(top(double_col, 1))", + "ceil(top(binary_col, 1))", + "ceil(top(smallint_col, 1))", + "ceil(top(tinyint_col, 1))", + "ceil(top(bool_col, 1))", + "ceil(top(nchar_col, 1))", + "ceil(top(uint_col, 1))", + "ceil(top(ubigint_col, 1))", + "ceil(top(usmallint_col, 1))", + "ceil(top(utinyint_col, 1))", + "ceil(top(timestamp_tag, 1))", + "ceil(top(int_tag, 1))", + "ceil(top(bigint_tag, 1))", + "ceil(top(float_tag, 1))", + "ceil(top(double_tag, 1))", + "ceil(top(binary_tag, 1))", + "ceil(top(smallint_tag, 1))", + "ceil(top(tinyint_tag, 1))", + "ceil(top(bool_tag, 1))", + "ceil(top(nchar_tag, 1))", + "ceil(top(uint_tag, 1))", + "ceil(top(ubigint_tag, 1))", + "ceil(top(usmallint_tag, 1))", + "ceil(top(utinyint_tag, 1))", + "ceil(bottom(ts, 1))", + "ceil(bottom(timestamp_col, 1))", + "ceil(bottom(int_col, 1))", + "ceil(bottom(bigint_col, 1))", + "ceil(bottom(float_col, 1))", + "ceil(bottom(double_col, 1))", + "ceil(bottom(binary_col, 1))", + "ceil(bottom(smallint_col, 1))", + "ceil(bottom(tinyint_col, 1))", + "ceil(bottom(bool_col, 1))", + "ceil(bottom(nchar_col, 1))", + "ceil(bottom(uint_col, 1))", + "ceil(bottom(ubigint_col, 1))", + "ceil(bottom(usmallint_col, 1))", + "ceil(bottom(utinyint_col, 1))", + "ceil(bottom(timestamp_tag, 1))", + "ceil(bottom(int_tag, 1))", + "ceil(bottom(bigint_tag, 1))", + "ceil(bottom(float_tag, 1))", + "ceil(bottom(double_tag, 1))", + "ceil(bottom(binary_tag, 1))", + "ceil(bottom(smallint_tag, 1))", + "ceil(bottom(tinyint_tag, 1))", + "ceil(bottom(bool_tag, 1))", + "ceil(bottom(nchar_tag, 1))", + "ceil(bottom(uint_tag, 1))", + "ceil(bottom(ubigint_tag, 1))", + "ceil(bottom(usmallint_tag, 1))", + "ceil(bottom(utinyint_tag, 1))", + "ceil(percentile(ts, 1))", + "ceil(percentile(timestamp_col, 1))", + 
"ceil(percentile(int_col, 1))", + "ceil(percentile(bigint_col, 1))", + "ceil(percentile(float_col, 1))", + "ceil(percentile(double_col, 1))", + "ceil(percentile(binary_col, 1))", + "ceil(percentile(smallint_col, 1))", + "ceil(percentile(tinyint_col, 1))", + "ceil(percentile(bool_col, 1))", + "ceil(percentile(nchar_col, 1))", + "ceil(percentile(uint_col, 1))", + "ceil(percentile(ubigint_col, 1))", + "ceil(percentile(usmallint_col, 1))", + "ceil(percentile(utinyint_col, 1))", + "ceil(percentile(timestamp_tag, 1))", + "ceil(percentile(int_tag, 1))", + "ceil(percentile(bigint_tag, 1))", + "ceil(percentile(float_tag, 1))", + "ceil(percentile(double_tag, 1))", + "ceil(percentile(binary_tag, 1))", + "ceil(percentile(smallint_tag, 1))", + "ceil(percentile(tinyint_tag, 1))", + "ceil(percentile(bool_tag, 1))", + "ceil(percentile(nchar_tag, 1))", + "ceil(percentile(uint_tag, 1))", + "ceil(percentile(ubigint_tag, 1))", + "ceil(percentile(usmallint_tag, 1))", + "ceil(percentile(utinyint_tag, 1))", + "ceil(apercentile(ts, 1))", + "ceil(apercentile(timestamp_col, 1))", + "ceil(apercentile(int_col, 1))", + "ceil(apercentile(bigint_col, 1))", + "ceil(apercentile(float_col, 1))", + "ceil(apercentile(double_col, 1))", + "ceil(apercentile(binary_col, 1))", + "ceil(apercentile(smallint_col, 1))", + "ceil(apercentile(tinyint_col, 1))", + "ceil(apercentile(bool_col, 1))", + "ceil(apercentile(nchar_col, 1))", + "ceil(apercentile(uint_col, 1))", + "ceil(apercentile(ubigint_col, 1))", + "ceil(apercentile(usmallint_col, 1))", + "ceil(apercentile(utinyint_col, 1))", + "ceil(apercentile(timestamp_tag, 1))", + "ceil(apercentile(int_tag, 1))", + "ceil(apercentile(bigint_tag, 1))", + "ceil(apercentile(float_tag, 1))", + "ceil(apercentile(double_tag, 1))", + "ceil(apercentile(binary_tag, 1))", + "ceil(apercentile(smallint_tag, 1))", + "ceil(apercentile(tinyint_tag, 1))", + "ceil(apercentile(bool_tag, 1))", + "ceil(apercentile(nchar_tag, 1))", + "ceil(apercentile(uint_tag, 1))", + "ceil(apercentile(ubigint_tag, 1))", + "ceil(apercentile(usmallint_tag, 1))", + "ceil(apercentile(utinyint_tag, 1))", + "ceil(last_row(ts))", + "ceil(last_row(timestamp_col))", + "ceil(last_row(int_col))", + "ceil(last_row(bigint_col))", + "ceil(last_row(float_col))", + "ceil(last_row(double_col))", + "ceil(last_row(binary_col))", + "ceil(last_row(smallint_col))", + "ceil(last_row(tinyint_col))", + "ceil(last_row(bool_col))", + "ceil(last_row(nchar_col))", + "ceil(last_row(uint_col))", + "ceil(last_row(ubigint_col))", + "ceil(last_row(usmallint_col))", + "ceil(last_row(utinyint_col))", + "ceil(last_row(timestamp_tag))", + "ceil(last_row(int_tag))", + "ceil(last_row(bigint_tag))", + "ceil(last_row(float_tag))", + "ceil(last_row(double_tag))", + "ceil(last_row(binary_tag))", + "ceil(last_row(smallint_tag))", + "ceil(last_row(tinyint_tag))", + "ceil(last_row(bool_tag))", + "ceil(last_row(nchar_tag))", + "ceil(last_row(uint_tag))", + "ceil(last_row(ubigint_tag))", + "ceil(last_row(usmallint_tag))", + "ceil(last_row(utinyint_tag))", + "ceil(interp(ts))", + "ceil(interp(timestamp_col))", + "ceil(interp(int_col))", + "ceil(interp(bigint_col))", + "ceil(interp(float_col))", + "ceil(interp(double_col))", + "ceil(interp(binary_col))", + "ceil(interp(smallint_col))", + "ceil(interp(tinyint_col))", + "ceil(interp(bool_col))", + "ceil(interp(nchar_col))", + "ceil(interp(uint_col))", + "ceil(interp(ubigint_col))", + "ceil(interp(usmallint_col))", + "ceil(interp(utinyint_col))", + "ceil(interp(timestamp_tag))", + "ceil(interp(int_tag))", + 
"ceil(interp(bigint_tag))", + "ceil(interp(float_tag))", + "ceil(interp(double_tag))", + "ceil(interp(binary_tag))", + "ceil(interp(smallint_tag))", + "ceil(interp(tinyint_tag))", + "ceil(interp(bool_tag))", + "ceil(interp(nchar_tag))", + "ceil(interp(uint_tag))", + "ceil(interp(ubigint_tag))", + "ceil(interp(usmallint_tag))", + "ceil(interp(utinyint_tag))", + "ceil(diff(ts))", + "ceil(diff(timestamp_col))", + "ceil(diff(int_col))", + "ceil(diff(bigint_col))", + "ceil(diff(float_col))", + "ceil(diff(double_col))", + "ceil(diff(binary_col))", + "ceil(diff(smallint_col))", + "ceil(diff(tinyint_col))", + "ceil(diff(bool_col))", + "ceil(diff(nchar_col))", + "ceil(diff(uint_col))", + "ceil(diff(ubigint_col))", + "ceil(diff(usmallint_col))", + "ceil(diff(utinyint_col))", + "ceil(diff(timestamp_tag))", + "ceil(diff(int_tag))", + "ceil(diff(bigint_tag))", + "ceil(diff(float_tag))", + "ceil(diff(double_tag))", + "ceil(diff(binary_tag))", + "ceil(diff(smallint_tag))", + "ceil(diff(tinyint_tag))", + "ceil(diff(bool_tag))", + "ceil(diff(nchar_tag))", + "ceil(diff(uint_tag))", + "ceil(diff(ubigint_tag))", + "ceil(diff(usmallint_tag))", + "ceil(diff(utinyint_tag))", + "ceil(spread(ts))", + "ceil(spread(timestamp_col))", + "ceil(spread(int_col))", + "ceil(spread(bigint_col))", + "ceil(spread(float_col))", + "ceil(spread(double_col))", + "ceil(spread(binary_col))", + "ceil(spread(smallint_col))", + "ceil(spread(tinyint_col))", + "ceil(spread(bool_col))", + "ceil(spread(nchar_col))", + "ceil(spread(uint_col))", + "ceil(spread(ubigint_col))", + "ceil(spread(usmallint_col))", + "ceil(spread(utinyint_col))", + "ceil(spread(timestamp_tag))", + "ceil(spread(int_tag))", + "ceil(spread(bigint_tag))", + "ceil(spread(float_tag))", + "ceil(spread(double_tag))", + "ceil(spread(binary_tag))", + "ceil(spread(smallint_tag))", + "ceil(spread(tinyint_tag))", + "ceil(spread(bool_tag))", + "ceil(spread(nchar_tag))", + "ceil(spread(uint_tag))", + "ceil(spread(ubigint_tag))", + "ceil(spread(usmallint_tag))", + "ceil(spread(utinyint_tag))", + "ceil(derivative(ts, 1s, 0))", + "ceil(derivative(timestamp_col, 1s, 0))", + "ceil(derivative(int_col, 1s, 0))", + "ceil(derivative(bigint_col, 1s, 0))", + "ceil(derivative(float_col, 1s, 0))", + "ceil(derivative(double_col, 1s, 0))", + "ceil(derivative(binary_col, 1s, 0))", + "ceil(derivative(smallint_col, 1s, 0))", + "ceil(derivative(tinyint_col, 1s, 0))", + "ceil(derivative(bool_col, 1s, 0))", + "ceil(derivative(nchar_col, 1s, 0))", + "ceil(derivative(uint_col, 1s, 0))", + "ceil(derivative(ubigint_col, 1s, 0))", + "ceil(derivative(usmallint_col, 1s, 0))", + "ceil(derivative(utinyint_col, 1s, 0))", + "ceil(derivative(timestamp_tag, 1s, 0))", + "ceil(derivative(int_tag, 1s, 0))", + "ceil(derivative(bigint_tag, 1s, 0))", + "ceil(derivative(float_tag, 1s, 0))", + "ceil(derivative(double_tag, 1s, 0))", + "ceil(derivative(binary_tag, 1s, 0))", + "ceil(derivative(smallint_tag, 1s, 0))", + "ceil(derivative(tinyint_tag, 1s, 0))", + "ceil(derivative(bool_tag, 1s, 0))", + "ceil(derivative(nchar_tag, 1s, 0))", + "ceil(derivative(uint_tag, 1s, 0))", + "ceil(derivative(ubigint_tag, 1s, 0))", + "ceil(derivative(usmallint_tag, 1s, 0))", + "ceil(derivative(utinyint_tag, 1s, 0))", + "ceil(ts + ts)", + "ceil(timestamp_col + timestamp_col)", + "ceil(int_col + int_col)", + "ceil(bigint_col + bigint_col)", + "ceil(float_col + float_col)", + "ceil(double_col + double_col)", + "ceil(binary_col + binary_col)", + "ceil(smallint_col + smallint_col)", + "ceil(tinyint_col + tinyint_col)", + "ceil(bool_col + 
bool_col)", + "ceil(nchar_col + nchar_col)", + "ceil(uint_col + uint_col)", + "ceil(ubigint_col + ubigint_col)", + "ceil(usmallint_col + usmallint_col)", + "ceil(utinyint_col + utinyint_col)", + "ceil(timestamp_tag + timestamp_tag)", + "ceil(int_tag + int_tag)", + "ceil(bigint_tag + bigint_tag)", + "ceil(float_tag + float_tag)", + "ceil(double_tag + double_tag)", + "ceil(binary_tag + binary_tag)", + "ceil(smallint_tag + smallint_tag)", + "ceil(tinyint_tag + tinyint_tag)", + "ceil(bool_tag + bool_tag)", + "ceil(nchar_tag + nchar_tag)", + "ceil(uint_tag + uint_tag)", + "ceil(ubigint_tag + ubigint_tag)", + "ceil(usmallint_tag + usmallint_tag)", + "ceil(utinyint_tag + utinyint_tag)", + "ceil(ts - ts)", + "ceil(timestamp_col - timestamp_col)", + "ceil(int_col - int_col)", + "ceil(bigint_col - bigint_col)", + "ceil(float_col - float_col)", + "ceil(double_col - double_col)", + "ceil(binary_col - binary_col)", + "ceil(smallint_col - smallint_col)", + "ceil(tinyint_col - tinyint_col)", + "ceil(bool_col - bool_col)", + "ceil(nchar_col - nchar_col)", + "ceil(uint_col - uint_col)", + "ceil(ubigint_col - ubigint_col)", + "ceil(usmallint_col - usmallint_col)", + "ceil(utinyint_col - utinyint_col)", + "ceil(timestamp_tag - timestamp_tag)", + "ceil(int_tag - int_tag)", + "ceil(bigint_tag - bigint_tag)", + "ceil(float_tag - float_tag)", + "ceil(double_tag - double_tag)", + "ceil(binary_tag - binary_tag)", + "ceil(smallint_tag - smallint_tag)", + "ceil(tinyint_tag - tinyint_tag)", + "ceil(bool_tag - bool_tag)", + "ceil(nchar_tag - nchar_tag)", + "ceil(uint_tag - uint_tag)", + "ceil(ubigint_tag - ubigint_tag)", + "ceil(usmallint_tag - usmallint_tag)", + "ceil(utinyint_tag - utinyint_tag)", + "ceil(ts * ts)", + "ceil(timestamp_col * timestamp_col)", + "ceil(int_col * int_col)", + "ceil(bigint_col * bigint_col)", + "ceil(float_col * float_col)", + "ceil(double_col * double_col)", + "ceil(binary_col * binary_col)", + "ceil(smallint_col * smallint_col)", + "ceil(tinyint_col * tinyint_col)", + "ceil(bool_col * bool_col)", + "ceil(nchar_col * nchar_col)", + "ceil(uint_col * uint_col)", + "ceil(ubigint_col * ubigint_col)", + "ceil(usmallint_col * usmallint_col)", + "ceil(utinyint_col * utinyint_col)", + "ceil(timestamp_tag * timestamp_tag)", + "ceil(int_tag * int_tag)", + "ceil(bigint_tag * bigint_tag)", + "ceil(float_tag * float_tag)", + "ceil(double_tag * double_tag)", + "ceil(binary_tag * binary_tag)", + "ceil(smallint_tag * smallint_tag)", + "ceil(tinyint_tag * tinyint_tag)", + "ceil(bool_tag * bool_tag)", + "ceil(nchar_tag * nchar_tag)", + "ceil(uint_tag * uint_tag)", + "ceil(ubigint_tag * ubigint_tag)", + "ceil(usmallint_tag * usmallint_tag)", + "ceil(utinyint_tag * utinyint_tag)", + "ceil(ts / ts)", + "ceil(timestamp_col / timestamp_col)", + "ceil(int_col / int_col)", + "ceil(bigint_col / bigint_col)", + "ceil(float_col / float_col)", + "ceil(double_col / double_col)", + "ceil(binary_col / binary_col)", + "ceil(smallint_col / smallint_col)", + "ceil(tinyint_col / tinyint_col)", + "ceil(bool_col / bool_col)", + "ceil(nchar_col / nchar_col)", + "ceil(uint_col / uint_col)", + "ceil(ubigint_col / ubigint_col)", + "ceil(usmallint_col / usmallint_col)", + "ceil(utinyint_col / utinyint_col)", + "ceil(timestamp_tag / timestamp_tag)", + "ceil(int_tag / int_tag)", + "ceil(bigint_tag / bigint_tag)", + "ceil(float_tag / float_tag)", + "ceil(double_tag / double_tag)", + "ceil(binary_tag / binary_tag)", + "ceil(smallint_tag / smallint_tag)", + "ceil(tinyint_tag / tinyint_tag)", + "ceil(bool_tag / bool_tag)", + 
"ceil(nchar_tag / nchar_tag)", + "ceil(uint_tag / uint_tag)", + "ceil(ubigint_tag / ubigint_tag)", + "ceil(usmallint_tag / usmallint_tag)", + "ceil(utinyint_tag / utinyint_tag)", + "int_col, ceil(int_col), int_col", + "bigint_col, ceil(bigint_col), bigint_col", + "float_col, ceil(float_col), float_col", + "double_col, ceil(double_col), double_col", + "smallint_col, ceil(smallint_col), smallint_col", + "tinyint_col, ceil(tinyint_col), tinyint_col", + "uint_col, ceil(uint_col), uint_col", + "ubigint_col, ceil(ubigint_col), ubigint_col", + "usmallint_col, ceil(usmallint_col), usmallint_col", + "utinyint_col, ceil(utinyint_col), utinyint_col", + "count(int_col), ceil(int_col), count(int_col)", + "count(bigint_col), ceil(bigint_col), count(bigint_col)", + "count(float_col), ceil(float_col), count(float_col)", + "count(double_col), ceil(double_col), count(double_col)", + "count(smallint_col), ceil(smallint_col), count(smallint_col)", + "count(tinyint_col), ceil(tinyint_col), count(tinyint_col)", + "count(uint_col), ceil(uint_col), count(uint_col)", + "count(ubigint_col), ceil(ubigint_col), count(ubigint_col)", + "count(usmallint_col), ceil(usmallint_col), count(usmallint_col)", + "count(utinyint_col), ceil(utinyint_col), count(utinyint_col)", + "avg(int_col), ceil(int_col), avg(int_col)", + "avg(bigint_col), ceil(bigint_col), avg(bigint_col)", + "avg(float_col), ceil(float_col), avg(float_col)", + "avg(double_col), ceil(double_col), avg(double_col)", + "avg(smallint_col), ceil(smallint_col), avg(smallint_col)", + "avg(tinyint_col), ceil(tinyint_col), avg(tinyint_col)", + "avg(uint_col), ceil(uint_col), avg(uint_col)", + "avg(ubigint_col), ceil(ubigint_col), avg(ubigint_col)", + "avg(usmallint_col), ceil(usmallint_col), avg(usmallint_col)", + "avg(utinyint_col), ceil(utinyint_col), avg(utinyint_col)", + "twa(int_col), ceil(int_col), twa(int_col)", + "twa(bigint_col), ceil(bigint_col), twa(bigint_col)", + "twa(float_col), ceil(float_col), twa(float_col)", + "twa(double_col), ceil(double_col), twa(double_col)", + "twa(smallint_col), ceil(smallint_col), twa(smallint_col)", + "twa(tinyint_col), ceil(tinyint_col), twa(tinyint_col)", + "twa(uint_col), ceil(uint_col), twa(uint_col)", + "twa(ubigint_col), ceil(ubigint_col), twa(ubigint_col)", + "twa(usmallint_col), ceil(usmallint_col), twa(usmallint_col)", + "twa(utinyint_col), ceil(utinyint_col), twa(utinyint_col)", + "sum(int_col), ceil(int_col), sum(int_col)", + "sum(bigint_col), ceil(bigint_col), sum(bigint_col)", + "sum(float_col), ceil(float_col), sum(float_col)", + "sum(double_col), ceil(double_col), sum(double_col)", + "sum(smallint_col), ceil(smallint_col), sum(smallint_col)", + "sum(tinyint_col), ceil(tinyint_col), sum(tinyint_col)", + "sum(uint_col), ceil(uint_col), sum(uint_col)", + "sum(ubigint_col), ceil(ubigint_col), sum(ubigint_col)", + "sum(usmallint_col), ceil(usmallint_col), sum(usmallint_col)", + "sum(utinyint_col), ceil(utinyint_col), sum(utinyint_col)", + "stddev(int_col), ceil(int_col), stddev(int_col)", + "stddev(bigint_col), ceil(bigint_col), stddev(bigint_col)", + "stddev(float_col), ceil(float_col), stddev(float_col)", + "stddev(double_col), ceil(double_col), stddev(double_col)", + "stddev(smallint_col), ceil(smallint_col), stddev(smallint_col)", + "stddev(tinyint_col), ceil(tinyint_col), stddev(tinyint_col)", + "stddev(uint_col), ceil(uint_col), stddev(uint_col)", + "stddev(ubigint_col), ceil(ubigint_col), stddev(ubigint_col)", + "stddev(usmallint_col), ceil(usmallint_col), stddev(usmallint_col)", + "stddev(utinyint_col), 
ceil(utinyint_col), stddev(utinyint_col)", + "irate(int_col), ceil(int_col), irate(int_col)", + "irate(bigint_col), ceil(bigint_col), irate(bigint_col)", + "irate(float_col), ceil(float_col), irate(float_col)", + "irate(double_col), ceil(double_col), irate(double_col)", + "irate(smallint_col), ceil(smallint_col), irate(smallint_col)", + "irate(tinyint_col), ceil(tinyint_col), irate(tinyint_col)", + "irate(uint_col), ceil(uint_col), irate(uint_col)", + "irate(ubigint_col), ceil(ubigint_col), irate(ubigint_col)", + "irate(usmallint_col), ceil(usmallint_col), irate(usmallint_col)", + "irate(utinyint_col), ceil(utinyint_col), irate(utinyint_col)", + "min(int_col), ceil(int_col), min(int_col)", + "min(bigint_col), ceil(bigint_col), min(bigint_col)", + "min(float_col), ceil(float_col), min(float_col)", + "min(double_col), ceil(double_col), min(double_col)", + "min(smallint_col), ceil(smallint_col), min(smallint_col)", + "min(tinyint_col), ceil(tinyint_col), min(tinyint_col)", + "min(uint_col), ceil(uint_col), min(uint_col)", + "min(ubigint_col), ceil(ubigint_col), min(ubigint_col)", + "min(usmallint_col), ceil(usmallint_col), min(usmallint_col)", + "min(utinyint_col), ceil(utinyint_col), min(utinyint_col)", + "max(int_col), ceil(int_col), max(int_col)", + "max(bigint_col), ceil(bigint_col), max(bigint_col)", + "max(float_col), ceil(float_col), max(float_col)", + "max(double_col), ceil(double_col), max(double_col)", + "max(smallint_col), ceil(smallint_col), max(smallint_col)", + "max(tinyint_col), ceil(tinyint_col), max(tinyint_col)", + "max(uint_col), ceil(uint_col), max(uint_col)", + "max(ubigint_col), ceil(ubigint_col), max(ubigint_col)", + "max(usmallint_col), ceil(usmallint_col), max(usmallint_col)", + "max(utinyint_col), ceil(utinyint_col), max(utinyint_col)", + "first(int_col), ceil(int_col), first(int_col)", + "first(bigint_col), ceil(bigint_col), first(bigint_col)", + "first(float_col), ceil(float_col), first(float_col)", + "first(double_col), ceil(double_col), first(double_col)", + "first(smallint_col), ceil(smallint_col), first(smallint_col)", + "first(tinyint_col), ceil(tinyint_col), first(tinyint_col)", + "first(uint_col), ceil(uint_col), first(uint_col)", + "first(ubigint_col), ceil(ubigint_col), first(ubigint_col)", + "first(usmallint_col), ceil(usmallint_col), first(usmallint_col)", + "first(utinyint_col), ceil(utinyint_col), first(utinyint_col)", + "last(int_col), ceil(int_col), last(int_col)", + "last(bigint_col), ceil(bigint_col), last(bigint_col)", + "last(float_col), ceil(float_col), last(float_col)", + "last(double_col), ceil(double_col), last(double_col)", + "last(smallint_col), ceil(smallint_col), last(smallint_col)", + "last(tinyint_col), ceil(tinyint_col), last(tinyint_col)", + "last(uint_col), ceil(uint_col), last(uint_col)", + "last(ubigint_col), ceil(ubigint_col), last(ubigint_col)", + "last(usmallint_col), ceil(usmallint_col), last(usmallint_col)", + "last(utinyint_col), ceil(utinyint_col), last(utinyint_col)", + "last_row(int_col), ceil(int_col), last_row(int_col)", + "last_row(bigint_col), ceil(bigint_col), last_row(bigint_col)", + "last_row(float_col), ceil(float_col), last_row(float_col)", + "last_row(double_col), ceil(double_col), last_row(double_col)", + "last_row(smallint_col), ceil(smallint_col), last_row(smallint_col)", + "last_row(tinyint_col), ceil(tinyint_col), last_row(tinyint_col)", + "last_row(uint_col), ceil(uint_col), last_row(uint_col)", + "last_row(ubigint_col), ceil(ubigint_col), last_row(ubigint_col)", + "last_row(usmallint_col), 
ceil(usmallint_col), last_row(usmallint_col)", + "last_row(utinyint_col), ceil(utinyint_col), last_row(utinyint_col)", + "interp(int_col), ceil(int_col), interp(int_col)", + "interp(bigint_col), ceil(bigint_col), interp(bigint_col)", + "interp(float_col), ceil(float_col), interp(float_col)", + "interp(double_col), ceil(double_col), interp(double_col)", + "interp(smallint_col), ceil(smallint_col), interp(smallint_col)", + "interp(tinyint_col), ceil(tinyint_col), interp(tinyint_col)", + "interp(uint_col), ceil(uint_col), interp(uint_col)", + "interp(ubigint_col), ceil(ubigint_col), interp(ubigint_col)", + "interp(usmallint_col), ceil(usmallint_col), interp(usmallint_col)", + "interp(utinyint_col), ceil(utinyint_col), interp(utinyint_col)", + "diff(int_col), ceil(int_col), diff(int_col)", + "diff(bigint_col), ceil(bigint_col), diff(bigint_col)", + "diff(float_col), ceil(float_col), diff(float_col)", + "diff(double_col), ceil(double_col), diff(double_col)", + "diff(smallint_col), ceil(smallint_col), diff(smallint_col)", + "diff(tinyint_col), ceil(tinyint_col), diff(tinyint_col)", + "diff(uint_col), ceil(uint_col), diff(uint_col)", + "diff(ubigint_col), ceil(ubigint_col), diff(ubigint_col)", + "diff(usmallint_col), ceil(usmallint_col), diff(usmallint_col)", + "diff(utinyint_col), ceil(utinyint_col), diff(utinyint_col)", + "spread(int_col), ceil(int_col), spread(int_col)", + "spread(bigint_col), ceil(bigint_col), spread(bigint_col)", + "spread(float_col), ceil(float_col), spread(float_col)", + "spread(double_col), ceil(double_col), spread(double_col)", + "spread(smallint_col), ceil(smallint_col), spread(smallint_col)", + "spread(tinyint_col), ceil(tinyint_col), spread(tinyint_col)", + "spread(uint_col), ceil(uint_col), spread(uint_col)", + "spread(ubigint_col), ceil(ubigint_col), spread(ubigint_col)", + "spread(usmallint_col), ceil(usmallint_col), spread(usmallint_col)", + "spread(utinyint_col), ceil(utinyint_col), spread(utinyint_col)", + "leastsquares(int_col, 1, 1), ceil(int_col), leastsquares(int_col, 1, 1)", + "leastsquares(bigint_col, 1, 1), ceil(bigint_col), leastsquares(bigint_col, 1, 1)", + "leastsquares(float_col, 1, 1), ceil(float_col), leastsquares(float_col, 1, 1)", + "leastsquares(double_col, 1, 1), ceil(double_col), leastsquares(double_col, 1, 1)", + "leastsquares(smallint_col, 1, 1), ceil(smallint_col), leastsquares(smallint_col, 1, 1)", + "leastsquares(tinyint_col, 1, 1), ceil(tinyint_col), leastsquares(tinyint_col, 1, 1)", + "leastsquares(uint_col, 1, 1), ceil(uint_col), leastsquares(uint_col, 1, 1)", + "leastsquares(ubigint_col, 1, 1), ceil(ubigint_col), leastsquares(ubigint_col, 1, 1)", + "leastsquares(usmallint_col, 1, 1), ceil(usmallint_col), leastsquares(usmallint_col, 1, 1)", + "leastsquares(utinyint_col, 1, 1), ceil(utinyint_col), leastsquares(utinyint_col, 1, 1)", + "top(int_col, 1), ceil(int_col), top(int_col, 1)", + "top(bigint_col, 1), ceil(bigint_col), top(bigint_col, 1)", + "top(float_col, 1), ceil(float_col), top(float_col, 1)", + "top(double_col, 1), ceil(double_col), top(double_col, 1)", + "top(smallint_col, 1), ceil(smallint_col), top(smallint_col, 1)", + "top(tinyint_col, 1), ceil(tinyint_col), top(tinyint_col, 1)", + "top(uint_col, 1), ceil(uint_col), top(uint_col, 1)", + "top(ubigint_col, 1), ceil(ubigint_col), top(ubigint_col, 1)", + "top(usmallint_col, 1), ceil(usmallint_col), top(usmallint_col, 1)", + "top(utinyint_col, 1), ceil(utinyint_col), top(utinyint_col, 1)", + "bottom(int_col, 1), ceil(int_col), bottom(int_col, 1)", + "bottom(bigint_col, 1), 
ceil(bigint_col), bottom(bigint_col, 1)", + "bottom(float_col, 1), ceil(float_col), bottom(float_col, 1)", + "bottom(double_col, 1), ceil(double_col), bottom(double_col, 1)", + "bottom(smallint_col, 1), ceil(smallint_col), bottom(smallint_col, 1)", + "bottom(tinyint_col, 1), ceil(tinyint_col), bottom(tinyint_col, 1)", + "bottom(uint_col, 1), ceil(uint_col), bottom(uint_col, 1)", + "bottom(ubigint_col, 1), ceil(ubigint_col), bottom(ubigint_col, 1)", + "bottom(usmallint_col, 1), ceil(usmallint_col), bottom(usmallint_col, 1)", + "bottom(utinyint_col, 1), ceil(utinyint_col), bottom(utinyint_col, 1)", + "percentile(int_col, 1), ceil(int_col), percentile(int_col, 1)", + "percentile(bigint_col, 1), ceil(bigint_col), percentile(bigint_col, 1)", + "percentile(float_col, 1), ceil(float_col), percentile(float_col, 1)", + "percentile(double_col, 1), ceil(double_col), percentile(double_col, 1)", + "percentile(smallint_col, 1), ceil(smallint_col), percentile(smallint_col, 1)", + "percentile(tinyint_col, 1), ceil(tinyint_col), percentile(tinyint_col, 1)", + "percentile(uint_col, 1), ceil(uint_col), percentile(uint_col, 1)", + "percentile(ubigint_col, 1), ceil(ubigint_col), percentile(ubigint_col, 1)", + "percentile(usmallint_col, 1), ceil(usmallint_col), percentile(usmallint_col, 1)", + "percentile(utinyint_col, 1), ceil(utinyint_col), percentile(utinyint_col, 1)", + "apercentile(int_col, 1), ceil(int_col), apercentile(int_col, 1)", + "apercentile(bigint_col, 1), ceil(bigint_col), apercentile(bigint_col, 1)", + "apercentile(float_col, 1), ceil(float_col), apercentile(float_col, 1)", + "apercentile(double_col, 1), ceil(double_col), apercentile(double_col, 1)", + "apercentile(smallint_col, 1), ceil(smallint_col), apercentile(smallint_col, 1)", + "apercentile(tinyint_col, 1), ceil(tinyint_col), apercentile(tinyint_col, 1)", + "apercentile(uint_col, 1), ceil(uint_col), apercentile(uint_col, 1)", + "apercentile(ubigint_col, 1), ceil(ubigint_col), apercentile(ubigint_col, 1)", + "apercentile(usmallint_col, 1), ceil(usmallint_col), apercentile(usmallint_col, 1)", + "apercentile(utinyint_col, 1), ceil(utinyint_col), apercentile(utinyint_col, 1)", + "derivative(int_col, 1s, 0), ceil(int_col), derivative(int_col, 1s, 0)", + "derivative(bigint_col, 1s, 0), ceil(bigint_col), derivative(bigint_col, 1s, 0)", + "derivative(float_col, 1s, 0), ceil(float_col), derivative(float_col, 1s, 0)", + "derivative(double_col, 1s, 0), ceil(double_col), derivative(double_col, 1s, 0)", + "derivative(smallint_col, 1s, 0), ceil(smallint_col), derivative(smallint_col, 1s, 0)", + "derivative(tinyint_col, 1s, 0), ceil(tinyint_col), derivative(tinyint_col, 1s, 0)", + "derivative(uint_col, 1s, 0), ceil(uint_col), derivative(uint_col, 1s, 0)", + "derivative(ubigint_col, 1s, 0), ceil(ubigint_col), derivative(ubigint_col, 1s, 0)", + "derivative(usmallint_col, 1s, 0), ceil(usmallint_col), derivative(usmallint_col, 1s, 0)", + "derivative(utinyint_col, 1s, 0), ceil(utinyint_col), derivative(utinyint_col, 1s, 0)", + "1, ceil(int_col), 1", + "1, ceil(bigint_col), 1", + "1, ceil(float_col), 1", + "1, ceil(double_col), 1", + "1, ceil(smallint_col), 1", + "1, ceil(tinyint_col), 1", + "1, ceil(uint_col), 1", + "1, ceil(ubigint_col), 1", + "1, ceil(usmallint_col), 1", + "1, ceil(utinyint_col), 1", + "ceil(int_col) as anyName", + "ceil(bigint_col) as anyName", + "ceil(float_col) as anyName", + "ceil(double_col) as anyName", + "ceil(smallint_col) as anyName", + "ceil(tinyint_col) as anyName", + "ceil(uint_col) as anyName", + "ceil(ubigint_col) as 
anyName", + "ceil(usmallint_col) as anyName", + "ceil(utinyint_col) as anyName", + "distinct ceil(int_col)", + "distinct ceil(bigint_col)", + "distinct ceil(float_col)", + "distinct ceil(double_col)", + "distinct ceil(smallint_col)", + "distinct ceil(tinyint_col)", + "distinct ceil(uint_col)", + "distinct ceil(ubigint_col)", + "distinct ceil(usmallint_col)", + "distinct ceil(utinyint_col)", + ] + simple_select_command = [ + "ceil(super.int_col)", + "ceil(super.bigint_col)", + "ceil(super.float_col)", + "ceil(super.double_col)", + "ceil(super.smallint_col)", + "ceil(super.tinyint_col)", + "ceil(super.uint_col)", + "ceil(super.ubigint_col)", + "ceil(super.usmallint_col)", + "ceil(super.utinyint_col)", + "ceil(t1.int_col)", + "ceil(t1.bigint_col)", + "ceil(t1.float_col)", + "ceil(t1.double_col)", + "ceil(t1.smallint_col)", + "ceil(t1.tinyint_col)", + "ceil(t1.uint_col)", + "ceil(t1.ubigint_col)", + "ceil(t1.usmallint_col)", + "ceil(t1.utinyint_col)", + ] + from_command = [" from super", " from t1"] + advance_from_command = [ + " from super", " from t1", + " from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag" + ] + filter_command = [ + "", " session(ts, 1s)", " state_window(int_col)", " interval (1s)", + " interval (1s) sliding (1s)", " group by (ts)" + ] + fill_command = [ + "", " fill(prev)", " fill(next)", " fill(null)", " fill(1)", + " fill(linear)" + ] + tdSql.prepare() + tdSql.execute( + "create stable super (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ + double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ + uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ + float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + ) + tdSql.execute( + "create stable superb (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ + double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ + uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ + float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + ) + tdSql.execute( + "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomBigint(), self.randomDouble(), self.randomDouble(), + self.randomNchar(), self.randomSmallint(), self.randomTinyint(), + self.randomNchar(), self.randomUInt(), self.randomUBigint(), + self.randomUSmallint(), self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + 
"insert into t1 values (1629796215892, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215893, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215894, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomBigint(), self.randomDouble(), self.randomDouble(), + self.randomNchar(), self.randomSmallint(), self.randomTinyint(), + self.randomNchar(), self.randomUInt(), self.randomUBigint(), + self.randomUSmallint(), self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215892, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215893, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215894, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + + for s in range(len(select_command)): + for f in range(len(from_command)): + sql = "select " + select_command[s] + from_command[f] + if (select_command[s] == "ceil(int_col)"\ + or select_command[s] == "ceil(bigint_col)"\ + or select_command[s] == "ceil(smallint_col)" \ + or select_command[s] == "ceil(float_col)"\ + or select_command[s] == "ceil(double_col)"\ + or select_command[s] == "ceil(tinyint_col)"\ + or select_command[s] == 
"ceil(uint_col)"\ + or select_command[s] == "ceil(ubigint_col)"\ + or select_command[s] == "ceil(usmallint_col)"\ + or select_command[s] == "ceil(utinyint_col)"\ + or select_command[s] == "1, ceil(int_col), 1"\ + or select_command[s] == "1, ceil(bigint_col), 1"\ + or select_command[s] == "1, ceil(float_col), 1"\ + or select_command[s] == "1, ceil(double_col), 1"\ + or select_command[s] == "1, ceil(smallint_col), 1"\ + or select_command[s] == "1, ceil(tinyint_col), 1"\ + or select_command[s] == "1, ceil(uint_col), 1"\ + or select_command[s] == "1, ceil(ubigint_col), 1"\ + or select_command[s] == "1, ceil(usmallint_col), 1"\ + or select_command[s] == "1, ceil(utinyint_col), 1"\ + or select_command[s] == "int_col, ceil(int_col), int_col"\ + or select_command[s] == "bigint_col, ceil(bigint_col), bigint_col"\ + or select_command[s] == "float_col, ceil(float_col), float_col"\ + or select_command[s] == "double_col, ceil(double_col), double_col"\ + or select_command[s] == "smallint_col, ceil(smallint_col), smallint_col"\ + or select_command[s] == "tinyint_col, ceil(tinyint_col), tinyint_col"\ + or select_command[s] == "uint_col, ceil(uint_col), uint_col"\ + or select_command[s] == "ubigint_col, ceil(ubigint_col), ubigint_col"\ + or select_command[s] == "usmallint_col, ceil(usmallint_col), usmallint_col"\ + or select_command[s] == "utinyint_col, ceil(utinyint_col), utinyint_col"\ + or select_command[s] == "ceil(int_col) as anyName"\ + or select_command[s] == "ceil(bigint_col) as anyName"\ + or select_command[s] == "ceil(float_col) as anyName"\ + or select_command[s] == "ceil(double_col) as anyName"\ + or select_command[s] == "ceil(smallint_col) as anyName"\ + or select_command[s] == "ceil(tinyint_col) as anyName"\ + or select_command[s] == "ceil(uint_col) as anyName"\ + or select_command[s] == "ceil(ubigint_col) as anyName"\ + or select_command[s] == "ceil(usmallint_col) as anyName"\ + or select_command[s] == "ceil(utinyint_col) as anyName"\ + or select_command[s] == "ceil(int_col) + ceil(int_col)"\ + or select_command[s] == "ceil(bigint_col) + ceil(bigint_col)"\ + or select_command[s] == "ceil(float_col) + ceil(float_col)"\ + or select_command[s] == "ceil(double_col) + ceil(double_col)"\ + or select_command[s] == "ceil(smallint_col) + ceil(smallint_col)"\ + or select_command[s] == "ceil(tinyint_col) + ceil(tinyint_col)"\ + or select_command[s] == "ceil(uint_col) + ceil(uint_col)"\ + or select_command[s] == "ceil(ubigint_col) + ceil(ubigint_col)"\ + or select_command[s] == "ceil(usmallint_col) + ceil(usmallint_col)"\ + or select_command[s] == "ceil(utinyint_col) + ceil(utinyint_col)"\ + or select_command[s] == "ceil(int_col) + ceil(int_col)"\ + or select_command[s] == "ceil(bigint_col) + ceil(bigint_col)"\ + or select_command[s] == "ceil(float_col) + ceil(float_col)"\ + or select_command[s] == "ceil(double_col) + ceil(double_col)"\ + or select_command[s] == "ceil(smallint_col) + ceil(smallint_col)"\ + or select_command[s] == "ceil(tinyint_col) + ceil(tinyint_col)"\ + or select_command[s] == "ceil(uint_col) + ceil(uint_col)"\ + or select_command[s] == "ceil(ubigint_col) + ceil(ubigint_col)"\ + or select_command[s] == "ceil(usmallint_col) + ceil(usmallint_col)"\ + or select_command[s] == "ceil(utinyint_col) + cei(utinyint_col)"\ + or select_command[s] == "ceil(int_col) - ceil(int_col)"\ + or select_command[s] == "ceil(bigint_col) - ceil(bigint_col)"\ + or select_command[s] == "ceil(float_col) - ceil(float_col)"\ + or select_command[s] == "ceil(double_col) - ceil(double_col)"\ + or select_command[s] 
== "ceil(smallint_col) - ceil(smallint_col)"\ + or select_command[s] == "ceil(tinyint_col) - ceil(tinyint_col)"\ + or select_command[s] == "ceil(uint_col) - ceil(uint_col)"\ + or select_command[s] == "ceil(ubigint_col) - ceil(ubigint_col)"\ + or select_command[s] == "ceil(usmallint_col) - ceil(usmallint_col)"\ + or select_command[s] == "ceil(utinyint_col) - ceil(utinyint_col)"\ + or select_command[s] == "ceil(int_col) * ceil(int_col)"\ + or select_command[s] == "ceil(bigint_col) * ceil(bigint_col)"\ + or select_command[s] == "ceil(float_col) * ceil(float_col)"\ + or select_command[s] == "ceil(double_col) * ceil(double_col)"\ + or select_command[s] == "ceil(smallint_col) * ceil(smallint_col)"\ + or select_command[s] == "ceil(tinyint_col) * ceil(tinyint_col)"\ + or select_command[s] == "ceil(uint_col) * ceil(uint_col)"\ + or select_command[s] == "ceil(ubigint_col) * ceil(ubigint_col)"\ + or select_command[s] == "ceil(usmallint_col) * ceil(usmallint_col)"\ + or select_command[s] == "ceil(utinyint_col) * ceil(utinyint_col)"\ + or select_command[s] == "ceil(int_col) / ceil(int_col)"\ + or select_command[s] == "ceil(bigint_col) / ceil(bigint_col)"\ + or select_command[s] == "ceil(float_col) / ceil(float_col)"\ + or select_command[s] == "ceil(double_col) / ceil(double_col)"\ + or select_command[s] == "ceil(smallint_col) / ceil(smallint_col)"\ + or select_command[s] == "ceil(tinyint_col) / ceil(tinyint_col)"\ + or select_command[s] == "ceil(uint_col) / ceil(uint_col)"\ + or select_command[s] == "ceil(ubigint_col) / ceil(ubigint_col)"\ + or select_command[s] == "ceil(usmallint_col) / ceil(usmallint_col)"\ + or select_command[s] == "ceil(utinyint_col) / ceil(utinyint_col)"): + tdSql.query(sql) + else: + tdSql.error(sql) + for sim in range(len(simple_select_command)): + for fr in range(len(advance_from_command)): + for filter in range(len(filter_command)): + for fill in range(len(fill_command)): + sql = "select " + simple_select_command[ + sim] + advance_from_command[fr] + filter_command[ + filter] + fill_command[fill] + if sql == "select ceil(t1.int_col) from t1"\ + or sql == "select ceil(super.int_col) from super"\ + or sql == "select ceil(t1.bigint_col) from t1"\ + or sql == "select ceil(super.bigint_col) from super"\ + or sql == "select ceil(t1.smallint_col) from t1"\ + or sql == "select ceil(super.smallint_col) from super"\ + or sql == "select ceil(t1.tinyint_col) from t1"\ + or sql == "select ceil(super.tinyint_col) from super"\ + or sql == "select ceil(t1.float_col) from t1"\ + or sql == "select ceil(super.float_col) from super"\ + or sql == "select ceil(t1.double_col) from t1"\ + or sql == "select ceil(super.double_col) from super"\ + or sql == "select ceil(t1.uint_col) from t1"\ + or sql == "select ceil(super.uint_col) from super"\ + or sql == "select ceil(t1.ubigint_col) from t1"\ + or sql == "select ceil(super.ubigint_col) from super"\ + or sql == "select ceil(t1.usmallint_col) from t1"\ + or sql == "select ceil(super.usmallint_col) from super"\ + or sql == "select ceil(t1.utinyint_col) from t1"\ + or sql == "select ceil(super.utinyint_col) from super"\ + or sql == "select ceil(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.tinyint_col) from super, superb 
where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select ceil(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag": + tdSql.query(sql) + else: + tdSql.error(sql) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_floor.py b/tests/pytest/functions/function_floor.py new file mode 100644 index 0000000000000000000000000000000000000000..305e3b798a74376766a14cd824ded617db3cc8a2 --- /dev/null +++ b/tests/pytest/functions/function_floor.py @@ -0,0 +1,1518 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def randomInt(self): + return random.randint(-2147483647, 2147483647) + + def randomUInt(self): + return random.randint(0, 4294967294) + + def randomBigint(self): + return random.randint(-2**63 + 1, 2**63 - 1) + + def randomUBigint(self): + return random.randint(0, 18446744073709551614) + + def randomDouble(self): + return random.random() + + def randomNchar(self): + return random.choice('abcdefghijklmnopqrstuvwxyz') + + def randomSmallint(self): + return random.randint(-32767, 32767) + + def randomUSmallint(self): + return random.randint(0, 65534) + + def randomTinyint(self): + return random.randint(-127, 127) + + def randomUTinyint(self): + return random.randint(0, 254) + + def run(self): + select_command = [ + "floor(ts)", + "floor(timestamp_col)", + "floor(int_col)", + "floor(bigint_col)", + "floor(float_col)", + "floor(double_col)", + "floor(binary_col)", + "floor(smallint_col)", + "floor(tinyint_col)", + "floor(bool_col)", + "floor(nchar_col)", + "floor(uint_col)", + "floor(ubigint_col)", + "floor(usmallint_col)", + "floor(utinyint_col)", + "floor(timestamp_tag)", + "floor(int_tag)", + "floor(bigint_tag)", + "floor(float_tag)", + "floor(double_tag)", + "floor(binary_tag)", + "floor(smallint_tag)", + "floor(tinyint_tag)", + "floor(bool_tag)", + "floor(nchar_tag)", + "floor(uint_tag)", + "floor(ubigint_tag)", + "floor(usmallint_tag)", + "floor(utinyint_tag)", + "count(floor(int_col))", + "count(floor(bigint_col))", 
+ "count(floor(float_col))", + "count(floor(double_col))", + "count(floor(smallint_col))", + "count(floor(tinyint_col))", + "count(floor(uint_col))", + "count(floor(ubigint_col))", + "count(floor(usmallint_col))", + "count(floor(utinyint_col))", + "avg(floor(int_col))", + "avg(floor(bigint_col))", + "avg(floor(float_col))", + "avg(floor(double_col))", + "avg(floor(smallint_col))", + "avg(floor(tinyint_col))", + "avg(floor(uint_col))", + "avg(floor(ubigint_col))", + "avg(floor(usmallint_col))", + "avg(floor(utinyint_col))", + "twa(floor(int_col))", + "twa(floor(bigint_col))", + "twa(floor(float_col))", + "twa(floor(double_col))", + "twa(floor(smallint_col))", + "twa(floor(tinyint_col))", + "twa(floor(uint_col))", + "twa(floor(ubigint_col))", + "twa(floor(usmallint_col))", + "twa(floor(utinyint_col))", + "sum(floor(int_col))", + "sum(floor(bigint_col))", + "sum(floor(float_col))", + "sum(floor(double_col))", + "sum(floor(smallint_col))", + "sum(floor(tinyint_col))", + "sum(floor(uint_col))", + "sum(floor(ubigint_col))", + "sum(floor(usmallint_col))", + "sum(floor(utinyint_col))", + "stddev(floor(int_col))", + "stddev(floor(bigint_col))", + "stddev(floor(float_col))", + "stddev(floor(double_col))", + "stddev(floor(smallint_col))", + "stddev(floor(tinyint_col))", + "stddev(floor(uint_col))", + "stddev(floor(ubigint_col))", + "stddev(floor(usmallint_col))", + "stddev(floor(utinyint_col))", + "irate(floor(int_col))", + "irate(floor(bigint_col))", + "irate(floor(float_col))", + "irate(floor(double_col))", + "irate(floor(smallint_col))", + "irate(floor(tinyint_col))", + "irate(floor(uint_col))", + "irate(floor(ubigint_col))", + "irate(floor(usmallint_col))", + "irate(floor(utinyint_col))", + "leastsquares(floor(int_col), 1, 1)", + "leastsquares(floor(bigint_col), 1, 1)", + "leastsquares(floor(float_col), 1, 1)", + "leastsquares(floor(double_col), 1, 1)", + "leastsquares(floor(smallint_col), 1, 1)", + "leastsquares(floor(tinyint_col), 1, 1)", + "leastsquares(floor(uint_col), 1, 1)", + "leastsquares(floor(ubigint_col), 1, 1)", + "leastsquares(floor(usmallint_col), 1, 1)", + "leastsquares(floor(utinyint_col), 1, 1)", + "min(floor(int_col))", + "min(floor(bigint_col))", + "min(floor(float_col))", + "min(floor(double_col))", + "min(floor(smallint_col))", + "min(floor(tinyint_col))", + "min(floor(uint_col))", + "min(floor(ubigint_col))", + "min(floor(usmallint_col))", + "min(floor(utinyint_col))", + "max(floor(int_col))", + "max(floor(bigint_col))", + "max(floor(float_col))", + "max(floor(double_col))", + "max(floor(smallint_col))", + "max(floor(tinyint_col))", + "max(floor(uint_col))", + "max(floor(ubigint_col))", + "max(floor(usmallint_col))", + "max(floor(utinyint_col))", + "first(floor(int_col))", + "first(floor(bigint_col))", + "first(floor(float_col))", + "first(floor(double_col))", + "first(floor(smallint_col))", + "first(floor(tinyint_col))", + "first(floor(uint_col))", + "first(floor(ubigint_col))", + "first(floor(usmallint_col))", + "first(floor(utinyint_col))", + "last(floor(int_col))", + "last(floor(bigint_col))", + "last(floor(float_col))", + "last(floor(double_col))", + "last(floor(smallint_col))", + "last(floor(tinyint_col))", + "last(floor(uint_col))", + "last(floor(ubigint_col))", + "last(floor(usmallint_col))", + "last(floor(utinyint_col))", + "top(floor(int_col), 1)", + "top(floor(bigint_col), 1)", + "top(floor(float_col), 1)", + "top(floor(double_col), 1)", + "top(floor(smallint_col), 1)", + "top(floor(tinyint_col), 1)", + "top(floor(uint_col), 1)", + "top(floor(ubigint_col), 1)", + 
"top(floor(usmallint_col), 1)", + "top(floor(utinyint_col), 1)", + "bottom(floor(int_col), 1)", + "bottom(floor(bigint_col), 1)", + "bottom(floor(float_col), 1)", + "bottom(floor(double_col), 1)", + "bottom(floor(smallint_col), 1)", + "bottom(floor(tinyint_col), 1)", + "bottom(floor(uint_col), 1)", + "bottom(floor(ubigint_col), 1)", + "bottom(floor(usmallint_col), 1)", + "bottom(floor(utinyint_col), 1)", + "percentile(floor(int_col), 20)", + "percentile(floor(bigint_col), 20)", + "percentile(floor(float_col), 20)", + "percentile(floor(double_col), 20)", + "percentile(floor(smallint_col), 20)", + "percentile(floor(tinyint_col), 20)", + "percentile(floor(uint_col), 20)", + "percentile(floor(ubigint_col), 20)", + "percentile(floor(usmallint_col), 20)", + "percentile(floor(utinyint_col), 20)", + "apercentile(floor(int_col), 20)", + "apercentile(floor(bigint_col), 20)", + "apercentile(floor(float_col), 20)", + "apercentile(floor(double_col), 20)", + "apercentile(floor(smallint_col), 20)", + "apercentile(floor(tinyint_col), 20)", + "apercentile(floor(uint_col), 20)", + "apercentile(floor(ubigint_col), 20)", + "apercentile(floor(usmallint_col), 20)", + "apercentile(floor(utinyint_col), 20)", + "last_row(floor(int_col))", + "last_row(floor(bigint_col))", + "last_row(floor(float_col))", + "last_row(floor(double_col))", + "last_row(floor(smallint_col))", + "last_row(floor(tinyint_col))", + "last_row(floor(uint_col))", + "last_row(floor(ubigint_col))", + "last_row(floor(usmallint_col))", + "last_row(floor(utinyint_col))", + "interp(floor(int_col))", + "interp(floor(bigint_col))", + "interp(floor(float_col))", + "interp(floor(double_col))", + "interp(floor(smallint_col))", + "interp(floor(tinyint_col))", + "interp(floor(uint_col))", + "interp(floor(ubigint_col))", + "interp(floor(usmallint_col))", + "interp(floor(utinyint_col))", + "diff(floor(int_col))", + "diff(floor(bigint_col))", + "diff(floor(float_col))", + "diff(floor(double_col))", + "diff(floor(smallint_col))", + "diff(floor(tinyint_col))", + "diff(floor(uint_col))", + "diff(floor(ubigint_col))", + "diff(floor(usmallint_col))", + "diff(floor(utinyint_col))", + "spread(floor(int_col))", + "spread(floor(bigint_col))", + "spread(floor(float_col))", + "spread(floor(double_col))", + "spread(floor(smallint_col))", + "spread(floor(tinyint_col))", + "spread(floor(uint_col))", + "spread(floor(ubigint_col))", + "spread(floor(usmallint_col))", + "spread(floor(utinyint_col))", + "derivative(floor(int_col), 1s, 0)", + "derivative(floor(bigint_col), 1s, 0)", + "derivative(floor(float_col), 1s, 0)", + "derivative(floor(double_col), 1s, 0)", + "derivative(floor(smallint_col), 1s, 0)", + "derivative(floor(tinyint_col), 1s, 0)", + "derivative(floor(uint_col), 1s, 0)", + "derivative(floor(ubigint_col), 1s, 0)", + "derivative(floor(usmallint_col), 1s, 0)", + "derivative(floor(utinyint_col), 1s, 0)", + "floor(int_col) - floor(int_col)", + "floor(bigint_col) - floor(bigint_col)", + "floor(float_col) - floor(float_col)", + "floor(double_col) - floor(double_col)", + "floor(smallint_col) - floor(smallint_col)", + "floor(tinyint_col) - floor(tinyint_col)", + "floor(uint_col) - floor(uint_col)", + "floor(ubigint_col) - floor(ubigint_col)", + "floor(usmallint_col) - floor(usmallint_col)", + "floor(utinyint_col) - floor(utinyint_col)", + "floor(int_col) / floor(int_col)", + "floor(bigint_col) / floor(bigint_col)", + "floor(float_col) / floor(float_col)", + "floor(double_col) / floor(double_col)", + "floor(smallint_col) / floor(smallint_col)", + "floor(tinyint_col) / 
floor(tinyint_col)", + "floor(uint_col) / floor(uint_col)", + "floor(ubigint_col) / floor(ubigint_col)", + "floor(usmallint_col) / floor(usmallint_col)", + "floor(utinyint_col) / floor(utinyint_col)", + "floor(int_col) * floor(int_col)", + "floor(bigint_col) * floor(bigint_col)", + "floor(float_col) * floor(float_col)", + "floor(double_col) * floor(double_col)", + "floor(smallint_col) * floor(smallint_col)", + "floor(tinyint_col) * floor(tinyint_col)", + "floor(uint_col) * floor(uint_col)", + "floor(ubigint_col) * floor(ubigint_col)", + "floor(usmallint_col) * floor(usmallint_col)", + "floor(utinyint_col) * floor(utinyint_col)", + "floor(count(ts))", + "floor(count(timestamp_col))", + "floor(count(int_col))", + "floor(count(bigint_col))", + "floor(count(float_col))", + "floor(count(double_col))", + "floor(count(binary_col))", + "floor(count(smallint_col))", + "floor(count(tinyint_col))", + "floor(count(bool_col))", + "floor(count(nchar_col))", + "floor(count(uint_col))", + "floor(count(ubigint_col))", + "floor(count(usmallint_col))", + "floor(count(utinyint_col))", + "floor(count(timestamp_tag))", + "floor(count(int_tag))", + "floor(count(bigint_tag))", + "floor(count(float_tag))", + "floor(count(double_tag))", + "floor(count(binary_tag))", + "floor(count(smallint_tag))", + "floor(count(tinyint_tag))", + "floor(count(bool_tag))", + "floor(count(nchar_tag))", + "floor(count(uint_tag))", + "floor(count(ubigint_tag))", + "floor(count(usmallint_tag))", + "floor(count(utinyint_tag))", + "floor(avg(ts))", + "floor(avg(timestamp_col))", + "floor(avg(int_col))", + "floor(avg(bigint_col))", + "floor(avg(float_col))", + "floor(avg(double_col))", + "floor(avg(binary_col))", + "floor(avg(smallint_col))", + "floor(avg(tinyint_col))", + "floor(avg(bool_col))", + "floor(avg(nchar_col))", + "floor(avg(uint_col))", + "floor(avg(ubigint_col))", + "floor(avg(usmallint_col))", + "floor(avg(utinyint_col))", + "floor(avg(timestamp_tag))", + "floor(avg(int_tag))", + "floor(avg(bigint_tag))", + "floor(avg(float_tag))", + "floor(avg(double_tag))", + "floor(avg(binary_tag))", + "floor(avg(smallint_tag))", + "floor(avg(tinyint_tag))", + "floor(avg(bool_tag))", + "floor(avg(nchar_tag))", + "floor(avg(uint_tag))", + "floor(avg(ubigint_tag))", + "floor(avg(usmallint_tag))", + "floor(avg(utinyint_tag))", + "floor(twa(ts))", + "floor(twa(timestamp_col))", + "floor(twa(int_col))", + "floor(twa(bigint_col))", + "floor(twa(float_col))", + "floor(twa(double_col))", + "floor(twa(binary_col))", + "floor(twa(smallint_col))", + "floor(twa(tinyint_col))", + "floor(twa(bool_col))", + "floor(twa(nchar_col))", + "floor(twa(uint_col))", + "floor(twa(ubigint_col))", + "floor(twa(usmallint_col))", + "floor(twa(utinyint_col))", + "floor(twa(timestamp_tag))", + "floor(twa(int_tag))", + "floor(twa(bigint_tag))", + "floor(twa(float_tag))", + "floor(twa(double_tag))", + "floor(twa(binary_tag))", + "floor(twa(smallint_tag))", + "floor(twa(tinyint_tag))", + "floor(twa(bool_tag))", + "floor(twa(nchar_tag))", + "floor(twa(uint_tag))", + "floor(twa(ubigint_tag))", + "floor(twa(usmallint_tag))", + "floor(twa(utinyint_tag))", + "floor(sum(ts))", + "floor(sum(timestamp_col))", + "floor(sum(int_col))", + "floor(sum(bigint_col))", + "floor(sum(float_col))", + "floor(sum(double_col))", + "floor(sum(binary_col))", + "floor(sum(smallint_col))", + "floor(sum(tinyint_col))", + "floor(sum(bool_col))", + "floor(sum(nchar_col))", + "floor(sum(uint_col))", + "floor(sum(ubigint_col))", + "floor(sum(usmallint_col))", + "floor(sum(utinyint_col))", + 
"floor(sum(timestamp_tag))", + "floor(sum(int_tag))", + "floor(sum(bigint_tag))", + "floor(sum(float_tag))", + "floor(sum(double_tag))", + "floor(sum(binary_tag))", + "floor(sum(smallint_tag))", + "floor(sum(tinyint_tag))", + "floor(sum(bool_tag))", + "floor(sum(nchar_tag))", + "floor(sum(uint_tag))", + "floor(sum(ubigint_tag))", + "floor(sum(usmallint_tag))", + "floor(sum(utinyint_tag))", + "floor(stddev(ts))", + "floor(stddev(timestamp_col))", + "floor(stddev(int_col))", + "floor(stddev(bigint_col))", + "floor(stddev(float_col))", + "floor(stddev(double_col))", + "floor(stddev(binary_col))", + "floor(stddev(smallint_col))", + "floor(stddev(tinyint_col))", + "floor(stddev(bool_col))", + "floor(stddev(nchar_col))", + "floor(stddev(uint_col))", + "floor(stddev(ubigint_col))", + "floor(stddev(usmallint_col))", + "floor(stddev(utinyint_col))", + "floor(stddev(timestamp_tag))", + "floor(stddev(int_tag))", + "floor(stddev(bigint_tag))", + "floor(stddev(float_tag))", + "floor(stddev(double_tag))", + "floor(stddev(binary_tag))", + "floor(stddev(smallint_tag))", + "floor(stddev(tinyint_tag))", + "floor(stddev(bool_tag))", + "floor(stddev(nchar_tag))", + "floor(stddev(uint_tag))", + "floor(stddev(ubigint_tag))", + "floor(stddev(usmallint_tag))", + "floor(stddev(utinyint_tag))", + "floor(leastsquares(ts, 1, 1))", + "floor(leastsquares(timestamp_col, 1, 1))", + "floor(leastsquares(int_col, 1, 1))", + "floor(leastsquares(bigint_col, 1, 1))", + "floor(leastsquares(float_col, 1, 1))", + "floor(leastsquares(double_col, 1, 1))", + "floor(leastsquares(binary_col, 1, 1))", + "floor(leastsquares(smallint_col, 1, 1))", + "floor(leastsquares(tinyint_col, 1, 1))", + "floor(leastsquares(bool_col, 1, 1))", + "floor(leastsquares(nchar_col, 1, 1))", + "floor(leastsquares(uint_col, 1, 1))", + "floor(leastsquares(ubigint_col, 1, 1))", + "floor(leastsquares(usmallint_col, 1, 1))", + "floor(leastsquares(utinyint_col, 1, 1))", + "floor(leastsquares(timestamp_tag, 1, 1))", + "floor(leastsquares(int_tag, 1, 1))", + "floor(leastsquares(bigint_tag, 1, 1))", + "floor(leastsquares(float_tag, 1, 1))", + "floor(leastsquares(double_tag, 1, 1))", + "floor(leastsquares(binary_tag, 1, 1))", + "floor(leastsquares(smallint_tag, 1, 1))", + "floor(leastsquares(tinyint_tag, 1, 1))", + "floor(leastsquares(bool_tag, 1, 1))", + "floor(leastsquares(nchar_tag, 1, 1))", + "floor(leastsquares(uint_tag, 1, 1))", + "floor(leastsquares(ubigint_tag, 1, 1))", + "floor(leastsquares(usmallint_tag, 1, 1))", + "floor(leastsquares(utinyint_tag, 1, 1))", + "floor(irate(ts))", + "floor(irate(timestamp_col))", + "floor(irate(int_col))", + "floor(irate(bigint_col))", + "floor(irate(float_col))", + "floor(irate(double_col))", + "floor(irate(binary_col))", + "floor(irate(smallint_col))", + "floor(irate(tinyint_col))", + "floor(irate(bool_col))", + "floor(irate(nchar_col))", + "floor(irate(uint_col))", + "floor(irate(ubigint_col))", + "floor(irate(usmallint_col))", + "floor(irate(utinyint_col))", + "floor(irate(timestamp_tag))", + "floor(irate(int_tag))", + "floor(irate(bigint_tag))", + "floor(irate(float_tag))", + "floor(irate(double_tag))", + "floor(irate(binary_tag))", + "floor(irate(smallint_tag))", + "floor(irate(tinyint_tag))", + "floor(irate(bool_tag))", + "floor(irate(nchar_tag))", + "floor(irate(uint_tag))", + "floor(irate(ubigint_tag))", + "floor(irate(usmallint_tag))", + "floor(irate(utinyint_tag))", + "floor(min(ts))", + "floor(min(timestamp_col))", + "floor(min(int_col))", + "floor(min(bigint_col))", + "floor(min(float_col))", + 
"floor(min(double_col))", + "floor(min(binary_col))", + "floor(min(smallint_col))", + "floor(min(tinyint_col))", + "floor(min(bool_col))", + "floor(min(nchar_col))", + "floor(min(uint_col))", + "floor(min(ubigint_col))", + "floor(min(usmallint_col))", + "floor(min(utinyint_col))", + "floor(min(timestamp_tag))", + "floor(min(int_tag))", + "floor(min(bigint_tag))", + "floor(min(float_tag))", + "floor(min(double_tag))", + "floor(min(binary_tag))", + "floor(min(smallint_tag))", + "floor(min(tinyint_tag))", + "floor(min(bool_tag))", + "floor(min(nchar_tag))", + "floor(min(uint_tag))", + "floor(min(ubigint_tag))", + "floor(min(usmallint_tag))", + "floor(min(utinyint_tag))", + "floor(max(ts))", + "floor(max(timestamp_col))", + "floor(max(int_col))", + "floor(max(bigint_col))", + "floor(max(float_col))", + "floor(max(double_col))", + "floor(max(binary_col))", + "floor(max(smallint_col))", + "floor(max(tinyint_col))", + "floor(max(bool_col))", + "floor(max(nchar_col))", + "floor(max(uint_col))", + "floor(max(ubigint_col))", + "floor(max(usmallint_col))", + "floor(max(utinyint_col))", + "floor(max(timestamp_tag))", + "floor(max(int_tag))", + "floor(max(bigint_tag))", + "floor(max(float_tag))", + "floor(max(double_tag))", + "floor(max(binary_tag))", + "floor(max(smallint_tag))", + "floor(max(tinyint_tag))", + "floor(max(bool_tag))", + "floor(max(nchar_tag))", + "floor(max(uint_tag))", + "floor(max(ubigint_tag))", + "floor(max(usmallint_tag))", + "floor(max(utinyint_tag))", + "floor(first(ts))", + "floor(first(timestamp_col))", + "floor(first(int_col))", + "floor(first(bigint_col))", + "floor(first(float_col))", + "floor(first(double_col))", + "floor(first(binary_col))", + "floor(first(smallint_col))", + "floor(first(tinyint_col))", + "floor(first(bool_col))", + "floor(first(nchar_col))", + "floor(first(uint_col))", + "floor(first(ubigint_col))", + "floor(first(usmallint_col))", + "floor(first(utinyint_col))", + "floor(first(timestamp_tag))", + "floor(first(int_tag))", + "floor(first(bigint_tag))", + "floor(first(float_tag))", + "floor(first(double_tag))", + "floor(first(binary_tag))", + "floor(first(smallint_tag))", + "floor(first(tinyint_tag))", + "floor(first(bool_tag))", + "floor(first(nchar_tag))", + "floor(first(uint_tag))", + "floor(first(ubigint_tag))", + "floor(first(usmallint_tag))", + "floor(first(utinyint_tag))", + "floor(last(ts))", + "floor(last(timestamp_col))", + "floor(last(int_col))", + "floor(last(bigint_col))", + "floor(last(float_col))", + "floor(last(double_col))", + "floor(last(binary_col))", + "floor(last(smallint_col))", + "floor(last(tinyint_col))", + "floor(last(bool_col))", + "floor(last(nchar_col))", + "floor(last(uint_col))", + "floor(last(ubigint_col))", + "floor(last(usmallint_col))", + "floor(last(utinyint_col))", + "floor(last(timestamp_tag))", + "floor(last(int_tag))", + "floor(last(bigint_tag))", + "floor(last(float_tag))", + "floor(last(double_tag))", + "floor(last(binary_tag))", + "floor(last(smallint_tag))", + "floor(last(tinyint_tag))", + "floor(last(bool_tag))", + "floor(last(nchar_tag))", + "floor(last(uint_tag))", + "floor(last(ubigint_tag))", + "floor(last(usmallint_tag))", + "floor(last(utinyint_tag))", + "floor(top(ts, 1))", + "floor(top(timestamp_col, 1))", + "floor(top(int_col, 1))", + "floor(top(bigint_col, 1))", + "floor(top(float_col, 1))", + "floor(top(double_col, 1))", + "floor(top(binary_col, 1))", + "floor(top(smallint_col, 1))", + "floor(top(tinyint_col, 1))", + "floor(top(bool_col, 1))", + "floor(top(nchar_col, 1))", + "floor(top(uint_col, 1))", 
+ "floor(top(ubigint_col, 1))", + "floor(top(usmallint_col, 1))", + "floor(top(utinyint_col, 1))", + "floor(top(timestamp_tag, 1))", + "floor(top(int_tag, 1))", + "floor(top(bigint_tag, 1))", + "floor(top(float_tag, 1))", + "floor(top(double_tag, 1))", + "floor(top(binary_tag, 1))", + "floor(top(smallint_tag, 1))", + "floor(top(tinyint_tag, 1))", + "floor(top(bool_tag, 1))", + "floor(top(nchar_tag, 1))", + "floor(top(uint_tag, 1))", + "floor(top(ubigint_tag, 1))", + "floor(top(usmallint_tag, 1))", + "floor(top(utinyint_tag, 1))", + "floor(bottom(ts, 1))", + "floor(bottom(timestamp_col, 1))", + "floor(bottom(int_col, 1))", + "floor(bottom(bigint_col, 1))", + "floor(bottom(float_col, 1))", + "floor(bottom(double_col, 1))", + "floor(bottom(binary_col, 1))", + "floor(bottom(smallint_col, 1))", + "floor(bottom(tinyint_col, 1))", + "floor(bottom(bool_col, 1))", + "floor(bottom(nchar_col, 1))", + "floor(bottom(uint_col, 1))", + "floor(bottom(ubigint_col, 1))", + "floor(bottom(usmallint_col, 1))", + "floor(bottom(utinyint_col, 1))", + "floor(bottom(timestamp_tag, 1))", + "floor(bottom(int_tag, 1))", + "floor(bottom(bigint_tag, 1))", + "floor(bottom(float_tag, 1))", + "floor(bottom(double_tag, 1))", + "floor(bottom(binary_tag, 1))", + "floor(bottom(smallint_tag, 1))", + "floor(bottom(tinyint_tag, 1))", + "floor(bottom(bool_tag, 1))", + "floor(bottom(nchar_tag, 1))", + "floor(bottom(uint_tag, 1))", + "floor(bottom(ubigint_tag, 1))", + "floor(bottom(usmallint_tag, 1))", + "floor(bottom(utinyint_tag, 1))", + "floor(percentile(ts, 1))", + "floor(percentile(timestamp_col, 1))", + "floor(percentile(int_col, 1))", + "floor(percentile(bigint_col, 1))", + "floor(percentile(float_col, 1))", + "floor(percentile(double_col, 1))", + "floor(percentile(binary_col, 1))", + "floor(percentile(smallint_col, 1))", + "floor(percentile(tinyint_col, 1))", + "floor(percentile(bool_col, 1))", + "floor(percentile(nchar_col, 1))", + "floor(percentile(uint_col, 1))", + "floor(percentile(ubigint_col, 1))", + "floor(percentile(usmallint_col, 1))", + "floor(percentile(utinyint_col, 1))", + "floor(percentile(timestamp_tag, 1))", + "floor(percentile(int_tag, 1))", + "floor(percentile(bigint_tag, 1))", + "floor(percentile(float_tag, 1))", + "floor(percentile(double_tag, 1))", + "floor(percentile(binary_tag, 1))", + "floor(percentile(smallint_tag, 1))", + "floor(percentile(tinyint_tag, 1))", + "floor(percentile(bool_tag, 1))", + "floor(percentile(nchar_tag, 1))", + "floor(percentile(uint_tag, 1))", + "floor(percentile(ubigint_tag, 1))", + "floor(percentile(usmallint_tag, 1))", + "floor(percentile(utinyint_tag, 1))", + "floor(apercentile(ts, 1))", + "floor(apercentile(timestamp_col, 1))", + "floor(apercentile(int_col, 1))", + "floor(apercentile(bigint_col, 1))", + "floor(apercentile(float_col, 1))", + "floor(apercentile(double_col, 1))", + "floor(apercentile(binary_col, 1))", + "floor(apercentile(smallint_col, 1))", + "floor(apercentile(tinyint_col, 1))", + "floor(apercentile(bool_col, 1))", + "floor(apercentile(nchar_col, 1))", + "floor(apercentile(uint_col, 1))", + "floor(apercentile(ubigint_col, 1))", + "floor(apercentile(usmallint_col, 1))", + "floor(apercentile(utinyint_col, 1))", + "floor(apercentile(timestamp_tag, 1))", + "floor(apercentile(int_tag, 1))", + "floor(apercentile(bigint_tag, 1))", + "floor(apercentile(float_tag, 1))", + "floor(apercentile(double_tag, 1))", + "floor(apercentile(binary_tag, 1))", + "floor(apercentile(smallint_tag, 1))", + "floor(apercentile(tinyint_tag, 1))", + "floor(apercentile(bool_tag, 1))", + 
"floor(apercentile(nchar_tag, 1))", + "floor(apercentile(uint_tag, 1))", + "floor(apercentile(ubigint_tag, 1))", + "floor(apercentile(usmallint_tag, 1))", + "floor(apercentile(utinyint_tag, 1))", + "floor(last_row(ts))", + "floor(last_row(timestamp_col))", + "floor(last_row(int_col))", + "floor(last_row(bigint_col))", + "floor(last_row(float_col))", + "floor(last_row(double_col))", + "floor(last_row(binary_col))", + "floor(last_row(smallint_col))", + "floor(last_row(tinyint_col))", + "floor(last_row(bool_col))", + "floor(last_row(nchar_col))", + "floor(last_row(uint_col))", + "floor(last_row(ubigint_col))", + "floor(last_row(usmallint_col))", + "floor(last_row(utinyint_col))", + "floor(last_row(timestamp_tag))", + "floor(last_row(int_tag))", + "floor(last_row(bigint_tag))", + "floor(last_row(float_tag))", + "floor(last_row(double_tag))", + "floor(last_row(binary_tag))", + "floor(last_row(smallint_tag))", + "floor(last_row(tinyint_tag))", + "floor(last_row(bool_tag))", + "floor(last_row(nchar_tag))", + "floor(last_row(uint_tag))", + "floor(last_row(ubigint_tag))", + "floor(last_row(usmallint_tag))", + "floor(last_row(utinyint_tag))", + "floor(interp(ts))", + "floor(interp(timestamp_col))", + "floor(interp(int_col))", + "floor(interp(bigint_col))", + "floor(interp(float_col))", + "floor(interp(double_col))", + "floor(interp(binary_col))", + "floor(interp(smallint_col))", + "floor(interp(tinyint_col))", + "floor(interp(bool_col))", + "floor(interp(nchar_col))", + "floor(interp(uint_col))", + "floor(interp(ubigint_col))", + "floor(interp(usmallint_col))", + "floor(interp(utinyint_col))", + "floor(interp(timestamp_tag))", + "floor(interp(int_tag))", + "floor(interp(bigint_tag))", + "floor(interp(float_tag))", + "floor(interp(double_tag))", + "floor(interp(binary_tag))", + "floor(interp(smallint_tag))", + "floor(interp(tinyint_tag))", + "floor(interp(bool_tag))", + "floor(interp(nchar_tag))", + "floor(interp(uint_tag))", + "floor(interp(ubigint_tag))", + "floor(interp(usmallint_tag))", + "floor(interp(utinyint_tag))", + "floor(diff(ts))", + "floor(diff(timestamp_col))", + "floor(diff(int_col))", + "floor(diff(bigint_col))", + "floor(diff(float_col))", + "floor(diff(double_col))", + "floor(diff(binary_col))", + "floor(diff(smallint_col))", + "floor(diff(tinyint_col))", + "floor(diff(bool_col))", + "floor(diff(nchar_col))", + "floor(diff(uint_col))", + "floor(diff(ubigint_col))", + "floor(diff(usmallint_col))", + "floor(diff(utinyint_col))", + "floor(diff(timestamp_tag))", + "floor(diff(int_tag))", + "floor(diff(bigint_tag))", + "floor(diff(float_tag))", + "floor(diff(double_tag))", + "floor(diff(binary_tag))", + "floor(diff(smallint_tag))", + "floor(diff(tinyint_tag))", + "floor(diff(bool_tag))", + "floor(diff(nchar_tag))", + "floor(diff(uint_tag))", + "floor(diff(ubigint_tag))", + "floor(diff(usmallint_tag))", + "floor(diff(utinyint_tag))", + "floor(spread(ts))", + "floor(spread(timestamp_col))", + "floor(spread(int_col))", + "floor(spread(bigint_col))", + "floor(spread(float_col))", + "floor(spread(double_col))", + "floor(spread(binary_col))", + "floor(spread(smallint_col))", + "floor(spread(tinyint_col))", + "floor(spread(bool_col))", + "floor(spread(nchar_col))", + "floor(spread(uint_col))", + "floor(spread(ubigint_col))", + "floor(spread(usmallint_col))", + "floor(spread(utinyint_col))", + "floor(spread(timestamp_tag))", + "floor(spread(int_tag))", + "floor(spread(bigint_tag))", + "floor(spread(float_tag))", + "floor(spread(double_tag))", + "floor(spread(binary_tag))", + 
"floor(spread(smallint_tag))", + "floor(spread(tinyint_tag))", + "floor(spread(bool_tag))", + "floor(spread(nchar_tag))", + "floor(spread(uint_tag))", + "floor(spread(ubigint_tag))", + "floor(spread(usmallint_tag))", + "floor(spread(utinyint_tag))", + "floor(derivative(ts, 1s, 0))", + "floor(derivative(timestamp_col, 1s, 0))", + "floor(derivative(int_col, 1s, 0))", + "floor(derivative(bigint_col, 1s, 0))", + "floor(derivative(float_col, 1s, 0))", + "floor(derivative(double_col, 1s, 0))", + "floor(derivative(binary_col, 1s, 0))", + "floor(derivative(smallint_col, 1s, 0))", + "floor(derivative(tinyint_col, 1s, 0))", + "floor(derivative(bool_col, 1s, 0))", + "floor(derivative(nchar_col, 1s, 0))", + "floor(derivative(uint_col, 1s, 0))", + "floor(derivative(ubigint_col, 1s, 0))", + "floor(derivative(usmallint_col, 1s, 0))", + "floor(derivative(utinyint_col, 1s, 0))", + "floor(derivative(timestamp_tag, 1s, 0))", + "floor(derivative(int_tag, 1s, 0))", + "floor(derivative(bigint_tag, 1s, 0))", + "floor(derivative(float_tag, 1s, 0))", + "floor(derivative(double_tag, 1s, 0))", + "floor(derivative(binary_tag, 1s, 0))", + "floor(derivative(smallint_tag, 1s, 0))", + "floor(derivative(tinyint_tag, 1s, 0))", + "floor(derivative(bool_tag, 1s, 0))", + "floor(derivative(nchar_tag, 1s, 0))", + "floor(derivative(uint_tag, 1s, 0))", + "floor(derivative(ubigint_tag, 1s, 0))", + "floor(derivative(usmallint_tag, 1s, 0))", + "floor(derivative(utinyint_tag, 1s, 0))", + "floor(ts + ts)", + "floor(timestamp_col + timestamp_col)", + "floor(int_col + int_col)", + "floor(bigint_col + bigint_col)", + "floor(float_col + float_col)", + "floor(double_col + double_col)", + "floor(binary_col + binary_col)", + "floor(smallint_col + smallint_col)", + "floor(tinyint_col + tinyint_col)", + "floor(bool_col + bool_col)", + "floor(nchar_col + nchar_col)", + "floor(uint_col + uint_col)", + "floor(ubigint_col + ubigint_col)", + "floor(usmallint_col + usmallint_col)", + "floor(utinyint_col + utinyint_col)", + "floor(timestamp_tag + timestamp_tag)", + "floor(int_tag + int_tag)", + "floor(bigint_tag + bigint_tag)", + "floor(float_tag + float_tag)", + "floor(double_tag + double_tag)", + "floor(binary_tag + binary_tag)", + "floor(smallint_tag + smallint_tag)", + "floor(tinyint_tag + tinyint_tag)", + "floor(bool_tag + bool_tag)", + "floor(nchar_tag + nchar_tag)", + "floor(uint_tag + uint_tag)", + "floor(ubigint_tag + ubigint_tag)", + "floor(usmallint_tag + usmallint_tag)", + "floor(utinyint_tag + utinyint_tag)", + "floor(ts - ts)", + "floor(timestamp_col - timestamp_col)", + "floor(int_col - int_col)", + "floor(bigint_col - bigint_col)", + "floor(float_col - float_col)", + "floor(double_col - double_col)", + "floor(binary_col - binary_col)", + "floor(smallint_col - smallint_col)", + "floor(tinyint_col - tinyint_col)", + "floor(bool_col - bool_col)", + "floor(nchar_col - nchar_col)", + "floor(uint_col - uint_col)", + "floor(ubigint_col - ubigint_col)", + "floor(usmallint_col - usmallint_col)", + "floor(utinyint_col - utinyint_col)", + "floor(timestamp_tag - timestamp_tag)", + "floor(int_tag - int_tag)", + "floor(bigint_tag - bigint_tag)", + "floor(float_tag - float_tag)", + "floor(double_tag - double_tag)", + "floor(binary_tag - binary_tag)", + "floor(smallint_tag - smallint_tag)", + "floor(tinyint_tag - tinyint_tag)", + "floor(bool_tag - bool_tag)", + "floor(nchar_tag - nchar_tag)", + "floor(uint_tag - uint_tag)", + "floor(ubigint_tag - ubigint_tag)", + "floor(usmallint_tag - usmallint_tag)", + "floor(utinyint_tag - utinyint_tag)", + 
"floor(ts * ts)", + "floor(timestamp_col * timestamp_col)", + "floor(int_col * int_col)", + "floor(bigint_col * bigint_col)", + "floor(float_col * float_col)", + "floor(double_col * double_col)", + "floor(binary_col * binary_col)", + "floor(smallint_col * smallint_col)", + "floor(tinyint_col * tinyint_col)", + "floor(bool_col * bool_col)", + "floor(nchar_col * nchar_col)", + "floor(uint_col * uint_col)", + "floor(ubigint_col * ubigint_col)", + "floor(usmallint_col * usmallint_col)", + "floor(utinyint_col * utinyint_col)", + "floor(timestamp_tag * timestamp_tag)", + "floor(int_tag * int_tag)", + "floor(bigint_tag * bigint_tag)", + "floor(float_tag * float_tag)", + "floor(double_tag * double_tag)", + "floor(binary_tag * binary_tag)", + "floor(smallint_tag * smallint_tag)", + "floor(tinyint_tag * tinyint_tag)", + "floor(bool_tag * bool_tag)", + "floor(nchar_tag * nchar_tag)", + "floor(uint_tag * uint_tag)", + "floor(ubigint_tag * ubigint_tag)", + "floor(usmallint_tag * usmallint_tag)", + "floor(utinyint_tag * utinyint_tag)", + "floor(ts / ts)", + "floor(timestamp_col / timestamp_col)", + "floor(int_col / int_col)", + "floor(bigint_col / bigint_col)", + "floor(float_col / float_col)", + "floor(double_col / double_col)", + "floor(binary_col / binary_col)", + "floor(smallint_col / smallint_col)", + "floor(tinyint_col / tinyint_col)", + "floor(bool_col / bool_col)", + "floor(nchar_col / nchar_col)", + "floor(uint_col / uint_col)", + "floor(ubigint_col / ubigint_col)", + "floor(usmallint_col / usmallint_col)", + "floor(utinyint_col / utinyint_col)", + "floor(timestamp_tag / timestamp_tag)", + "floor(int_tag / int_tag)", + "floor(bigint_tag / bigint_tag)", + "floor(float_tag / float_tag)", + "floor(double_tag / double_tag)", + "floor(binary_tag / binary_tag)", + "floor(smallint_tag / smallint_tag)", + "floor(tinyint_tag / tinyint_tag)", + "floor(bool_tag / bool_tag)", + "floor(nchar_tag / nchar_tag)", + "floor(uint_tag / uint_tag)", + "floor(ubigint_tag / ubigint_tag)", + "floor(usmallint_tag / usmallint_tag)", + "floor(utinyint_tag / utinyint_tag)", + "int_col, floor(int_col), int_col", + "bigint_col, floor(bigint_col), bigint_col", + "float_col, floor(float_col), float_col", + "double_col, floor(double_col), double_col", + "smallint_col, floor(smallint_col), smallint_col", + "tinyint_col, floor(tinyint_col), tinyint_col", + "uint_col, floor(uint_col), uint_col", + "ubigint_col, floor(ubigint_col), ubigint_col", + "usmallint_col, floor(usmallint_col), usmallint_col", + "utinyint_col, floor(utinyint_col), utinyint_col", + "count(int_col), floor(int_col), count(int_col)", + "count(bigint_col), floor(bigint_col), count(bigint_col)", + "count(float_col), floor(float_col), count(float_col)", + "count(double_col), floor(double_col), count(double_col)", + "count(smallint_col), floor(smallint_col), count(smallint_col)", + "count(tinyint_col), floor(tinyint_col), count(tinyint_col)", + "count(uint_col), floor(uint_col), count(uint_col)", + "count(ubigint_col), floor(ubigint_col), count(ubigint_col)", + "count(usmallint_col), floor(usmallint_col), count(usmallint_col)", + "count(utinyint_col), floor(utinyint_col), count(utinyint_col)", + "avg(int_col), floor(int_col), avg(int_col)", + "avg(bigint_col), floor(bigint_col), avg(bigint_col)", + "avg(float_col), floor(float_col), avg(float_col)", + "avg(double_col), floor(double_col), avg(double_col)", + "avg(smallint_col), floor(smallint_col), avg(smallint_col)", + "avg(tinyint_col), floor(tinyint_col), avg(tinyint_col)", + "avg(uint_col), floor(uint_col), 
avg(uint_col)", + "avg(ubigint_col), floor(ubigint_col), avg(ubigint_col)", + "avg(usmallint_col), floor(usmallint_col), avg(usmallint_col)", + "avg(utinyint_col), floor(utinyint_col), avg(utinyint_col)", + "twa(int_col), floor(int_col), twa(int_col)", + "twa(bigint_col), floor(bigint_col), twa(bigint_col)", + "twa(float_col), floor(float_col), twa(float_col)", + "twa(double_col), floor(double_col), twa(double_col)", + "twa(smallint_col), floor(smallint_col), twa(smallint_col)", + "twa(tinyint_col), floor(tinyint_col), twa(tinyint_col)", + "twa(uint_col), floor(uint_col), twa(uint_col)", + "twa(ubigint_col), floor(ubigint_col), twa(ubigint_col)", + "twa(usmallint_col), floor(usmallint_col), twa(usmallint_col)", + "twa(utinyint_col), floor(utinyint_col), twa(utinyint_col)", + "sum(int_col), floor(int_col), sum(int_col)", + "sum(bigint_col), floor(bigint_col), sum(bigint_col)", + "sum(float_col), floor(float_col), sum(float_col)", + "sum(double_col), floor(double_col), sum(double_col)", + "sum(smallint_col), floor(smallint_col), sum(smallint_col)", + "sum(tinyint_col), floor(tinyint_col), sum(tinyint_col)", + "sum(uint_col), floor(uint_col), sum(uint_col)", + "sum(ubigint_col), floor(ubigint_col), sum(ubigint_col)", + "sum(usmallint_col), floor(usmallint_col), sum(usmallint_col)", + "sum(utinyint_col), floor(utinyint_col), sum(utinyint_col)", + "stddev(int_col), floor(int_col), stddev(int_col)", + "stddev(bigint_col), floor(bigint_col), stddev(bigint_col)", + "stddev(float_col), floor(float_col), stddev(float_col)", + "stddev(double_col), floor(double_col), stddev(double_col)", + "stddev(smallint_col), floor(smallint_col), stddev(smallint_col)", + "stddev(tinyint_col), floor(tinyint_col), stddev(tinyint_col)", + "stddev(uint_col), floor(uint_col), stddev(uint_col)", + "stddev(ubigint_col), floor(ubigint_col), stddev(ubigint_col)", + "stddev(usmallint_col), floor(usmallint_col), stddev(usmallint_col)", + "stddev(utinyint_col), floor(utinyint_col), stddev(utinyint_col)", + "irate(int_col), floor(int_col), irate(int_col)", + "irate(bigint_col), floor(bigint_col), irate(bigint_col)", + "irate(float_col), floor(float_col), irate(float_col)", + "irate(double_col), floor(double_col), irate(double_col)", + "irate(smallint_col), floor(smallint_col), irate(smallint_col)", + "irate(tinyint_col), floor(tinyint_col), irate(tinyint_col)", + "irate(uint_col), floor(uint_col), irate(uint_col)", + "irate(ubigint_col), floor(ubigint_col), irate(ubigint_col)", + "irate(usmallint_col), floor(usmallint_col), irate(usmallint_col)", + "irate(utinyint_col), floor(utinyint_col), irate(utinyint_col)", + "min(int_col), floor(int_col), min(int_col)", + "min(bigint_col), floor(bigint_col), min(bigint_col)", + "min(float_col), floor(float_col), min(float_col)", + "min(double_col), floor(double_col), min(double_col)", + "min(smallint_col), floor(smallint_col), min(smallint_col)", + "min(tinyint_col), floor(tinyint_col), min(tinyint_col)", + "min(uint_col), floor(uint_col), min(uint_col)", + "min(ubigint_col), floor(ubigint_col), min(ubigint_col)", + "min(usmallint_col), floor(usmallint_col), min(usmallint_col)", + "min(utinyint_col), floor(utinyint_col), min(utinyint_col)", + "max(int_col), floor(int_col), max(int_col)", + "max(bigint_col), floor(bigint_col), max(bigint_col)", + "max(float_col), floor(float_col), max(float_col)", + "max(double_col), floor(double_col), max(double_col)", + "max(smallint_col), floor(smallint_col), max(smallint_col)", + "max(tinyint_col), floor(tinyint_col), max(tinyint_col)", + 
"max(uint_col), floor(uint_col), max(uint_col)", + "max(ubigint_col), floor(ubigint_col), max(ubigint_col)", + "max(usmallint_col), floor(usmallint_col), max(usmallint_col)", + "max(utinyint_col), floor(utinyint_col), max(utinyint_col)", + "first(int_col), floor(int_col), first(int_col)", + "first(bigint_col), floor(bigint_col), first(bigint_col)", + "first(float_col), floor(float_col), first(float_col)", + "first(double_col), floor(double_col), first(double_col)", + "first(smallint_col), floor(smallint_col), first(smallint_col)", + "first(tinyint_col), floor(tinyint_col), first(tinyint_col)", + "first(uint_col), floor(uint_col), first(uint_col)", + "first(ubigint_col), floor(ubigint_col), first(ubigint_col)", + "first(usmallint_col), floor(usmallint_col), first(usmallint_col)", + "first(utinyint_col), floor(utinyint_col), first(utinyint_col)", + "last(int_col), floor(int_col), last(int_col)", + "last(bigint_col), floor(bigint_col), last(bigint_col)", + "last(float_col), floor(float_col), last(float_col)", + "last(double_col), floor(double_col), last(double_col)", + "last(smallint_col), floor(smallint_col), last(smallint_col)", + "last(tinyint_col), floor(tinyint_col), last(tinyint_col)", + "last(uint_col), floor(uint_col), last(uint_col)", + "last(ubigint_col), floor(ubigint_col), last(ubigint_col)", + "last(usmallint_col), floor(usmallint_col), last(usmallint_col)", + "last(utinyint_col), floor(utinyint_col), last(utinyint_col)", + "last_row(int_col), floor(int_col), last_row(int_col)", + "last_row(bigint_col), floor(bigint_col), last_row(bigint_col)", + "last_row(float_col), floor(float_col), last_row(float_col)", + "last_row(double_col), floor(double_col), last_row(double_col)", + "last_row(smallint_col), floor(smallint_col), last_row(smallint_col)", + "last_row(tinyint_col), floor(tinyint_col), last_row(tinyint_col)", + "last_row(uint_col), floor(uint_col), last_row(uint_col)", + "last_row(ubigint_col), floor(ubigint_col), last_row(ubigint_col)", + "last_row(usmallint_col), floor(usmallint_col), last_row(usmallint_col)", + "last_row(utinyint_col), floor(utinyint_col), last_row(utinyint_col)", + "interp(int_col), floor(int_col), interp(int_col)", + "interp(bigint_col), floor(bigint_col), interp(bigint_col)", + "interp(float_col), floor(float_col), interp(float_col)", + "interp(double_col), floor(double_col), interp(double_col)", + "interp(smallint_col), floor(smallint_col), interp(smallint_col)", + "interp(tinyint_col), floor(tinyint_col), interp(tinyint_col)", + "interp(uint_col), floor(uint_col), interp(uint_col)", + "interp(ubigint_col), floor(ubigint_col), interp(ubigint_col)", + "interp(usmallint_col), floor(usmallint_col), interp(usmallint_col)", + "interp(utinyint_col), floor(utinyint_col), interp(utinyint_col)", + "diff(int_col), floor(int_col), diff(int_col)", + "diff(bigint_col), floor(bigint_col), diff(bigint_col)", + "diff(float_col), floor(float_col), diff(float_col)", + "diff(double_col), floor(double_col), diff(double_col)", + "diff(smallint_col), floor(smallint_col), diff(smallint_col)", + "diff(tinyint_col), floor(tinyint_col), diff(tinyint_col)", + "diff(uint_col), floor(uint_col), diff(uint_col)", + "diff(ubigint_col), floor(ubigint_col), diff(ubigint_col)", + "diff(usmallint_col), floor(usmallint_col), diff(usmallint_col)", + "diff(utinyint_col), floor(utinyint_col), diff(utinyint_col)", + "spread(int_col), floor(int_col), spread(int_col)", + "spread(bigint_col), floor(bigint_col), spread(bigint_col)", + "spread(float_col), floor(float_col), spread(float_col)", + 
"spread(double_col), floor(double_col), spread(double_col)", + "spread(smallint_col), floor(smallint_col), spread(smallint_col)", + "spread(tinyint_col), floor(tinyint_col), spread(tinyint_col)", + "spread(uint_col), floor(uint_col), spread(uint_col)", + "spread(ubigint_col), floor(ubigint_col), spread(ubigint_col)", + "spread(usmallint_col), floor(usmallint_col), spread(usmallint_col)", + "spread(utinyint_col), floor(utinyint_col), spread(utinyint_col)", + "leastsquares(int_col, 1, 1), floor(int_col), leastsquares(int_col, 1, 1)", + "leastsquares(bigint_col, 1, 1), floor(bigint_col), leastsquares(bigint_col, 1, 1)", + "leastsquares(float_col, 1, 1), floor(float_col), leastsquares(float_col, 1, 1)", + "leastsquares(double_col, 1, 1), floor(double_col), leastsquares(double_col, 1, 1)", + "leastsquares(smallint_col, 1, 1), floor(smallint_col), leastsquares(smallint_col, 1, 1)", + "leastsquares(tinyint_col, 1, 1), floor(tinyint_col), leastsquares(tinyint_col, 1, 1)", + "leastsquares(uint_col, 1, 1), floor(uint_col), leastsquares(uint_col, 1, 1)", + "leastsquares(ubigint_col, 1, 1), floor(ubigint_col), leastsquares(ubigint_col, 1, 1)", + "leastsquares(usmallint_col, 1, 1), floor(usmallint_col), leastsquares(usmallint_col, 1, 1)", + "leastsquares(utinyint_col, 1, 1), floor(utinyint_col), leastsquares(utinyint_col, 1, 1)", + "top(int_col, 1), floor(int_col), top(int_col, 1)", + "top(bigint_col, 1), floor(bigint_col), top(bigint_col, 1)", + "top(float_col, 1), floor(float_col), top(float_col, 1)", + "top(double_col, 1), floor(double_col), top(double_col, 1)", + "top(smallint_col, 1), floor(smallint_col), top(smallint_col, 1)", + "top(tinyint_col, 1), floor(tinyint_col), top(tinyint_col, 1)", + "top(uint_col, 1), floor(uint_col), top(uint_col, 1)", + "top(ubigint_col, 1), floor(ubigint_col), top(ubigint_col, 1)", + "top(usmallint_col, 1), floor(usmallint_col), top(usmallint_col, 1)", + "top(utinyint_col, 1), floor(utinyint_col), top(utinyint_col, 1)", + "bottom(int_col, 1), floor(int_col), bottom(int_col, 1)", + "bottom(bigint_col, 1), floor(bigint_col), bottom(bigint_col, 1)", + "bottom(float_col, 1), floor(float_col), bottom(float_col, 1)", + "bottom(double_col, 1), floor(double_col), bottom(double_col, 1)", + "bottom(smallint_col, 1), floor(smallint_col), bottom(smallint_col, 1)", + "bottom(tinyint_col, 1), floor(tinyint_col), bottom(tinyint_col, 1)", + "bottom(uint_col, 1), floor(uint_col), bottom(uint_col, 1)", + "bottom(ubigint_col, 1), floor(ubigint_col), bottom(ubigint_col, 1)", + "bottom(usmallint_col, 1), floor(usmallint_col), bottom(usmallint_col, 1)", + "bottom(utinyint_col, 1), floor(utinyint_col), bottom(utinyint_col, 1)", + "percentile(int_col, 1), floor(int_col), percentile(int_col, 1)", + "percentile(bigint_col, 1), floor(bigint_col), percentile(bigint_col, 1)", + "percentile(float_col, 1), floor(float_col), percentile(float_col, 1)", + "percentile(double_col, 1), floor(double_col), percentile(double_col, 1)", + "percentile(smallint_col, 1), floor(smallint_col), percentile(smallint_col, 1)", + "percentile(tinyint_col, 1), floor(tinyint_col), percentile(tinyint_col, 1)", + "percentile(uint_col, 1), floor(uint_col), percentile(uint_col, 1)", + "percentile(ubigint_col, 1), floor(ubigint_col), percentile(ubigint_col, 1)", + "percentile(usmallint_col, 1), floor(usmallint_col), percentile(usmallint_col, 1)", + "percentile(utinyint_col, 1), floor(utinyint_col), percentile(utinyint_col, 1)", + "apercentile(int_col, 1), floor(int_col), apercentile(int_col, 1)", + "apercentile(bigint_col, 
1), floor(bigint_col), apercentile(bigint_col, 1)", + "apercentile(float_col, 1), floor(float_col), apercentile(float_col, 1)", + "apercentile(double_col, 1), floor(double_col), apercentile(double_col, 1)", + "apercentile(smallint_col, 1), floor(smallint_col), apercentile(smallint_col, 1)", + "apercentile(tinyint_col, 1), floor(tinyint_col), apercentile(tinyint_col, 1)", + "apercentile(uint_col, 1), floor(uint_col), apercentile(uint_col, 1)", + "apercentile(ubigint_col, 1), floor(ubigint_col), apercentile(ubigint_col, 1)", + "apercentile(usmallint_col, 1), floor(usmallint_col), apercentile(usmallint_col, 1)", + "apercentile(utinyint_col, 1), floor(utinyint_col), apercentile(utinyint_col, 1)", + "derivative(int_col, 1s, 0), floor(int_col), derivative(int_col, 1s, 0)", + "derivative(bigint_col, 1s, 0), floor(bigint_col), derivative(bigint_col, 1s, 0)", + "derivative(float_col, 1s, 0), floor(float_col), derivative(float_col, 1s, 0)", + "derivative(double_col, 1s, 0), floor(double_col), derivative(double_col, 1s, 0)", + "derivative(smallint_col, 1s, 0), floor(smallint_col), derivative(smallint_col, 1s, 0)", + "derivative(tinyint_col, 1s, 0), floor(tinyint_col), derivative(tinyint_col, 1s, 0)", + "derivative(uint_col, 1s, 0), floor(uint_col), derivative(uint_col, 1s, 0)", + "derivative(ubigint_col, 1s, 0), floor(ubigint_col), derivative(ubigint_col, 1s, 0)", + "derivative(usmallint_col, 1s, 0), floor(usmallint_col), derivative(usmallint_col, 1s, 0)", + "derivative(utinyint_col, 1s, 0), floor(utinyint_col), derivative(utinyint_col, 1s, 0)", + "1, floor(int_col), 1", + "1, floor(bigint_col), 1", + "1, floor(float_col), 1", + "1, floor(double_col), 1", + "1, floor(smallint_col), 1", + "1, floor(tinyint_col), 1", + "1, floor(uint_col), 1", + "1, floor(ubigint_col), 1", + "1, floor(usmallint_col), 1", + "1, floor(utinyint_col), 1", + "floor(int_col) as anyName", + "floor(bigint_col) as anyName", + "floor(float_col) as anyName", + "floor(double_col) as anyName", + "floor(smallint_col) as anyName", + "floor(tinyint_col) as anyName", + "floor(uint_col) as anyName", + "floor(ubigint_col) as anyName", + "floor(usmallint_col) as anyName", + "floor(utinyint_col) as anyName", + "distinct floor(int_col)", + "distinct floor(bigint_col)", + "distinct floor(float_col)", + "distinct floor(double_col)", + "distinct floor(smallint_col)", + "distinct floor(tinyint_col)", + "distinct floor(uint_col)", + "distinct floor(ubigint_col)", + "distinct floor(usmallint_col)", + "distinct floor(utinyint_col)", + ] + simple_select_command = [ + "floor(super.int_col)", + "floor(super.bigint_col)", + "floor(super.float_col)", + "floor(super.double_col)", + "floor(super.smallint_col)", + "floor(super.tinyint_col)", + "floor(super.uint_col)", + "floor(super.ubigint_col)", + "floor(super.usmallint_col)", + "floor(super.utinyint_col)", + "floor(t1.int_col)", + "floor(t1.bigint_col)", + "floor(t1.float_col)", + "floor(t1.double_col)", + "floor(t1.smallint_col)", + "floor(t1.tinyint_col)", + "floor(t1.uint_col)", + "floor(t1.ubigint_col)", + "floor(t1.usmallint_col)", + "floor(t1.utinyint_col)", + ] + from_command = [" from super", " from t1"] + advance_from_command = [ + " from super", " from t1", + " from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag" + ] + filter_command = [ + "", " session(ts, 1s)", " state_window(int_col)", " interval (1s)", + " interval (1s) sliding (1s)", " group by (ts)" + ] + fill_command = [ + "", " fill(prev)", " fill(next)", " fill(null)", " fill(1)", + " fill(linear)" + ] + 
tdSql.prepare() + tdSql.execute( + "create stable super (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ + double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ + uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ + float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + ) + tdSql.execute( + "create stable superb (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ + double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ + uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ + float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + ) + tdSql.execute( + "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomBigint(), self.randomDouble(), self.randomDouble(), + self.randomNchar(), self.randomSmallint(), self.randomTinyint(), + self.randomNchar(), self.randomUInt(), self.randomUBigint(), + self.randomUSmallint(), self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215892, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215893, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215894, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomBigint(), self.randomDouble(), self.randomDouble(), + self.randomNchar(), self.randomSmallint(), 
self.randomTinyint(), + self.randomNchar(), self.randomUInt(), self.randomUBigint(), + self.randomUSmallint(), self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215892, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215893, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215894, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + + for s in range(len(select_command)): + for f in range(len(from_command)): + sql = "select " + select_command[s] + from_command[f] + if (select_command[s] == "floor(int_col)"\ + or select_command[s] == "floor(bigint_col)"\ + or select_command[s] == "floor(smallint_col)" \ + or select_command[s] == "floor(float_col)"\ + or select_command[s] == "floor(double_col)"\ + or select_command[s] == "floor(tinyint_col)"\ + or select_command[s] == "floor(uint_col)"\ + or select_command[s] == "floor(ubigint_col)"\ + or select_command[s] == "floor(usmallint_col)"\ + or select_command[s] == "floor(utinyint_col)"\ + or select_command[s] == "1, floor(int_col), 1"\ + or select_command[s] == "1, floor(bigint_col), 1"\ + or select_command[s] == "1, floor(float_col), 1"\ + or select_command[s] == "1, floor(double_col), 1"\ + or select_command[s] == "1, floor(smallint_col), 1"\ + or select_command[s] == "1, floor(tinyint_col), 1"\ + or select_command[s] == "1, floor(uint_col), 1"\ + or select_command[s] == "1, floor(ubigint_col), 1"\ + or select_command[s] == "1, floor(usmallint_col), 1"\ + or select_command[s] == "1, floor(utinyint_col), 1"\ + or select_command[s] == "int_col, floor(int_col), int_col"\ + or select_command[s] == "bigint_col, floor(bigint_col), bigint_col"\ + or select_command[s] == "float_col, floor(float_col), float_col"\ + or select_command[s] == "double_col, floor(double_col), double_col"\ + or select_command[s] == "smallint_col, floor(smallint_col), smallint_col"\ + or select_command[s] == "tinyint_col, floor(tinyint_col), tinyint_col"\ + or select_command[s] == "uint_col, floor(uint_col), uint_col"\ + or select_command[s] == "ubigint_col, floor(ubigint_col), ubigint_col"\ + or select_command[s] == "usmallint_col, floor(usmallint_col), usmallint_col"\ + or select_command[s] == "utinyint_col, 
floor(utinyint_col), utinyint_col"\ + or select_command[s] == "floor(int_col) as anyName"\ + or select_command[s] == "floor(bigint_col) as anyName"\ + or select_command[s] == "floor(float_col) as anyName"\ + or select_command[s] == "floor(double_col) as anyName"\ + or select_command[s] == "floor(smallint_col) as anyName"\ + or select_command[s] == "floor(tinyint_col) as anyName"\ + or select_command[s] == "floor(uint_col) as anyName"\ + or select_command[s] == "floor(ubigint_col) as anyName"\ + or select_command[s] == "floor(usmallint_col) as anyName"\ + or select_command[s] == "floor(utinyint_col) as anyName"\ + or select_command[s] == "floor(int_col) + floor(int_col)"\ + or select_command[s] == "floor(bigint_col) + floor(bigint_col)"\ + or select_command[s] == "floor(float_col) + floor(float_col)"\ + or select_command[s] == "floor(double_col) + floor(double_col)"\ + or select_command[s] == "floor(smallint_col) + floor(smallint_col)"\ + or select_command[s] == "floor(tinyint_col) + floor(tinyint_col)"\ + or select_command[s] == "floor(uint_col) + floor(uint_col)"\ + or select_command[s] == "floor(ubigint_col) + floor(ubigint_col)"\ + or select_command[s] == "floor(usmallint_col) + floor(usmallint_col)"\ + or select_command[s] == "floor(utinyint_col) + floor(utinyint_col)"\ + or select_command[s] == "floor(int_col) - floor(int_col)"\ + or select_command[s] == "floor(bigint_col) - floor(bigint_col)"\ + or select_command[s] == "floor(float_col) - floor(float_col)"\ + or select_command[s] == "floor(double_col) - floor(double_col)"\ + or select_command[s] == "floor(smallint_col) - floor(smallint_col)"\ + or select_command[s] == "floor(tinyint_col) - floor(tinyint_col)"\ + or select_command[s] == "floor(uint_col) - floor(uint_col)"\ + or select_command[s] == "floor(ubigint_col) - floor(ubigint_col)"\ + or select_command[s] == "floor(usmallint_col) - floor(usmallint_col)"\ + or select_command[s] == "floor(utinyint_col) - floor(utinyint_col)"\ + or select_command[s] == "floor(int_col) * floor(int_col)"\ + or select_command[s] == "floor(bigint_col) * floor(bigint_col)"\ + or select_command[s] == "floor(float_col) * floor(float_col)"\ + or select_command[s] == "floor(double_col) * floor(double_col)"\ + or select_command[s] == "floor(smallint_col) * floor(smallint_col)"\ + or select_command[s] == "floor(tinyint_col) * floor(tinyint_col)"\ + or select_command[s] == "floor(uint_col) * floor(uint_col)"\ + or select_command[s] == "floor(ubigint_col) * floor(ubigint_col)"\ + or select_command[s] == "floor(usmallint_col) * floor(usmallint_col)"\ + or select_command[s] == "floor(utinyint_col) * floor(utinyint_col)"\ + or select_command[s] == "floor(int_col) / floor(int_col)"\ + or select_command[s] == "floor(bigint_col) / floor(bigint_col)"\ + or select_command[s] == "floor(float_col) / floor(float_col)"\ + or select_command[s] ==
"floor(double_col) / floor(double_col)"\ + or select_command[s] == "floor(smallint_col) / floor(smallint_col)"\ + or select_command[s] == "floor(tinyint_col) / floor(tinyint_col)"\ + or select_command[s] == "floor(uint_col) / floor(uint_col)"\ + or select_command[s] == "floor(ubigint_col) / floor(ubigint_col)"\ + or select_command[s] == "floor(usmallint_col) / floor(usmallint_col)"\ + or select_command[s] == "floor(utinyint_col) / floor(utinyint_col)"): + tdSql.query(sql) + else: + tdSql.error(sql) + for sim in range(len(simple_select_command)): + for fr in range(len(advance_from_command)): + for filter in range(len(filter_command)): + for fill in range(len(fill_command)): + sql = "select " + simple_select_command[ + sim] + advance_from_command[fr] + filter_command[ + filter] + fill_command[fill] + if sql == "select floor(t1.int_col) from t1"\ + or sql == "select floor(super.int_col) from super"\ + or sql == "select floor(t1.bigint_col) from t1"\ + or sql == "select floor(super.bigint_col) from super"\ + or sql == "select floor(t1.smallint_col) from t1"\ + or sql == "select floor(super.smallint_col) from super"\ + or sql == "select floor(t1.tinyint_col) from t1"\ + or sql == "select floor(super.tinyint_col) from super"\ + or sql == "select floor(t1.float_col) from t1"\ + or sql == "select floor(super.float_col) from super"\ + or sql == "select floor(t1.double_col) from t1"\ + or sql == "select floor(super.double_col) from super"\ + or sql == "select floor(t1.uint_col) from t1"\ + or sql == "select floor(super.uint_col) from super"\ + or sql == "select floor(t1.ubigint_col) from t1"\ + or sql == "select floor(super.ubigint_col) from super"\ + or sql == "select floor(t1.usmallint_col) from t1"\ + or sql == "select floor(super.usmallint_col) from super"\ + or sql == "select floor(t1.utinyint_col) from t1"\ + or sql == "select floor(super.utinyint_col) from super"\ + or sql == "select floor(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select floor(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag": + tdSql.query(sql) + else: + tdSql.error(sql) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_round.py b/tests/pytest/functions/function_round.py new file mode 100644 index 
0000000000000000000000000000000000000000..93cace49ad8d16c6491584ed530b3dff07ef6fe4 --- /dev/null +++ b/tests/pytest/functions/function_round.py @@ -0,0 +1,1518 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def randomInt(self): + return random.randint(-2147483647, 2147483647) + + def randomUInt(self): + return random.randint(0, 4294967294) + + def randomBigint(self): + return random.randint(-2**63 + 1, 2**63 - 1) + + def randomUBigint(self): + return random.randint(0, 18446744073709551614) + + def randomDouble(self): + return random.random() + + def randomNchar(self): + return random.choice('abcdefghijklmnopqrstuvwxyz') + + def randomSmallint(self): + return random.randint(-32767, 32767) + + def randomUSmallint(self): + return random.randint(0, 65534) + + def randomTinyint(self): + return random.randint(-127, 127) + + def randomUTinyint(self): + return random.randint(0, 254) + + def run(self): + select_command = [ + "round(ts)", + "round(timestamp_col)", + "round(int_col)", + "round(bigint_col)", + "round(float_col)", + "round(double_col)", + "round(binary_col)", + "round(smallint_col)", + "round(tinyint_col)", + "round(bool_col)", + "round(nchar_col)", + "round(uint_col)", + "round(ubigint_col)", + "round(usmallint_col)", + "round(utinyint_col)", + "round(timestamp_tag)", + "round(int_tag)", + "round(bigint_tag)", + "round(float_tag)", + "round(double_tag)", + "round(binary_tag)", + "round(smallint_tag)", + "round(tinyint_tag)", + "round(bool_tag)", + "round(nchar_tag)", + "round(uint_tag)", + "round(ubigint_tag)", + "round(usmallint_tag)", + "round(utinyint_tag)", + "count(round(int_col))", + "count(round(bigint_col))", + "count(round(float_col))", + "count(round(double_col))", + "count(round(smallint_col))", + "count(round(tinyint_col))", + "count(round(uint_col))", + "count(round(ubigint_col))", + "count(round(usmallint_col))", + "count(round(utinyint_col))", + "avg(round(int_col))", + "avg(round(bigint_col))", + "avg(round(float_col))", + "avg(round(double_col))", + "avg(round(smallint_col))", + "avg(round(tinyint_col))", + "avg(round(uint_col))", + "avg(round(ubigint_col))", + "avg(round(usmallint_col))", + "avg(round(utinyint_col))", + "twa(round(int_col))", + "twa(round(bigint_col))", + "twa(round(float_col))", + "twa(round(double_col))", + "twa(round(smallint_col))", + "twa(round(tinyint_col))", + "twa(round(uint_col))", + "twa(round(ubigint_col))", + "twa(round(usmallint_col))", + "twa(round(utinyint_col))", + "sum(round(int_col))", + "sum(round(bigint_col))", + "sum(round(float_col))", + "sum(round(double_col))", + "sum(round(smallint_col))", + "sum(round(tinyint_col))", + "sum(round(uint_col))", + "sum(round(ubigint_col))", + "sum(round(usmallint_col))", + "sum(round(utinyint_col))", + "stddev(round(int_col))", + "stddev(round(bigint_col))", + 
"stddev(round(float_col))", + "stddev(round(double_col))", + "stddev(round(smallint_col))", + "stddev(round(tinyint_col))", + "stddev(round(uint_col))", + "stddev(round(ubigint_col))", + "stddev(round(usmallint_col))", + "stddev(round(utinyint_col))", + "irate(round(int_col))", + "irate(round(bigint_col))", + "irate(round(float_col))", + "irate(round(double_col))", + "irate(round(smallint_col))", + "irate(round(tinyint_col))", + "irate(round(uint_col))", + "irate(round(ubigint_col))", + "irate(round(usmallint_col))", + "irate(round(utinyint_col))", + "leastsquares(round(int_col), 1, 1)", + "leastsquares(round(bigint_col), 1, 1)", + "leastsquares(round(float_col), 1, 1)", + "leastsquares(round(double_col), 1, 1)", + "leastsquares(round(smallint_col), 1, 1)", + "leastsquares(round(tinyint_col), 1, 1)", + "leastsquares(round(uint_col), 1, 1)", + "leastsquares(round(ubigint_col), 1, 1)", + "leastsquares(round(usmallint_col), 1, 1)", + "leastsquares(round(utinyint_col), 1, 1)", + "min(round(int_col))", + "min(round(bigint_col))", + "min(round(float_col))", + "min(round(double_col))", + "min(round(smallint_col))", + "min(round(tinyint_col))", + "min(round(uint_col))", + "min(round(ubigint_col))", + "min(round(usmallint_col))", + "min(round(utinyint_col))", + "max(round(int_col))", + "max(round(bigint_col))", + "max(round(float_col))", + "max(round(double_col))", + "max(round(smallint_col))", + "max(round(tinyint_col))", + "max(round(uint_col))", + "max(round(ubigint_col))", + "max(round(usmallint_col))", + "max(round(utinyint_col))", + "first(round(int_col))", + "first(round(bigint_col))", + "first(round(float_col))", + "first(round(double_col))", + "first(round(smallint_col))", + "first(round(tinyint_col))", + "first(round(uint_col))", + "first(round(ubigint_col))", + "first(round(usmallint_col))", + "first(round(utinyint_col))", + "last(round(int_col))", + "last(round(bigint_col))", + "last(round(float_col))", + "last(round(double_col))", + "last(round(smallint_col))", + "last(round(tinyint_col))", + "last(round(uint_col))", + "last(round(ubigint_col))", + "last(round(usmallint_col))", + "last(round(utinyint_col))", + "top(round(int_col), 1)", + "top(round(bigint_col), 1)", + "top(round(float_col), 1)", + "top(round(double_col), 1)", + "top(round(smallint_col), 1)", + "top(round(tinyint_col), 1)", + "top(round(uint_col), 1)", + "top(round(ubigint_col), 1)", + "top(round(usmallint_col), 1)", + "top(round(utinyint_col), 1)", + "bottom(round(int_col), 1)", + "bottom(round(bigint_col), 1)", + "bottom(round(float_col), 1)", + "bottom(round(double_col), 1)", + "bottom(round(smallint_col), 1)", + "bottom(round(tinyint_col), 1)", + "bottom(round(uint_col), 1)", + "bottom(round(ubigint_col), 1)", + "bottom(round(usmallint_col), 1)", + "bottom(round(utinyint_col), 1)", + "percentile(round(int_col), 20)", + "percentile(round(bigint_col), 20)", + "percentile(round(float_col), 20)", + "percentile(round(double_col), 20)", + "percentile(round(smallint_col), 20)", + "percentile(round(tinyint_col), 20)", + "percentile(round(uint_col), 20)", + "percentile(round(ubigint_col), 20)", + "percentile(round(usmallint_col), 20)", + "percentile(round(utinyint_col), 20)", + "apercentile(round(int_col), 20)", + "apercentile(round(bigint_col), 20)", + "apercentile(round(float_col), 20)", + "apercentile(round(double_col), 20)", + "apercentile(round(smallint_col), 20)", + "apercentile(round(tinyint_col), 20)", + "apercentile(round(uint_col), 20)", + "apercentile(round(ubigint_col), 20)", + "apercentile(round(usmallint_col), 
20)", + "apercentile(round(utinyint_col), 20)", + "last_row(round(int_col))", + "last_row(round(bigint_col))", + "last_row(round(float_col))", + "last_row(round(double_col))", + "last_row(round(smallint_col))", + "last_row(round(tinyint_col))", + "last_row(round(uint_col))", + "last_row(round(ubigint_col))", + "last_row(round(usmallint_col))", + "last_row(round(utinyint_col))", + "interp(round(int_col))", + "interp(round(bigint_col))", + "interp(round(float_col))", + "interp(round(double_col))", + "interp(round(smallint_col))", + "interp(round(tinyint_col))", + "interp(round(uint_col))", + "interp(round(ubigint_col))", + "interp(round(usmallint_col))", + "interp(round(utinyint_col))", + "diff(round(int_col))", + "diff(round(bigint_col))", + "diff(round(float_col))", + "diff(round(double_col))", + "diff(round(smallint_col))", + "diff(round(tinyint_col))", + "diff(round(uint_col))", + "diff(round(ubigint_col))", + "diff(round(usmallint_col))", + "diff(round(utinyint_col))", + "spread(round(int_col))", + "spread(round(bigint_col))", + "spread(round(float_col))", + "spread(round(double_col))", + "spread(round(smallint_col))", + "spread(round(tinyint_col))", + "spread(round(uint_col))", + "spread(round(ubigint_col))", + "spread(round(usmallint_col))", + "spread(round(utinyint_col))", + "derivative(round(int_col), 1s, 0)", + "derivative(round(bigint_col), 1s, 0)", + "derivative(round(float_col), 1s, 0)", + "derivative(round(double_col), 1s, 0)", + "derivative(round(smallint_col), 1s, 0)", + "derivative(round(tinyint_col), 1s, 0)", + "derivative(round(uint_col), 1s, 0)", + "derivative(round(ubigint_col), 1s, 0)", + "derivative(round(usmallint_col), 1s, 0)", + "derivative(round(utinyint_col), 1s, 0)", + "round(int_col) - round(int_col)", + "round(bigint_col) - round(bigint_col)", + "round(float_col) - round(float_col)", + "round(double_col) - round(double_col)", + "round(smallint_col) - round(smallint_col)", + "round(tinyint_col) - round(tinyint_col)", + "round(uint_col) - round(uint_col)", + "round(ubigint_col) - round(ubigint_col)", + "round(usmallint_col) - round(usmallint_col)", + "round(utinyint_col) - round(utinyint_col)", + "round(int_col) / round(int_col)", + "round(bigint_col) / round(bigint_col)", + "round(float_col) / round(float_col)", + "round(double_col) / round(double_col)", + "round(smallint_col) / round(smallint_col)", + "round(tinyint_col) / round(tinyint_col)", + "round(uint_col) / round(uint_col)", + "round(ubigint_col) / round(ubigint_col)", + "round(usmallint_col) / round(usmallint_col)", + "round(utinyint_col) / round(utinyint_col)", + "round(int_col) * round(int_col)", + "round(bigint_col) * round(bigint_col)", + "round(float_col) * round(float_col)", + "round(double_col) * round(double_col)", + "round(smallint_col) * round(smallint_col)", + "round(tinyint_col) * round(tinyint_col)", + "round(uint_col) * round(uint_col)", + "round(ubigint_col) * round(ubigint_col)", + "round(usmallint_col) * round(usmallint_col)", + "round(utinyint_col) * round(utinyint_col)", + "round(count(ts))", + "round(count(timestamp_col))", + "round(count(int_col))", + "round(count(bigint_col))", + "round(count(float_col))", + "round(count(double_col))", + "round(count(binary_col))", + "round(count(smallint_col))", + "round(count(tinyint_col))", + "round(count(bool_col))", + "round(count(nchar_col))", + "round(count(uint_col))", + "round(count(ubigint_col))", + "round(count(usmallint_col))", + "round(count(utinyint_col))", + "round(count(timestamp_tag))", + "round(count(int_tag))", + 
"round(count(bigint_tag))", + "round(count(float_tag))", + "round(count(double_tag))", + "round(count(binary_tag))", + "round(count(smallint_tag))", + "round(count(tinyint_tag))", + "round(count(bool_tag))", + "round(count(nchar_tag))", + "round(count(uint_tag))", + "round(count(ubigint_tag))", + "round(count(usmallint_tag))", + "round(count(utinyint_tag))", + "round(avg(ts))", + "round(avg(timestamp_col))", + "round(avg(int_col))", + "round(avg(bigint_col))", + "round(avg(float_col))", + "round(avg(double_col))", + "round(avg(binary_col))", + "round(avg(smallint_col))", + "round(avg(tinyint_col))", + "round(avg(bool_col))", + "round(avg(nchar_col))", + "round(avg(uint_col))", + "round(avg(ubigint_col))", + "round(avg(usmallint_col))", + "round(avg(utinyint_col))", + "round(avg(timestamp_tag))", + "round(avg(int_tag))", + "round(avg(bigint_tag))", + "round(avg(float_tag))", + "round(avg(double_tag))", + "round(avg(binary_tag))", + "round(avg(smallint_tag))", + "round(avg(tinyint_tag))", + "round(avg(bool_tag))", + "round(avg(nchar_tag))", + "round(avg(uint_tag))", + "round(avg(ubigint_tag))", + "round(avg(usmallint_tag))", + "round(avg(utinyint_tag))", + "round(twa(ts))", + "round(twa(timestamp_col))", + "round(twa(int_col))", + "round(twa(bigint_col))", + "round(twa(float_col))", + "round(twa(double_col))", + "round(twa(binary_col))", + "round(twa(smallint_col))", + "round(twa(tinyint_col))", + "round(twa(bool_col))", + "round(twa(nchar_col))", + "round(twa(uint_col))", + "round(twa(ubigint_col))", + "round(twa(usmallint_col))", + "round(twa(utinyint_col))", + "round(twa(timestamp_tag))", + "round(twa(int_tag))", + "round(twa(bigint_tag))", + "round(twa(float_tag))", + "round(twa(double_tag))", + "round(twa(binary_tag))", + "round(twa(smallint_tag))", + "round(twa(tinyint_tag))", + "round(twa(bool_tag))", + "round(twa(nchar_tag))", + "round(twa(uint_tag))", + "round(twa(ubigint_tag))", + "round(twa(usmallint_tag))", + "round(twa(utinyint_tag))", + "round(sum(ts))", + "round(sum(timestamp_col))", + "round(sum(int_col))", + "round(sum(bigint_col))", + "round(sum(float_col))", + "round(sum(double_col))", + "round(sum(binary_col))", + "round(sum(smallint_col))", + "round(sum(tinyint_col))", + "round(sum(bool_col))", + "round(sum(nchar_col))", + "round(sum(uint_col))", + "round(sum(ubigint_col))", + "round(sum(usmallint_col))", + "round(sum(utinyint_col))", + "round(sum(timestamp_tag))", + "round(sum(int_tag))", + "round(sum(bigint_tag))", + "round(sum(float_tag))", + "round(sum(double_tag))", + "round(sum(binary_tag))", + "round(sum(smallint_tag))", + "round(sum(tinyint_tag))", + "round(sum(bool_tag))", + "round(sum(nchar_tag))", + "round(sum(uint_tag))", + "round(sum(ubigint_tag))", + "round(sum(usmallint_tag))", + "round(sum(utinyint_tag))", + "round(stddev(ts))", + "round(stddev(timestamp_col))", + "round(stddev(int_col))", + "round(stddev(bigint_col))", + "round(stddev(float_col))", + "round(stddev(double_col))", + "round(stddev(binary_col))", + "round(stddev(smallint_col))", + "round(stddev(tinyint_col))", + "round(stddev(bool_col))", + "round(stddev(nchar_col))", + "round(stddev(uint_col))", + "round(stddev(ubigint_col))", + "round(stddev(usmallint_col))", + "round(stddev(utinyint_col))", + "round(stddev(timestamp_tag))", + "round(stddev(int_tag))", + "round(stddev(bigint_tag))", + "round(stddev(float_tag))", + "round(stddev(double_tag))", + "round(stddev(binary_tag))", + "round(stddev(smallint_tag))", + "round(stddev(tinyint_tag))", + "round(stddev(bool_tag))", + 
"round(stddev(nchar_tag))", + "round(stddev(uint_tag))", + "round(stddev(ubigint_tag))", + "round(stddev(usmallint_tag))", + "round(stddev(utinyint_tag))", + "round(leastsquares(ts, 1, 1))", + "round(leastsquares(timestamp_col, 1, 1))", + "round(leastsquares(int_col, 1, 1))", + "round(leastsquares(bigint_col, 1, 1))", + "round(leastsquares(float_col, 1, 1))", + "round(leastsquares(double_col, 1, 1))", + "round(leastsquares(binary_col, 1, 1))", + "round(leastsquares(smallint_col, 1, 1))", + "round(leastsquares(tinyint_col, 1, 1))", + "round(leastsquares(bool_col, 1, 1))", + "round(leastsquares(nchar_col, 1, 1))", + "round(leastsquares(uint_col, 1, 1))", + "round(leastsquares(ubigint_col, 1, 1))", + "round(leastsquares(usmallint_col, 1, 1))", + "round(leastsquares(utinyint_col, 1, 1))", + "round(leastsquares(timestamp_tag, 1, 1))", + "round(leastsquares(int_tag, 1, 1))", + "round(leastsquares(bigint_tag, 1, 1))", + "round(leastsquares(float_tag, 1, 1))", + "round(leastsquares(double_tag, 1, 1))", + "round(leastsquares(binary_tag, 1, 1))", + "round(leastsquares(smallint_tag, 1, 1))", + "round(leastsquares(tinyint_tag, 1, 1))", + "round(leastsquares(bool_tag, 1, 1))", + "round(leastsquares(nchar_tag, 1, 1))", + "round(leastsquares(uint_tag, 1, 1))", + "round(leastsquares(ubigint_tag, 1, 1))", + "round(leastsquares(usmallint_tag, 1, 1))", + "round(leastsquares(utinyint_tag, 1, 1))", + "round(irate(ts))", + "round(irate(timestamp_col))", + "round(irate(int_col))", + "round(irate(bigint_col))", + "round(irate(float_col))", + "round(irate(double_col))", + "round(irate(binary_col))", + "round(irate(smallint_col))", + "round(irate(tinyint_col))", + "round(irate(bool_col))", + "round(irate(nchar_col))", + "round(irate(uint_col))", + "round(irate(ubigint_col))", + "round(irate(usmallint_col))", + "round(irate(utinyint_col))", + "round(irate(timestamp_tag))", + "round(irate(int_tag))", + "round(irate(bigint_tag))", + "round(irate(float_tag))", + "round(irate(double_tag))", + "round(irate(binary_tag))", + "round(irate(smallint_tag))", + "round(irate(tinyint_tag))", + "round(irate(bool_tag))", + "round(irate(nchar_tag))", + "round(irate(uint_tag))", + "round(irate(ubigint_tag))", + "round(irate(usmallint_tag))", + "round(irate(utinyint_tag))", + "round(min(ts))", + "round(min(timestamp_col))", + "round(min(int_col))", + "round(min(bigint_col))", + "round(min(float_col))", + "round(min(double_col))", + "round(min(binary_col))", + "round(min(smallint_col))", + "round(min(tinyint_col))", + "round(min(bool_col))", + "round(min(nchar_col))", + "round(min(uint_col))", + "round(min(ubigint_col))", + "round(min(usmallint_col))", + "round(min(utinyint_col))", + "round(min(timestamp_tag))", + "round(min(int_tag))", + "round(min(bigint_tag))", + "round(min(float_tag))", + "round(min(double_tag))", + "round(min(binary_tag))", + "round(min(smallint_tag))", + "round(min(tinyint_tag))", + "round(min(bool_tag))", + "round(min(nchar_tag))", + "round(min(uint_tag))", + "round(min(ubigint_tag))", + "round(min(usmallint_tag))", + "round(min(utinyint_tag))", + "round(max(ts))", + "round(max(timestamp_col))", + "round(max(int_col))", + "round(max(bigint_col))", + "round(max(float_col))", + "round(max(double_col))", + "round(max(binary_col))", + "round(max(smallint_col))", + "round(max(tinyint_col))", + "round(max(bool_col))", + "round(max(nchar_col))", + "round(max(uint_col))", + "round(max(ubigint_col))", + "round(max(usmallint_col))", + "round(max(utinyint_col))", + "round(max(timestamp_tag))", + "round(max(int_tag))", + 
"round(max(bigint_tag))", + "round(max(float_tag))", + "round(max(double_tag))", + "round(max(binary_tag))", + "round(max(smallint_tag))", + "round(max(tinyint_tag))", + "round(max(bool_tag))", + "round(max(nchar_tag))", + "round(max(uint_tag))", + "round(max(ubigint_tag))", + "round(max(usmallint_tag))", + "round(max(utinyint_tag))", + "round(first(ts))", + "round(first(timestamp_col))", + "round(first(int_col))", + "round(first(bigint_col))", + "round(first(float_col))", + "round(first(double_col))", + "round(first(binary_col))", + "round(first(smallint_col))", + "round(first(tinyint_col))", + "round(first(bool_col))", + "round(first(nchar_col))", + "round(first(uint_col))", + "round(first(ubigint_col))", + "round(first(usmallint_col))", + "round(first(utinyint_col))", + "round(first(timestamp_tag))", + "round(first(int_tag))", + "round(first(bigint_tag))", + "round(first(float_tag))", + "round(first(double_tag))", + "round(first(binary_tag))", + "round(first(smallint_tag))", + "round(first(tinyint_tag))", + "round(first(bool_tag))", + "round(first(nchar_tag))", + "round(first(uint_tag))", + "round(first(ubigint_tag))", + "round(first(usmallint_tag))", + "round(first(utinyint_tag))", + "round(last(ts))", + "round(last(timestamp_col))", + "round(last(int_col))", + "round(last(bigint_col))", + "round(last(float_col))", + "round(last(double_col))", + "round(last(binary_col))", + "round(last(smallint_col))", + "round(last(tinyint_col))", + "round(last(bool_col))", + "round(last(nchar_col))", + "round(last(uint_col))", + "round(last(ubigint_col))", + "round(last(usmallint_col))", + "round(last(utinyint_col))", + "round(last(timestamp_tag))", + "round(last(int_tag))", + "round(last(bigint_tag))", + "round(last(float_tag))", + "round(last(double_tag))", + "round(last(binary_tag))", + "round(last(smallint_tag))", + "round(last(tinyint_tag))", + "round(last(bool_tag))", + "round(last(nchar_tag))", + "round(last(uint_tag))", + "round(last(ubigint_tag))", + "round(last(usmallint_tag))", + "round(last(utinyint_tag))", + "round(top(ts, 1))", + "round(top(timestamp_col, 1))", + "round(top(int_col, 1))", + "round(top(bigint_col, 1))", + "round(top(float_col, 1))", + "round(top(double_col, 1))", + "round(top(binary_col, 1))", + "round(top(smallint_col, 1))", + "round(top(tinyint_col, 1))", + "round(top(bool_col, 1))", + "round(top(nchar_col, 1))", + "round(top(uint_col, 1))", + "round(top(ubigint_col, 1))", + "round(top(usmallint_col, 1))", + "round(top(utinyint_col, 1))", + "round(top(timestamp_tag, 1))", + "round(top(int_tag, 1))", + "round(top(bigint_tag, 1))", + "round(top(float_tag, 1))", + "round(top(double_tag, 1))", + "round(top(binary_tag, 1))", + "round(top(smallint_tag, 1))", + "round(top(tinyint_tag, 1))", + "round(top(bool_tag, 1))", + "round(top(nchar_tag, 1))", + "round(top(uint_tag, 1))", + "round(top(ubigint_tag, 1))", + "round(top(usmallint_tag, 1))", + "round(top(utinyint_tag, 1))", + "round(bottom(ts, 1))", + "round(bottom(timestamp_col, 1))", + "round(bottom(int_col, 1))", + "round(bottom(bigint_col, 1))", + "round(bottom(float_col, 1))", + "round(bottom(double_col, 1))", + "round(bottom(binary_col, 1))", + "round(bottom(smallint_col, 1))", + "round(bottom(tinyint_col, 1))", + "round(bottom(bool_col, 1))", + "round(bottom(nchar_col, 1))", + "round(bottom(uint_col, 1))", + "round(bottom(ubigint_col, 1))", + "round(bottom(usmallint_col, 1))", + "round(bottom(utinyint_col, 1))", + "round(bottom(timestamp_tag, 1))", + "round(bottom(int_tag, 1))", + "round(bottom(bigint_tag, 1))", + 
"round(bottom(float_tag, 1))", + "round(bottom(double_tag, 1))", + "round(bottom(binary_tag, 1))", + "round(bottom(smallint_tag, 1))", + "round(bottom(tinyint_tag, 1))", + "round(bottom(bool_tag, 1))", + "round(bottom(nchar_tag, 1))", + "round(bottom(uint_tag, 1))", + "round(bottom(ubigint_tag, 1))", + "round(bottom(usmallint_tag, 1))", + "round(bottom(utinyint_tag, 1))", + "round(percentile(ts, 1))", + "round(percentile(timestamp_col, 1))", + "round(percentile(int_col, 1))", + "round(percentile(bigint_col, 1))", + "round(percentile(float_col, 1))", + "round(percentile(double_col, 1))", + "round(percentile(binary_col, 1))", + "round(percentile(smallint_col, 1))", + "round(percentile(tinyint_col, 1))", + "round(percentile(bool_col, 1))", + "round(percentile(nchar_col, 1))", + "round(percentile(uint_col, 1))", + "round(percentile(ubigint_col, 1))", + "round(percentile(usmallint_col, 1))", + "round(percentile(utinyint_col, 1))", + "round(percentile(timestamp_tag, 1))", + "round(percentile(int_tag, 1))", + "round(percentile(bigint_tag, 1))", + "round(percentile(float_tag, 1))", + "round(percentile(double_tag, 1))", + "round(percentile(binary_tag, 1))", + "round(percentile(smallint_tag, 1))", + "round(percentile(tinyint_tag, 1))", + "round(percentile(bool_tag, 1))", + "round(percentile(nchar_tag, 1))", + "round(percentile(uint_tag, 1))", + "round(percentile(ubigint_tag, 1))", + "round(percentile(usmallint_tag, 1))", + "round(percentile(utinyint_tag, 1))", + "round(apercentile(ts, 1))", + "round(apercentile(timestamp_col, 1))", + "round(apercentile(int_col, 1))", + "round(apercentile(bigint_col, 1))", + "round(apercentile(float_col, 1))", + "round(apercentile(double_col, 1))", + "round(apercentile(binary_col, 1))", + "round(apercentile(smallint_col, 1))", + "round(apercentile(tinyint_col, 1))", + "round(apercentile(bool_col, 1))", + "round(apercentile(nchar_col, 1))", + "round(apercentile(uint_col, 1))", + "round(apercentile(ubigint_col, 1))", + "round(apercentile(usmallint_col, 1))", + "round(apercentile(utinyint_col, 1))", + "round(apercentile(timestamp_tag, 1))", + "round(apercentile(int_tag, 1))", + "round(apercentile(bigint_tag, 1))", + "round(apercentile(float_tag, 1))", + "round(apercentile(double_tag, 1))", + "round(apercentile(binary_tag, 1))", + "round(apercentile(smallint_tag, 1))", + "round(apercentile(tinyint_tag, 1))", + "round(apercentile(bool_tag, 1))", + "round(apercentile(nchar_tag, 1))", + "round(apercentile(uint_tag, 1))", + "round(apercentile(ubigint_tag, 1))", + "round(apercentile(usmallint_tag, 1))", + "round(apercentile(utinyint_tag, 1))", + "round(last_row(ts))", + "round(last_row(timestamp_col))", + "round(last_row(int_col))", + "round(last_row(bigint_col))", + "round(last_row(float_col))", + "round(last_row(double_col))", + "round(last_row(binary_col))", + "round(last_row(smallint_col))", + "round(last_row(tinyint_col))", + "round(last_row(bool_col))", + "round(last_row(nchar_col))", + "round(last_row(uint_col))", + "round(last_row(ubigint_col))", + "round(last_row(usmallint_col))", + "round(last_row(utinyint_col))", + "round(last_row(timestamp_tag))", + "round(last_row(int_tag))", + "round(last_row(bigint_tag))", + "round(last_row(float_tag))", + "round(last_row(double_tag))", + "round(last_row(binary_tag))", + "round(last_row(smallint_tag))", + "round(last_row(tinyint_tag))", + "round(last_row(bool_tag))", + "round(last_row(nchar_tag))", + "round(last_row(uint_tag))", + "round(last_row(ubigint_tag))", + "round(last_row(usmallint_tag))", + 
"round(last_row(utinyint_tag))", + "round(interp(ts))", + "round(interp(timestamp_col))", + "round(interp(int_col))", + "round(interp(bigint_col))", + "round(interp(float_col))", + "round(interp(double_col))", + "round(interp(binary_col))", + "round(interp(smallint_col))", + "round(interp(tinyint_col))", + "round(interp(bool_col))", + "round(interp(nchar_col))", + "round(interp(uint_col))", + "round(interp(ubigint_col))", + "round(interp(usmallint_col))", + "round(interp(utinyint_col))", + "round(interp(timestamp_tag))", + "round(interp(int_tag))", + "round(interp(bigint_tag))", + "round(interp(float_tag))", + "round(interp(double_tag))", + "round(interp(binary_tag))", + "round(interp(smallint_tag))", + "round(interp(tinyint_tag))", + "round(interp(bool_tag))", + "round(interp(nchar_tag))", + "round(interp(uint_tag))", + "round(interp(ubigint_tag))", + "round(interp(usmallint_tag))", + "round(interp(utinyint_tag))", + "round(diff(ts))", + "round(diff(timestamp_col))", + "round(diff(int_col))", + "round(diff(bigint_col))", + "round(diff(float_col))", + "round(diff(double_col))", + "round(diff(binary_col))", + "round(diff(smallint_col))", + "round(diff(tinyint_col))", + "round(diff(bool_col))", + "round(diff(nchar_col))", + "round(diff(uint_col))", + "round(diff(ubigint_col))", + "round(diff(usmallint_col))", + "round(diff(utinyint_col))", + "round(diff(timestamp_tag))", + "round(diff(int_tag))", + "round(diff(bigint_tag))", + "round(diff(float_tag))", + "round(diff(double_tag))", + "round(diff(binary_tag))", + "round(diff(smallint_tag))", + "round(diff(tinyint_tag))", + "round(diff(bool_tag))", + "round(diff(nchar_tag))", + "round(diff(uint_tag))", + "round(diff(ubigint_tag))", + "round(diff(usmallint_tag))", + "round(diff(utinyint_tag))", + "round(spread(ts))", + "round(spread(timestamp_col))", + "round(spread(int_col))", + "round(spread(bigint_col))", + "round(spread(float_col))", + "round(spread(double_col))", + "round(spread(binary_col))", + "round(spread(smallint_col))", + "round(spread(tinyint_col))", + "round(spread(bool_col))", + "round(spread(nchar_col))", + "round(spread(uint_col))", + "round(spread(ubigint_col))", + "round(spread(usmallint_col))", + "round(spread(utinyint_col))", + "round(spread(timestamp_tag))", + "round(spread(int_tag))", + "round(spread(bigint_tag))", + "round(spread(float_tag))", + "round(spread(double_tag))", + "round(spread(binary_tag))", + "round(spread(smallint_tag))", + "round(spread(tinyint_tag))", + "round(spread(bool_tag))", + "round(spread(nchar_tag))", + "round(spread(uint_tag))", + "round(spread(ubigint_tag))", + "round(spread(usmallint_tag))", + "round(spread(utinyint_tag))", + "round(derivative(ts, 1s, 0))", + "round(derivative(timestamp_col, 1s, 0))", + "round(derivative(int_col, 1s, 0))", + "round(derivative(bigint_col, 1s, 0))", + "round(derivative(float_col, 1s, 0))", + "round(derivative(double_col, 1s, 0))", + "round(derivative(binary_col, 1s, 0))", + "round(derivative(smallint_col, 1s, 0))", + "round(derivative(tinyint_col, 1s, 0))", + "round(derivative(bool_col, 1s, 0))", + "round(derivative(nchar_col, 1s, 0))", + "round(derivative(uint_col, 1s, 0))", + "round(derivative(ubigint_col, 1s, 0))", + "round(derivative(usmallint_col, 1s, 0))", + "round(derivative(utinyint_col, 1s, 0))", + "round(derivative(timestamp_tag, 1s, 0))", + "round(derivative(int_tag, 1s, 0))", + "round(derivative(bigint_tag, 1s, 0))", + "round(derivative(float_tag, 1s, 0))", + "round(derivative(double_tag, 1s, 0))", + "round(derivative(binary_tag, 1s, 0))", + 
"round(derivative(smallint_tag, 1s, 0))", + "round(derivative(tinyint_tag, 1s, 0))", + "round(derivative(bool_tag, 1s, 0))", + "round(derivative(nchar_tag, 1s, 0))", + "round(derivative(uint_tag, 1s, 0))", + "round(derivative(ubigint_tag, 1s, 0))", + "round(derivative(usmallint_tag, 1s, 0))", + "round(derivative(utinyint_tag, 1s, 0))", + "round(ts + ts)", + "round(timestamp_col + timestamp_col)", + "round(int_col + int_col)", + "round(bigint_col + bigint_col)", + "round(float_col + float_col)", + "round(double_col + double_col)", + "round(binary_col + binary_col)", + "round(smallint_col + smallint_col)", + "round(tinyint_col + tinyint_col)", + "round(bool_col + bool_col)", + "round(nchar_col + nchar_col)", + "round(uint_col + uint_col)", + "round(ubigint_col + ubigint_col)", + "round(usmallint_col + usmallint_col)", + "round(utinyint_col + utinyint_col)", + "round(timestamp_tag + timestamp_tag)", + "round(int_tag + int_tag)", + "round(bigint_tag + bigint_tag)", + "round(float_tag + float_tag)", + "round(double_tag + double_tag)", + "round(binary_tag + binary_tag)", + "round(smallint_tag + smallint_tag)", + "round(tinyint_tag + tinyint_tag)", + "round(bool_tag + bool_tag)", + "round(nchar_tag + nchar_tag)", + "round(uint_tag + uint_tag)", + "round(ubigint_tag + ubigint_tag)", + "round(usmallint_tag + usmallint_tag)", + "round(utinyint_tag + utinyint_tag)", + "round(ts - ts)", + "round(timestamp_col - timestamp_col)", + "round(int_col - int_col)", + "round(bigint_col - bigint_col)", + "round(float_col - float_col)", + "round(double_col - double_col)", + "round(binary_col - binary_col)", + "round(smallint_col - smallint_col)", + "round(tinyint_col - tinyint_col)", + "round(bool_col - bool_col)", + "round(nchar_col - nchar_col)", + "round(uint_col - uint_col)", + "round(ubigint_col - ubigint_col)", + "round(usmallint_col - usmallint_col)", + "round(utinyint_col - utinyint_col)", + "round(timestamp_tag - timestamp_tag)", + "round(int_tag - int_tag)", + "round(bigint_tag - bigint_tag)", + "round(float_tag - float_tag)", + "round(double_tag - double_tag)", + "round(binary_tag - binary_tag)", + "round(smallint_tag - smallint_tag)", + "round(tinyint_tag - tinyint_tag)", + "round(bool_tag - bool_tag)", + "round(nchar_tag - nchar_tag)", + "round(uint_tag - uint_tag)", + "round(ubigint_tag - ubigint_tag)", + "round(usmallint_tag - usmallint_tag)", + "round(utinyint_tag - utinyint_tag)", + "round(ts * ts)", + "round(timestamp_col * timestamp_col)", + "round(int_col * int_col)", + "round(bigint_col * bigint_col)", + "round(float_col * float_col)", + "round(double_col * double_col)", + "round(binary_col * binary_col)", + "round(smallint_col * smallint_col)", + "round(tinyint_col * tinyint_col)", + "round(bool_col * bool_col)", + "round(nchar_col * nchar_col)", + "round(uint_col * uint_col)", + "round(ubigint_col * ubigint_col)", + "round(usmallint_col * usmallint_col)", + "round(utinyint_col * utinyint_col)", + "round(timestamp_tag * timestamp_tag)", + "round(int_tag * int_tag)", + "round(bigint_tag * bigint_tag)", + "round(float_tag * float_tag)", + "round(double_tag * double_tag)", + "round(binary_tag * binary_tag)", + "round(smallint_tag * smallint_tag)", + "round(tinyint_tag * tinyint_tag)", + "round(bool_tag * bool_tag)", + "round(nchar_tag * nchar_tag)", + "round(uint_tag * uint_tag)", + "round(ubigint_tag * ubigint_tag)", + "round(usmallint_tag * usmallint_tag)", + "round(utinyint_tag * utinyint_tag)", + "round(ts / ts)", + "round(timestamp_col / timestamp_col)", + "round(int_col / int_col)", + 
"round(bigint_col / bigint_col)", + "round(float_col / float_col)", + "round(double_col / double_col)", + "round(binary_col / binary_col)", + "round(smallint_col / smallint_col)", + "round(tinyint_col / tinyint_col)", + "round(bool_col / bool_col)", + "round(nchar_col / nchar_col)", + "round(uint_col / uint_col)", + "round(ubigint_col / ubigint_col)", + "round(usmallint_col / usmallint_col)", + "round(utinyint_col / utinyint_col)", + "round(timestamp_tag / timestamp_tag)", + "round(int_tag / int_tag)", + "round(bigint_tag / bigint_tag)", + "round(float_tag / float_tag)", + "round(double_tag / double_tag)", + "round(binary_tag / binary_tag)", + "round(smallint_tag / smallint_tag)", + "round(tinyint_tag / tinyint_tag)", + "round(bool_tag / bool_tag)", + "round(nchar_tag / nchar_tag)", + "round(uint_tag / uint_tag)", + "round(ubigint_tag / ubigint_tag)", + "round(usmallint_tag / usmallint_tag)", + "round(utinyint_tag / utinyint_tag)", + "int_col, round(int_col), int_col", + "bigint_col, round(bigint_col), bigint_col", + "float_col, round(float_col), float_col", + "double_col, round(double_col), double_col", + "smallint_col, round(smallint_col), smallint_col", + "tinyint_col, round(tinyint_col), tinyint_col", + "uint_col, round(uint_col), uint_col", + "ubigint_col, round(ubigint_col), ubigint_col", + "usmallint_col, round(usmallint_col), usmallint_col", + "utinyint_col, round(utinyint_col), utinyint_col", + "count(int_col), round(int_col), count(int_col)", + "count(bigint_col), round(bigint_col), count(bigint_col)", + "count(float_col), round(float_col), count(float_col)", + "count(double_col), round(double_col), count(double_col)", + "count(smallint_col), round(smallint_col), count(smallint_col)", + "count(tinyint_col), round(tinyint_col), count(tinyint_col)", + "count(uint_col), round(uint_col), count(uint_col)", + "count(ubigint_col), round(ubigint_col), count(ubigint_col)", + "count(usmallint_col), round(usmallint_col), count(usmallint_col)", + "count(utinyint_col), round(utinyint_col), count(utinyint_col)", + "avg(int_col), round(int_col), avg(int_col)", + "avg(bigint_col), round(bigint_col), avg(bigint_col)", + "avg(float_col), round(float_col), avg(float_col)", + "avg(double_col), round(double_col), avg(double_col)", + "avg(smallint_col), round(smallint_col), avg(smallint_col)", + "avg(tinyint_col), round(tinyint_col), avg(tinyint_col)", + "avg(uint_col), round(uint_col), avg(uint_col)", + "avg(ubigint_col), round(ubigint_col), avg(ubigint_col)", + "avg(usmallint_col), round(usmallint_col), avg(usmallint_col)", + "avg(utinyint_col), round(utinyint_col), avg(utinyint_col)", + "twa(int_col), round(int_col), twa(int_col)", + "twa(bigint_col), round(bigint_col), twa(bigint_col)", + "twa(float_col), round(float_col), twa(float_col)", + "twa(double_col), round(double_col), twa(double_col)", + "twa(smallint_col), round(smallint_col), twa(smallint_col)", + "twa(tinyint_col), round(tinyint_col), twa(tinyint_col)", + "twa(uint_col), round(uint_col), twa(uint_col)", + "twa(ubigint_col), round(ubigint_col), twa(ubigint_col)", + "twa(usmallint_col), round(usmallint_col), twa(usmallint_col)", + "twa(utinyint_col), round(utinyint_col), twa(utinyint_col)", + "sum(int_col), round(int_col), sum(int_col)", + "sum(bigint_col), round(bigint_col), sum(bigint_col)", + "sum(float_col), round(float_col), sum(float_col)", + "sum(double_col), round(double_col), sum(double_col)", + "sum(smallint_col), round(smallint_col), sum(smallint_col)", + "sum(tinyint_col), round(tinyint_col), sum(tinyint_col)", + 
"sum(uint_col), round(uint_col), sum(uint_col)", + "sum(ubigint_col), round(ubigint_col), sum(ubigint_col)", + "sum(usmallint_col), round(usmallint_col), sum(usmallint_col)", + "sum(utinyint_col), round(utinyint_col), sum(utinyint_col)", + "stddev(int_col), round(int_col), stddev(int_col)", + "stddev(bigint_col), round(bigint_col), stddev(bigint_col)", + "stddev(float_col), round(float_col), stddev(float_col)", + "stddev(double_col), round(double_col), stddev(double_col)", + "stddev(smallint_col), round(smallint_col), stddev(smallint_col)", + "stddev(tinyint_col), round(tinyint_col), stddev(tinyint_col)", + "stddev(uint_col), round(uint_col), stddev(uint_col)", + "stddev(ubigint_col), round(ubigint_col), stddev(ubigint_col)", + "stddev(usmallint_col), round(usmallint_col), stddev(usmallint_col)", + "stddev(utinyint_col), round(utinyint_col), stddev(utinyint_col)", + "irate(int_col), round(int_col), irate(int_col)", + "irate(bigint_col), round(bigint_col), irate(bigint_col)", + "irate(float_col), round(float_col), irate(float_col)", + "irate(double_col), round(double_col), irate(double_col)", + "irate(smallint_col), round(smallint_col), irate(smallint_col)", + "irate(tinyint_col), round(tinyint_col), irate(tinyint_col)", + "irate(uint_col), round(uint_col), irate(uint_col)", + "irate(ubigint_col), round(ubigint_col), irate(ubigint_col)", + "irate(usmallint_col), round(usmallint_col), irate(usmallint_col)", + "irate(utinyint_col), round(utinyint_col), irate(utinyint_col)", + "min(int_col), round(int_col), min(int_col)", + "min(bigint_col), round(bigint_col), min(bigint_col)", + "min(float_col), round(float_col), min(float_col)", + "min(double_col), round(double_col), min(double_col)", + "min(smallint_col), round(smallint_col), min(smallint_col)", + "min(tinyint_col), round(tinyint_col), min(tinyint_col)", + "min(uint_col), round(uint_col), min(uint_col)", + "min(ubigint_col), round(ubigint_col), min(ubigint_col)", + "min(usmallint_col), round(usmallint_col), min(usmallint_col)", + "min(utinyint_col), round(utinyint_col), min(utinyint_col)", + "max(int_col), round(int_col), max(int_col)", + "max(bigint_col), round(bigint_col), max(bigint_col)", + "max(float_col), round(float_col), max(float_col)", + "max(double_col), round(double_col), max(double_col)", + "max(smallint_col), round(smallint_col), max(smallint_col)", + "max(tinyint_col), round(tinyint_col), max(tinyint_col)", + "max(uint_col), round(uint_col), max(uint_col)", + "max(ubigint_col), round(ubigint_col), max(ubigint_col)", + "max(usmallint_col), round(usmallint_col), max(usmallint_col)", + "max(utinyint_col), round(utinyint_col), max(utinyint_col)", + "first(int_col), round(int_col), first(int_col)", + "first(bigint_col), round(bigint_col), first(bigint_col)", + "first(float_col), round(float_col), first(float_col)", + "first(double_col), round(double_col), first(double_col)", + "first(smallint_col), round(smallint_col), first(smallint_col)", + "first(tinyint_col), round(tinyint_col), first(tinyint_col)", + "first(uint_col), round(uint_col), first(uint_col)", + "first(ubigint_col), round(ubigint_col), first(ubigint_col)", + "first(usmallint_col), round(usmallint_col), first(usmallint_col)", + "first(utinyint_col), round(utinyint_col), first(utinyint_col)", + "last(int_col), round(int_col), last(int_col)", + "last(bigint_col), round(bigint_col), last(bigint_col)", + "last(float_col), round(float_col), last(float_col)", + "last(double_col), round(double_col), last(double_col)", + "last(smallint_col), round(smallint_col), 
last(smallint_col)", + "last(tinyint_col), round(tinyint_col), last(tinyint_col)", + "last(uint_col), round(uint_col), last(uint_col)", + "last(ubigint_col), round(ubigint_col), last(ubigint_col)", + "last(usmallint_col), round(usmallint_col), last(usmallint_col)", + "last(utinyint_col), round(utinyint_col), last(utinyint_col)", + "last_row(int_col), round(int_col), last_row(int_col)", + "last_row(bigint_col), round(bigint_col), last_row(bigint_col)", + "last_row(float_col), round(float_col), last_row(float_col)", + "last_row(double_col), round(double_col), last_row(double_col)", + "last_row(smallint_col), round(smallint_col), last_row(smallint_col)", + "last_row(tinyint_col), round(tinyint_col), last_row(tinyint_col)", + "last_row(uint_col), round(uint_col), last_row(uint_col)", + "last_row(ubigint_col), round(ubigint_col), last_row(ubigint_col)", + "last_row(usmallint_col), round(usmallint_col), last_row(usmallint_col)", + "last_row(utinyint_col), round(utinyint_col), last_row(utinyint_col)", + "interp(int_col), round(int_col), interp(int_col)", + "interp(bigint_col), round(bigint_col), interp(bigint_col)", + "interp(float_col), round(float_col), interp(float_col)", + "interp(double_col), round(double_col), interp(double_col)", + "interp(smallint_col), round(smallint_col), interp(smallint_col)", + "interp(tinyint_col), round(tinyint_col), interp(tinyint_col)", + "interp(uint_col), round(uint_col), interp(uint_col)", + "interp(ubigint_col), round(ubigint_col), interp(ubigint_col)", + "interp(usmallint_col), round(usmallint_col), interp(usmallint_col)", + "interp(utinyint_col), round(utinyint_col), interp(utinyint_col)", + "diff(int_col), round(int_col), diff(int_col)", + "diff(bigint_col), round(bigint_col), diff(bigint_col)", + "diff(float_col), round(float_col), diff(float_col)", + "diff(double_col), round(double_col), diff(double_col)", + "diff(smallint_col), round(smallint_col), diff(smallint_col)", + "diff(tinyint_col), round(tinyint_col), diff(tinyint_col)", + "diff(uint_col), round(uint_col), diff(uint_col)", + "diff(ubigint_col), round(ubigint_col), diff(ubigint_col)", + "diff(usmallint_col), round(usmallint_col), diff(usmallint_col)", + "diff(utinyint_col), round(utinyint_col), diff(utinyint_col)", + "spread(int_col), round(int_col), spread(int_col)", + "spread(bigint_col), round(bigint_col), spread(bigint_col)", + "spread(float_col), round(float_col), spread(float_col)", + "spread(double_col), round(double_col), spread(double_col)", + "spread(smallint_col), round(smallint_col), spread(smallint_col)", + "spread(tinyint_col), round(tinyint_col), spread(tinyint_col)", + "spread(uint_col), round(uint_col), spread(uint_col)", + "spread(ubigint_col), round(ubigint_col), spread(ubigint_col)", + "spread(usmallint_col), round(usmallint_col), spread(usmallint_col)", + "spread(utinyint_col), round(utinyint_col), spread(utinyint_col)", + "leastsquares(int_col, 1, 1), round(int_col), leastsquares(int_col, 1, 1)", + "leastsquares(bigint_col, 1, 1), round(bigint_col), leastsquares(bigint_col, 1, 1)", + "leastsquares(float_col, 1, 1), round(float_col), leastsquares(float_col, 1, 1)", + "leastsquares(double_col, 1, 1), round(double_col), leastsquares(double_col, 1, 1)", + "leastsquares(smallint_col, 1, 1), round(smallint_col), leastsquares(smallint_col, 1, 1)", + "leastsquares(tinyint_col, 1, 1), round(tinyint_col), leastsquares(tinyint_col, 1, 1)", + "leastsquares(uint_col, 1, 1), round(uint_col), leastsquares(uint_col, 1, 1)", + "leastsquares(ubigint_col, 1, 1), round(ubigint_col), 
leastsquares(ubigint_col, 1, 1)", + "leastsquares(usmallint_col, 1, 1), round(usmallint_col), leastsquares(usmallint_col, 1, 1)", + "leastsquares(utinyint_col, 1, 1), round(utinyint_col), leastsquares(utinyint_col, 1, 1)", + "top(int_col, 1), round(int_col), top(int_col, 1)", + "top(bigint_col, 1), round(bigint_col), top(bigint_col, 1)", + "top(float_col, 1), round(float_col), top(float_col, 1)", + "top(double_col, 1), round(double_col), top(double_col, 1)", + "top(smallint_col, 1), round(smallint_col), top(smallint_col, 1)", + "top(tinyint_col, 1), round(tinyint_col), top(tinyint_col, 1)", + "top(uint_col, 1), round(uint_col), top(uint_col, 1)", + "top(ubigint_col, 1), round(ubigint_col), top(ubigint_col, 1)", + "top(usmallint_col, 1), round(usmallint_col), top(usmallint_col, 1)", + "top(utinyint_col, 1), round(utinyint_col), top(utinyint_col, 1)", + "bottom(int_col, 1), round(int_col), bottom(int_col, 1)", + "bottom(bigint_col, 1), round(bigint_col), bottom(bigint_col, 1)", + "bottom(float_col, 1), round(float_col), bottom(float_col, 1)", + "bottom(double_col, 1), round(double_col), bottom(double_col, 1)", + "bottom(smallint_col, 1), round(smallint_col), bottom(smallint_col, 1)", + "bottom(tinyint_col, 1), round(tinyint_col), bottom(tinyint_col, 1)", + "bottom(uint_col, 1), round(uint_col), bottom(uint_col, 1)", + "bottom(ubigint_col, 1), round(ubigint_col), bottom(ubigint_col, 1)", + "bottom(usmallint_col, 1), round(usmallint_col), bottom(usmallint_col, 1)", + "bottom(utinyint_col, 1), round(utinyint_col), bottom(utinyint_col, 1)", + "percentile(int_col, 1), round(int_col), percentile(int_col, 1)", + "percentile(bigint_col, 1), round(bigint_col), percentile(bigint_col, 1)", + "percentile(float_col, 1), round(float_col), percentile(float_col, 1)", + "percentile(double_col, 1), round(double_col), percentile(double_col, 1)", + "percentile(smallint_col, 1), round(smallint_col), percentile(smallint_col, 1)", + "percentile(tinyint_col, 1), round(tinyint_col), percentile(tinyint_col, 1)", + "percentile(uint_col, 1), round(uint_col), percentile(uint_col, 1)", + "percentile(ubigint_col, 1), round(ubigint_col), percentile(ubigint_col, 1)", + "percentile(usmallint_col, 1), round(usmallint_col), percentile(usmallint_col, 1)", + "percentile(utinyint_col, 1), round(utinyint_col), percentile(utinyint_col, 1)", + "apercentile(int_col, 1), round(int_col), apercentile(int_col, 1)", + "apercentile(bigint_col, 1), round(bigint_col), apercentile(bigint_col, 1)", + "apercentile(float_col, 1), round(float_col), apercentile(float_col, 1)", + "apercentile(double_col, 1), round(double_col), apercentile(double_col, 1)", + "apercentile(smallint_col, 1), round(smallint_col), apercentile(smallint_col, 1)", + "apercentile(tinyint_col, 1), round(tinyint_col), apercentile(tinyint_col, 1)", + "apercentile(uint_col, 1), round(uint_col), apercentile(uint_col, 1)", + "apercentile(ubigint_col, 1), round(ubigint_col), apercentile(ubigint_col, 1)", + "apercentile(usmallint_col, 1), round(usmallint_col), apercentile(usmallint_col, 1)", + "apercentile(utinyint_col, 1), round(utinyint_col), apercentile(utinyint_col, 1)", + "derivative(int_col, 1s, 0), round(int_col), derivative(int_col, 1s, 0)", + "derivative(bigint_col, 1s, 0), round(bigint_col), derivative(bigint_col, 1s, 0)", + "derivative(float_col, 1s, 0), round(float_col), derivative(float_col, 1s, 0)", + "derivative(double_col, 1s, 0), round(double_col), derivative(double_col, 1s, 0)", + "derivative(smallint_col, 1s, 0), round(smallint_col), derivative(smallint_col, 1s, 
0)", + "derivative(tinyint_col, 1s, 0), round(tinyint_col), derivative(tinyint_col, 1s, 0)", + "derivative(uint_col, 1s, 0), round(uint_col), derivative(uint_col, 1s, 0)", + "derivative(ubigint_col, 1s, 0), round(ubigint_col), derivative(ubigint_col, 1s, 0)", + "derivative(usmallint_col, 1s, 0), round(usmallint_col), derivative(usmallint_col, 1s, 0)", + "derivative(utinyint_col, 1s, 0), round(utinyint_col), derivative(utinyint_col, 1s, 0)", + "1, round(int_col), 1", + "1, round(bigint_col), 1", + "1, round(float_col), 1", + "1, round(double_col), 1", + "1, round(smallint_col), 1", + "1, round(tinyint_col), 1", + "1, round(uint_col), 1", + "1, round(ubigint_col), 1", + "1, round(usmallint_col), 1", + "1, round(utinyint_col), 1", + "round(int_col) as anyName", + "round(bigint_col) as anyName", + "round(float_col) as anyName", + "round(double_col) as anyName", + "round(smallint_col) as anyName", + "round(tinyint_col) as anyName", + "round(uint_col) as anyName", + "round(ubigint_col) as anyName", + "round(usmallint_col) as anyName", + "round(utinyint_col) as anyName", + "distinct round(int_col)", + "distinct round(bigint_col)", + "distinct round(float_col)", + "distinct round(double_col)", + "distinct round(smallint_col)", + "distinct round(tinyint_col)", + "distinct round(uint_col)", + "distinct round(ubigint_col)", + "distinct round(usmallint_col)", + "distinct round(utinyint_col)", + ] + simple_select_command = [ + "round(super.int_col)", + "round(super.bigint_col)", + "round(super.float_col)", + "round(super.double_col)", + "round(super.smallint_col)", + "round(super.tinyint_col)", + "round(super.uint_col)", + "round(super.ubigint_col)", + "round(super.usmallint_col)", + "round(super.utinyint_col)", + "round(t1.int_col)", + "round(t1.bigint_col)", + "round(t1.float_col)", + "round(t1.double_col)", + "round(t1.smallint_col)", + "round(t1.tinyint_col)", + "round(t1.uint_col)", + "round(t1.ubigint_col)", + "round(t1.usmallint_col)", + "round(t1.utinyint_col)", + ] + from_command = [" from super", " from t1"] + advance_from_command = [ + " from super", " from t1", + " from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag" + ] + filter_command = [ + "", " session(ts, 1s)", " state_window(int_col)", " interval (1s)", + " interval (1s) sliding (1s)", " group by (ts)" + ] + fill_command = [ + "", " fill(prev)", " fill(next)", " fill(null)", " fill(1)", + " fill(linear)" + ] + tdSql.prepare() + tdSql.execute( + "create stable super (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ + double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ + uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ + float_tag float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + ) + tdSql.execute( + "create stable superb (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\ + double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \ + uint_col int unsigned, ubigint_col bigint unsigned, usmallint_col smallint unsigned, utinyint_col tinyint unsigned) tags (int_tag int, bigint_tag bigint, \ + float_tag 
float, double_tag double, binary_tag binary(8), smallint_tag smallint, tinyint_tag tinyint, bool_tag bool, nchar_tag nchar(8),\ + uint_tag int unsigned, ubigint_tag bigint unsigned, usmallint_tag smallint unsigned, utinyint_tag tinyint unsigned)" + ) + tdSql.execute( + "create table t1 using super tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomBigint(), self.randomDouble(), self.randomDouble(), + self.randomNchar(), self.randomSmallint(), self.randomTinyint(), + self.randomNchar(), self.randomUInt(), self.randomUBigint(), + self.randomUSmallint(), self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215892, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215893, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t1 values (1629796215894, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "create table t2 using superb tags (1, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomBigint(), self.randomDouble(), self.randomDouble(), + self.randomNchar(), self.randomSmallint(), self.randomTinyint(), + self.randomNchar(), self.randomUInt(), self.randomUBigint(), + self.randomUSmallint(), self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215891, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215892, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215893, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 1, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + 
self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + tdSql.execute( + "insert into t2 values (1629796215894, 1629796215891, %d, %d, %f, %f, '%s', %d, %d, 0, '%s', %d, %d, %d, %d)" + % (self.randomInt(), self.randomBigint(), self.randomDouble(), + self.randomDouble(), self.randomNchar(), self.randomSmallint(), + self.randomTinyint(), self.randomNchar(), self.randomUInt(), + self.randomUBigint(), self.randomUSmallint(), + self.randomUTinyint())) + + for s in range(len(select_command)): + for f in range(len(from_command)): + sql = "select " + select_command[s] + from_command[f] + if (select_command[s] == "round(int_col)"\ + or select_command[s] == "round(bigint_col)"\ + or select_command[s] == "round(smallint_col)" \ + or select_command[s] == "round(float_col)"\ + or select_command[s] == "round(double_col)"\ + or select_command[s] == "round(tinyint_col)"\ + or select_command[s] == "round(uint_col)"\ + or select_command[s] == "round(ubigint_col)"\ + or select_command[s] == "round(usmallint_col)"\ + or select_command[s] == "round(utinyint_col)"\ + or select_command[s] == "1, round(int_col), 1"\ + or select_command[s] == "1, round(bigint_col), 1"\ + or select_command[s] == "1, round(float_col), 1"\ + or select_command[s] == "1, round(double_col), 1"\ + or select_command[s] == "1, round(smallint_col), 1"\ + or select_command[s] == "1, round(tinyint_col), 1"\ + or select_command[s] == "1, round(uint_col), 1"\ + or select_command[s] == "1, round(ubigint_col), 1"\ + or select_command[s] == "1, round(usmallint_col), 1"\ + or select_command[s] == "1, round(utinyint_col), 1"\ + or select_command[s] == "int_col, round(int_col), int_col"\ + or select_command[s] == "bigint_col, round(bigint_col), bigint_col"\ + or select_command[s] == "float_col, round(float_col), float_col"\ + or select_command[s] == "double_col, round(double_col), double_col"\ + or select_command[s] == "smallint_col, round(smallint_col), smallint_col"\ + or select_command[s] == "tinyint_col, round(tinyint_col), tinyint_col"\ + or select_command[s] == "uint_col, round(uint_col), uint_col"\ + or select_command[s] == "ubigint_col, round(ubigint_col), ubigint_col"\ + or select_command[s] == "usmallint_col, round(usmallint_col), usmallint_col"\ + or select_command[s] == "utinyint_col, round(utinyint_col), utinyint_col"\ + or select_command[s] == "round(int_col) as anyName"\ + or select_command[s] == "round(bigint_col) as anyName"\ + or select_command[s] == "round(float_col) as anyName"\ + or select_command[s] == "round(double_col) as anyName"\ + or select_command[s] == "round(smallint_col) as anyName"\ + or select_command[s] == "round(tinyint_col) as anyName"\ + or select_command[s] == "round(uint_col) as anyName"\ + or select_command[s] == "round(ubigint_col) as anyName"\ + or select_command[s] == "round(usmallint_col) as anyName"\ + or select_command[s] == "round(utinyint_col) as anyName"\ + or select_command[s] == "round(int_col) + round(int_col)"\ + or select_command[s] == "round(bigint_col) + round(bigint_col)"\ + or select_command[s] == "round(float_col) + round(float_col)"\ + or select_command[s] == "round(double_col) + round(double_col)"\ + or select_command[s] == "round(smallint_col) + round(smallint_col)"\ + or select_command[s] == "round(tinyint_col) + round(tinyint_col)"\ + or select_command[s] == "round(uint_col) + round(uint_col)"\ + or select_command[s] == "round(ubigint_col) + 
round(ubigint_col)"\ + or select_command[s] == "round(usmallint_col) + round(usmallint_col)"\ + or select_command[s] == "round(utinyint_col) + round(utinyint_col)"\ + or select_command[s] == "round(int_col) + round(int_col)"\ + or select_command[s] == "round(bigint_col) + round(bigint_col)"\ + or select_command[s] == "round(float_col) + round(float_col)"\ + or select_command[s] == "round(double_col) + round(double_col)"\ + or select_command[s] == "round(smallint_col) + round(smallint_col)"\ + or select_command[s] == "round(tinyint_col) + round(tinyint_col)"\ + or select_command[s] == "round(uint_col) + round(uint_col)"\ + or select_command[s] == "round(ubigint_col) + round(ubigint_col)"\ + or select_command[s] == "round(usmallint_col) + round(usmallint_col)"\ + or select_command[s] == "round(utinyint_col) + cei(utinyint_col)"\ + or select_command[s] == "round(int_col) - round(int_col)"\ + or select_command[s] == "round(bigint_col) - round(bigint_col)"\ + or select_command[s] == "round(float_col) - round(float_col)"\ + or select_command[s] == "round(double_col) - round(double_col)"\ + or select_command[s] == "round(smallint_col) - round(smallint_col)"\ + or select_command[s] == "round(tinyint_col) - round(tinyint_col)"\ + or select_command[s] == "round(uint_col) - round(uint_col)"\ + or select_command[s] == "round(ubigint_col) - round(ubigint_col)"\ + or select_command[s] == "round(usmallint_col) - round(usmallint_col)"\ + or select_command[s] == "round(utinyint_col) - round(utinyint_col)"\ + or select_command[s] == "round(int_col) * round(int_col)"\ + or select_command[s] == "round(bigint_col) * round(bigint_col)"\ + or select_command[s] == "round(float_col) * round(float_col)"\ + or select_command[s] == "round(double_col) * round(double_col)"\ + or select_command[s] == "round(smallint_col) * round(smallint_col)"\ + or select_command[s] == "round(tinyint_col) * round(tinyint_col)"\ + or select_command[s] == "round(uint_col) * round(uint_col)"\ + or select_command[s] == "round(ubigint_col) * round(ubigint_col)"\ + or select_command[s] == "round(usmallint_col) * round(usmallint_col)"\ + or select_command[s] == "round(utinyint_col) * round(utinyint_col)"\ + or select_command[s] == "round(int_col) / round(int_col)"\ + or select_command[s] == "round(bigint_col) / round(bigint_col)"\ + or select_command[s] == "round(float_col) / round(float_col)"\ + or select_command[s] == "round(double_col) / round(double_col)"\ + or select_command[s] == "round(smallint_col) / round(smallint_col)"\ + or select_command[s] == "round(tinyint_col) / round(tinyint_col)"\ + or select_command[s] == "round(uint_col) / round(uint_col)"\ + or select_command[s] == "round(ubigint_col) / round(ubigint_col)"\ + or select_command[s] == "round(usmallint_col) / round(usmallint_col)"\ + or select_command[s] == "round(utinyint_col) / round(utinyint_col)"): + tdSql.query(sql) + else: + tdSql.error(sql) + for sim in range(len(simple_select_command)): + for fr in range(len(advance_from_command)): + for filter in range(len(filter_command)): + for fill in range(len(fill_command)): + sql = "select " + simple_select_command[ + sim] + advance_from_command[fr] + filter_command[ + filter] + fill_command[fill] + if sql == "select round(t1.int_col) from t1"\ + or sql == "select round(super.int_col) from super"\ + or sql == "select round(t1.bigint_col) from t1"\ + or sql == "select round(super.bigint_col) from super"\ + or sql == "select round(t1.smallint_col) from t1"\ + or sql == "select round(super.smallint_col) from super"\ + or sql == 
"select round(t1.tinyint_col) from t1"\ + or sql == "select round(super.tinyint_col) from super"\ + or sql == "select round(t1.float_col) from t1"\ + or sql == "select round(super.float_col) from super"\ + or sql == "select round(t1.double_col) from t1"\ + or sql == "select round(super.double_col) from super"\ + or sql == "select round(t1.uint_col) from t1"\ + or sql == "select round(super.uint_col) from super"\ + or sql == "select round(t1.ubigint_col) from t1"\ + or sql == "select round(super.ubigint_col) from super"\ + or sql == "select round(t1.usmallint_col) from t1"\ + or sql == "select round(super.usmallint_col) from super"\ + or sql == "select round(t1.utinyint_col) from t1"\ + or sql == "select round(super.utinyint_col) from super"\ + or sql == "select round(super.int_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.bigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.smallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.tinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.float_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.double_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.uint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.ubigint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.usmallint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag"\ + or sql == "select round(super.utinyint_col) from super, superb where super.ts = superb.ts and super.int_tag = superb.int_tag": + tdSql.query(sql) + else: + tdSql.error(sql) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 1311ad6b3c83e1d4a0ec6fdf73a707a44bd5297c..a20b89d47c5fc09d032e72335edb9544eb50e0aa 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -15,6 +15,7 @@ import sys import subprocess import random import math +import numpy as np from util.log import * from util.cases import * @@ -57,16 +58,33 @@ class TDTestCase: def td3690(self): tdLog.printNoPrefix("==========TD-3690==========") + + tdSql.prepare() + + tdSql.execute("show variables") + res_off = tdSql.cursor.fetchall() + resList = np.array(res_off) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() tdSql.query("show variables") - tdSql.checkData(53, 1, 864000) + tdSql.checkData(index_value, 1, 864000) def td4082(self): tdLog.printNoPrefix("==========TD-4082==========") + + tdSql.prepare() + cfgfile = self.getCfgFile() max_compressMsgSize = 100000000 + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "compressMsgSize") + index_value = np.dstack((cpms_index[0])).squeeze() + tdSql.query("show variables") - tdSql.checkData(26, 1, -1) + 
tdSql.checkData(index_value, 1, -1) tdSql.query("show dnodes") index = tdSql.getData(0, 0) @@ -80,7 +98,7 @@ class TDTestCase: tdDnodes.start(index) tdSql.query("show variables") - tdSql.checkData(26, 1, 100000000) + tdSql.checkData(index_value, 1, 100000000) tdDnodes.stop(index) cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} " @@ -91,7 +109,7 @@ class TDTestCase: tdDnodes.start(index) tdSql.query("show variables") - tdSql.checkData(26, 1, -1) + tdSql.checkData(index_value, 1, -1) tdDnodes.stop(index) cmd = f"sed -i '$d' {cfgfile}" @@ -104,8 +122,12 @@ class TDTestCase: def td4097(self): tdLog.printNoPrefix("==========TD-4097==========") + tdSql.execute("drop database if exists db") tdSql.execute("drop database if exists db1") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute("create database if not exists db keep 3650") tdSql.execute("create database if not exists db1 keep 3650") tdSql.execute("create database if not exists new keep 3650") @@ -267,10 +289,22 @@ class TDTestCase: # keep ~ [days,365000] tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + tdSql.query("show variables") - tdSql.checkData(38, 1, 3650) + tdSql.checkData(index_value, 1, 3650) + tdSql.query("show databases") - tdSql.checkData(0,7,"3650,3650,3650") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) days = tdSql.getData(0, 6) tdSql.error("alter database db keep 3650001") @@ -289,14 +323,22 @@ class TDTestCase: tdSql.execute("alter database db keep 36500") tdSql.query("show databases") - tdSql.checkData(0, 7, "3650,3650,36500") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + tdSql.checkData(0, 7, 36500) + tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db1") tdSql.query("show databases") - tdSql.checkData(0, 7, "3650,3650,3650") + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + tdSql.query("show variables") - tdSql.checkData(38, 1, 3650) + tdSql.checkData(index_value, 1, 3650) tdSql.execute("alter database db1 keep 365") tdSql.execute("drop database if exists db1") @@ -552,7 +594,7 @@ class TDTestCase: tdSql.execute("use db") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") - numtab=2000000 + numtab=20000 for i in range(numtab): sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" tdSql.execute(sql) @@ -698,9 +740,7 @@ class TDTestCase: tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") tdSql.checkRows(3) tdSql.error("select distinct c1, c2 from stb1 order by ts") - #tdSql.checkRows(tbnum*3+1) tdSql.error("select distinct c1, c2 from t1 order by ts") - #tdSql.checkRows(4) tdSql.error("select distinct c1, ts from stb1 group by c2") tdSql.error("select distinct c1, ts from t1 group by c2") tdSql.error("select distinct c1, max(c2) from stb1 ") @@ -729,7 +769,7 @@ class TDTestCase: tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") tdSql.checkRows(3) tdSql.query(f"select distinct c1,c2 from 
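Several of the queryTestCases.py hunks above replace hard-coded row numbers such as checkData(53, ...) or checkData(26, ...) with an index computed from the `show variables` output, so the assertions keep working when taosd adds or reorders variables. A minimal sketch of that lookup factored into a reusable helper is shown below; the helper name variable_row is hypothetical, and the sketch assumes the tdSql fixture from util.sql used throughout these tests.

```python
import numpy as np

def variable_row(name):
    # Hypothetical helper, mirroring the np.where pattern used in the patch:
    # find the row of `show variables` whose name column equals `name`.
    tdSql.execute("show variables")
    rows = np.array(tdSql.cursor.fetchall())
    idx = np.where(rows == name)     # positions of every cell equal to `name`
    return int(idx[0][0])            # row index of the first match

# Usage, equivalent to the td3690 assertion in the patch:
tdSql.query("show variables")
tdSql.checkData(variable_row("offlineThreshold"), 1, 864000)
```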
(select * from stb1 where t2 !=0 and t2 != 1) ") - tdSql.checkRows(4) + tdSql.checkRows(0) tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") @@ -975,6 +1015,84 @@ class TDTestCase: tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + 
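The block added above repeats the same verification for top, diff and bottom: run `select ts as t, <func>(...)`, then confirm row by row that the aliased column t carries the same value as the implicit timestamp column. A minimal sketch of that check factored into a helper is shown below; check_ts_alias is a hypothetical name, and the sketch assumes the tdSql fixture used throughout these tests.

```python
def check_ts_alias(sql, expected_rows):
    # Hypothetical helper for the pattern used above: column 0 is the implicit
    # timestamp and column 1 is the alias `t`; the two must match on every row.
    tdSql.query(sql)
    tdSql.checkRows(expected_rows)
    for i in range(expected_rows):
        ts_value = tdSql.getData(i, 0)
        tdSql.checkData(i, 1, ts_value)

# Usage, mirroring the assertions in the hunk above:
check_ts_alias("select ts as t, top(c1, 5) from t1", 5)
check_ts_alias("select ts as t, diff(c1) from t1", 5)
check_ts_alias("select ts as t, bottom(c1, 5) from t1", 5)
```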
tdSql.error("select ts as t, top(c1, 0) from t1") tdSql.query("select ts as t, top(c1, 5) from t1") tdSql.checkRows(5) @@ -1011,15 +1129,15 @@ class TDTestCase: # self.td4082() # self.td4288() # self.td4724() - # self.td5798() # self.td5935() - self.td6068() + # self.td6068() # develop branch - # self.td4097() - # self.td4889() + self.td4097() + self.td4889() + self.td5798() # self.td5168() - # self.td5433() + self.td5433() def stop(self): tdSql.close() diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index 8ec25cef26b3c97bc55f2f4df3fe8cf55a19125c..7462d4cd72f600674fcb82aa1224019787d23fd5 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -12,6 +12,8 @@ # -*- coding: utf-8 -*- import sys +import numpy as np + from util.log import * from util.cases import * from util.sql import * @@ -24,8 +26,17 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): + # tdSql.query("show variables") + # tdSql.checkData(54, 1, 864000) + tdSql.execute("show variables") + res = tdSql.cursor.fetchall() + resList = np.array(res) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() tdSql.query("show variables") - tdSql.checkData(55, 1, 864000) + tdSql.checkData(index_value, 1, 864000) + pass + def stop(self): tdSql.close() diff --git a/tests/pytest/import_merge/import_update_0.py b/tests/pytest/import_merge/import_update_0.py index 71f33c56704fdead8df07583dd105b00eb9e4a23..66e0d7d14420251a227e5f0c2bacec219273d032 100644 --- a/tests/pytest/import_merge/import_update_0.py +++ b/tests/pytest/import_merge/import_update_0.py @@ -55,7 +55,7 @@ class TDTestCase: tdSql.execute('''drop database if exists test_updata_0 ;''') # update 0 不更新 ; update 1 覆盖更新 ;update 2 合并更新 tdLog.info("========== test database updata = 0 ==========") - tdSql.execute('''create database test_updata_0 update 0 minrows 10 maxrows 200 ;''') + tdSql.execute('''create database test_updata_0 update 0 minrows 10 maxrows 200 keep 36500;''') tdSql.execute('''use test_updata_0;''') tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, diff --git a/tests/pytest/import_merge/import_update_1.py b/tests/pytest/import_merge/import_update_1.py index e72de2eb236a4190ec12fcc1315da849d5f21235..7edfd610c2e6eac6588ae78f81c939118845973d 100644 --- a/tests/pytest/import_merge/import_update_1.py +++ b/tests/pytest/import_merge/import_update_1.py @@ -55,7 +55,7 @@ class TDTestCase: tdSql.execute('''drop database if exists test_updata_1 ;''') # update 0 不更新 ; update 1 覆盖更新 ;update 2 合并更新 tdLog.info("========== test database updata = 1 ==========") - tdSql.execute('''create database test_updata_1 update 1 minrows 10 maxrows 200 ;''') + tdSql.execute('''create database test_updata_1 update 1 minrows 10 maxrows 200 keep 36500;''') tdSql.execute('''use test_updata_1;''') tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, diff --git a/tests/pytest/import_merge/import_update_2.py b/tests/pytest/import_merge/import_update_2.py index cfa3dbd26c0a0edfbd64d3d34b1bfa6ed23a266a..a0efe31ab25f68a898a124e0be22c369fedabf7f 100644 --- a/tests/pytest/import_merge/import_update_2.py +++ b/tests/pytest/import_merge/import_update_2.py @@ -55,7 +55,7 @@ class TDTestCase: tdSql.execute('''drop database if exists test_updata_2 ;''') # 
update 0 不更新 ; update 1 覆盖更新 ;update 2 合并更新 tdLog.info("========== test database updata = 2 ==========") - tdSql.execute('''create database test_updata_2 update 2 minrows 10 maxrows 200 ;''') + tdSql.execute('''create database test_updata_2 update 2 minrows 10 maxrows 200 keep 36500;''') tdSql.execute('''use test_updata_2;''') tdSql.execute('''create stable stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, diff --git a/tests/pytest/insert/insertJSONPayload.py b/tests/pytest/insert/insertJSONPayload.py new file mode 100644 index 0000000000000000000000000000000000000000..30f34446a93237f9b7b610efc9b1b5507ba09f4a --- /dev/null +++ b/tests/pytest/insert/insertJSONPayload.py @@ -0,0 +1,568 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test precision 'us'") + tdSql.execute('use test') + + + ### Default format ### + ### metric value ### + print("============= step1 : test metric value types ================") + payload = ''' + { + "metric": "stb0_0", + "timestamp": 1626006833610123, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_0") + tdSql.checkData(1, 1, "FLOAT") + + payload = ''' + { + "metric": "stb0_1", + "timestamp": 1626006833610123, + "value": true, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_1") + tdSql.checkData(1, 1, "BOOL") + + payload = ''' + { + "metric": "stb0_2", + "timestamp": 1626006833610123, + "value": false, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_2") + tdSql.checkData(1, 1, "BOOL") + + payload = ''' + { + "metric": "stb0_3", + "timestamp": 1626006833610123, + "value": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>", + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb0_3") + tdSql.checkData(1, 1, "NCHAR") + + ### timestamp 0 ### + payload = ''' + { + "metric": "stb0_4", + "timestamp": 0, + "value": 123, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": 
"123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + + ### ID ### + payload = ''' + { + "metric": "stb0_5", + "timestamp": 0, + "value": 123, + "tags": { + "ID": "tb0_5", + "t1": true, + "iD": "tb000", + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>", + "id": "tb555" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select tbname from stb0_5") + tdSql.checkData(0, 0, "tb0_5") + + ### Nested format ### + ### timestamp ### + #seconds + payload = ''' + { + "metric": "stb1_0", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_0") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.000000") + + #milliseconds + payload = ''' + { + "metric": "stb1_1", + "timestamp": { + "value": 1626006833610, + "type": "ms" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_1") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.610000") + + #microseconds + payload = ''' + { + "metric": "stb1_2", + "timestamp": { + "value": 1626006833610123, + "type": "us" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_2") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.610123") + + #nanoseconds + payload = ''' + { + "metric": "stb1_3", + "timestamp": { + "value": 1.6260068336101233e+18, + "type": "ns" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("select ts from stb1_3") + tdSql.checkData(0, 0, "2021-07-11 20:33:53.610123") + + #now + tdSql.execute('use test') + payload = ''' + { + "metric": "stb1_4", + "timestamp": { + "value": 0, + "type": "ns" + }, + "value": 10, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + ### metric value ### + payload = ''' + { + "metric": "stb2_0", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": true, + "type": "bool" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_0") + tdSql.checkData(1, 1, "BOOL") + + payload = ''' + { + "metric": "stb2_1", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 127, + "type": "tinyint" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = 
self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_1") + tdSql.checkData(1, 1, "TINYINT") + + payload = ''' + { + "metric": "stb2_2", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 32767, + "type": "smallint" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_2") + tdSql.checkData(1, 1, "SMALLINT") + + payload = ''' + { + "metric": "stb2_3", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 2147483647, + "type": "int" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_3") + tdSql.checkData(1, 1, "INT") + + payload = ''' + { + "metric": "stb2_4", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 9.2233720368547758e+18, + "type": "bigint" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_4") + tdSql.checkData(1, 1, "BIGINT") + + payload = ''' + { + "metric": "stb2_5", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 11.12345, + "type": "float" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_5") + tdSql.checkData(1, 1, "FLOAT") + + payload = ''' + { + "metric": "stb2_6", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": 22.123456789, + "type": "double" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_6") + tdSql.checkData(1, 1, "DOUBLE") + + payload = ''' + { + "metric": "stb2_7", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>", + "type": "binary" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_7") + tdSql.checkData(1, 1, "BINARY") + + payload = ''' + { + "metric": "stb2_8", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": "你好", + "type": "nchar" + }, + "tags": { + "t1": true, + "t2": false, + "t3": 10, + "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>" + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb2_8") + tdSql.checkData(1, 1, "NCHAR") + + ### tag value ### + + payload = ''' + { + "metric": "stb3_0", + "timestamp": { + "value": 1626006833, + "type": "s" + }, + "value": { + "value": "hello", + "type": "nchar" + }, + "tags": { + "t1": { + "value": true, + "type": 
"bool" + }, + "t2": { + "value": 127, + "type": "tinyint" + }, + "t3": { + "value": 32767, + "type": "smallint" + }, + "t4": { + "value": 2147483647, + "type": "int" + }, + "t5": { + "value": 9.2233720368547758e+18, + "type": "bigint" + }, + "t6": { + "value": 11.12345, + "type": "float" + }, + "t7": { + "value": 22.123456789, + "type": "double" + }, + "t8": { + "value": "binary_val", + "type": "binary" + }, + "t9": { + "value": "你好", + "type": "nchar" + } + } + } + ''' + code = self._conn.insert_json_payload(payload) + print("insert_json_payload result {}".format(code)) + + tdSql.query("describe stb3_0") + tdSql.checkData(2, 1, "BOOL") + tdSql.checkData(3, 1, "TINYINT") + tdSql.checkData(4, 1, "SMALLINT") + tdSql.checkData(5, 1, "INT") + tdSql.checkData(6, 1, "BIGINT") + tdSql.checkData(7, 1, "FLOAT") + tdSql.checkData(8, 1, "DOUBLE") + tdSql.checkData(9, 1, "BINARY") + tdSql.checkData(10, 1, "NCHAR") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/insertTelnetLines.py b/tests/pytest/insert/insertTelnetLines.py index 8ebb6bd3df4bcd4abfbb8c42cf5024fe066fcce3..4041b309a1007c1177f26d28b022f4e314dcf9ba 100644 --- a/tests/pytest/insert/insertTelnetLines.py +++ b/tests/pytest/insert/insertTelnetLines.py @@ -33,9 +33,9 @@ class TDTestCase: ### metric ### print("============= step1 : test metric ================") lines0 = [ - "stb0_0 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", - "stb0_1 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", - "stb0_2 1626006833639000000ns 4i8 host=\"host0\",interface=\"eth0\"", + "stb0_0 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"", + "stb0_1 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"", + "stb0_2 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"", ] code = self._conn.insert_telnet_lines(lines0) @@ -245,8 +245,8 @@ class TDTestCase: print("============= step3 : test tags ================") #tag value types lines3_0 = [ - "stb3_0 1626006833610ms 1 t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=3.4E38f32,t6=1.7E308f64,t7=true,t8=\"binary_val_1\",t9=L\"标签值1\"", - "stb3_0 1626006833610ms 2 t1=-127i8,t2=-32767i16,t3=-2147483647i32,t4=-9223372036854775807i64,t5=-3.4E38f32,t6=-1.7E308f64,t7=false,t8=\"binary_val_2\",t9=L\"标签值2\"" + "stb3_0 1626006833610ms 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"", + "stb3_0 1626006833610ms 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\"" ] code = self._conn.insert_telnet_lines(lines3_0) @@ -288,9 +288,9 @@ class TDTestCase: #tag ID as child table name lines3_1 = [ - "stb3_1 1626006833610ms 1 id=\"child_table1\",host=\"host1\"", - "stb3_1 1626006833610ms 2 host=\"host2\",iD=\"child_table2\"", - "stb3_1 1626006833610ms 3 ID=\"child_table3\",host=\"host3\"" + "stb3_1 1626006833610ms 1 id=\"child_table1\" host=\"host1\"", + "stb3_1 1626006833610ms 2 host=\"host2\" iD=\"child_table2\"", + "stb3_1 1626006833610ms 3 ID=\"child_table3\" host=\"host3\"" ] code = self._conn.insert_telnet_lines(lines3_1) diff --git a/tests/pytest/insert/openTsdbTelnetLinesInsert.py b/tests/pytest/insert/openTsdbTelnetLinesInsert.py index e0d1c0d9669e77e236d4b1591b302a717c5a93d1..25518437e102c985b4d84887b1806f9e341c86d6 100644 --- 
a/tests/pytest/insert/openTsdbTelnetLinesInsert.py +++ b/tests/pytest/insert/openTsdbTelnetLinesInsert.py @@ -13,7 +13,6 @@ import traceback import random -import string from taos.error import LinesError import time from copy import deepcopy @@ -24,7 +23,6 @@ from util.sql import * from util.common import tdCom import threading - class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -191,7 +189,8 @@ class TDTestCase: t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"", t8="L\"ncharTagValue\"", ts="1626006833639000000ns", id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_double_tag=None, - t_add_tag=None, t_mul_tag=None, t_multi_tag=None, t_blank_tag=None): + t_add_tag=None, t_mul_tag=None, t_multi_tag=None, c_blank_tag=None, t_blank_tag=None, + chinese_tag=None, multi_field_tag=None): if stb_name == "": stb_name = tdCom.getLongName(len=6, mode="letters") if tb_name == "": @@ -221,8 +220,14 @@ class TDTestCase: sql_seq = f'{stb_name} {ts} {value} t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6}' if t_multi_tag is not None: sql_seq = f'{stb_name} {ts} {value},{value} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6}' - if t_blank_tag is not None: + if c_blank_tag is not None: sql_seq = f'{stb_name} {ts} {id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8}' + if t_blank_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\"' + if chinese_tag is not None: + sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0},t1=L"涛思数据"' + if multi_field_tag is not None: + sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\",t0={t0} t1={t1}' return sql_seq, stb_name def genMulTagColStr(self, genType, count=1): @@ -259,8 +264,6 @@ class TDTestCase: def resHandle(self, query_sql, query_tag): tdSql.execute('reset query cache') row_info = tdSql.query(query_sql, query_tag) - print(query_sql) - print(row_info) col_info = tdSql.getColNameList(query_sql, query_tag) res_row_list = [] sub_list = [] @@ -277,22 +280,6 @@ class TDTestCase: self._conn.insert_telnet_lines([input_sql]) query_sql = f"{query_sql} {stb_name} {condition}" res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True) - res = tdSql.query(f'select * from {stb_name}', True) - print(res) - - res = tdSql.query(f'select * from {stb_name}', True) - print(res) - time.sleep(2) - res = tdSql.query(f'select * from {stb_name}', True) - print(res) - time.sleep(2) - res = tdSql.query(f'select * from {stb_name}', True) - print(res) - time.sleep(2) - res = tdSql.query(f'select * from {stb_name}', True) - print(res) - - if ts == 0: res_ts = self.dateToTs(res_row_list[0][0]) current_time = time.time() @@ -535,8 +522,8 @@ class TDTestCase: input_sql, stb_name = self.genFullTypeSql(t6=t6) self.resCmp(input_sql, stb_name) # * limit set to 1.797693134862316*(10**308) - for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: - input_sql = self.genFullTypeSql(c6=c6)[0] + for t6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + input_sql = self.genFullTypeSql(t6=t6)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -574,23 +561,25 @@ class TDTestCase: """ tdCom.cleanTb() # i8 - for c1 in ["-127i8", "127i8"]: - input_sql, stb_name = self.genFullTypeSql(c1=c1) + for value in ["-127i8", "127i8"]: + input_sql, stb_name = self.genFullTypeSql(value=value) 
self.resCmp(input_sql, stb_name) - - for c1 in ["-128i8", "128i8"]: - input_sql = self.genFullTypeSql(c1=c1)[0] + tdCom.cleanTb() + for value in ["-128i8", "128i8"]: + input_sql = self.genFullTypeSql(value=value)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") except LinesError as err: tdSql.checkNotEqual(err.errno, 0) # i16 - for c2 in ["-32767i16"]: - input_sql, stb_name = self.genFullTypeSql(c2=c2) + tdCom.cleanTb() + for value in ["-32767i16"]: + input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - for c2 in ["-32768i16", "32768i16"]: - input_sql = self.genFullTypeSql(c2=c2)[0] + tdCom.cleanTb() + for value in ["-32768i16", "32768i16"]: + input_sql = self.genFullTypeSql(value=value)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -598,11 +587,13 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i32 - for c3 in ["-2147483647i32"]: - input_sql, stb_name = self.genFullTypeSql(c3=c3) + tdCom.cleanTb() + for value in ["-2147483647i32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - for c3 in ["-2147483648i32", "2147483648i32"]: - input_sql = self.genFullTypeSql(c3=c3)[0] + tdCom.cleanTb() + for value in ["-2147483648i32", "2147483648i32"]: + input_sql = self.genFullTypeSql(value=value)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -610,11 +601,13 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # i64 - for c4 in ["-9223372036854775807i64"]: - input_sql, stb_name = self.genFullTypeSql(c4=c4) + tdCom.cleanTb() + for value in ["-9223372036854775807i64"]: + input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) - for c4 in ["-9223372036854775808i64", "9223372036854775808i64"]: - input_sql = self.genFullTypeSql(c4=c4)[0] + tdCom.cleanTb() + for value in ["-9223372036854775808i64", "9223372036854775808i64"]: + input_sql = self.genFullTypeSql(value=value)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -622,12 +615,14 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f32 - for c5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: - input_sql, stb_name = self.genFullTypeSql(c5=c5) + tdCom.cleanTb() + for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]: + input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) # * limit set to 4028234664*(10**38) - for c5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: - input_sql = self.genFullTypeSql(c5=c5)[0] + tdCom.cleanTb() + for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]: + input_sql = self.genFullTypeSql(value=value)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -635,12 +630,14 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # f64 - for c6 in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: - input_sql, stb_name = self.genFullTypeSql(c6=c6) + tdCom.cleanTb() + for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', 
f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']: + input_sql, stb_name = self.genFullTypeSql(value=value) self.resCmp(input_sql, stb_name) # * limit set to 1.797693134862316*(10**308) - for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: - input_sql = self.genFullTypeSql(c6=c6)[0] + tdCom.cleanTb() + for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']: + input_sql = self.genFullTypeSql(value=value)[0] try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -648,10 +645,12 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # # binary + tdCom.cleanTb() stb_name = tdCom.getLongName(7, "letters") input_sql = f'{stb_name} 1626006833639000000ns "{tdCom.getLongName(16374, "letters")}" t0=t' self._conn.insert_telnet_lines([input_sql]) + tdCom.cleanTb() input_sql = f'{stb_name} 1626006833639000000ns "{tdCom.getLongName(16375, "letters")}" t0=t' try: self._conn.insert_telnet_lines([input_sql]) @@ -661,10 +660,12 @@ class TDTestCase: # nchar # * legal nchar could not be larger than 16374/4 + tdCom.cleanTb() stb_name = tdCom.getLongName(7, "letters") input_sql = f'{stb_name} 1626006833639000000ns L"{tdCom.getLongName(4093, "letters")}" t0=t' self._conn.insert_telnet_lines([input_sql]) + tdCom.cleanTb() input_sql = f'{stb_name} 1626006833639000000ns L"{tdCom.getLongName(4094, "letters")}" t0=t' try: self._conn.insert_telnet_lines([input_sql]) @@ -709,24 +710,21 @@ class TDTestCase: tdSql.checkNotEqual(err.errno, 0) # check binary and nchar blank - stb_name = tdCom.getLongName(7, "letters") - - input_sql1 = f'{stb_name} 1626006833639000000ns "abc aaa" t0=t' - input_sql2 = f'{stb_name} 1626006833639000000ns L"abc aaa" t0=t' - input_sql3 = f'{stb_name} 1626006833639000000ns t t0="abc aaa"' - input_sql4 = f'{stb_name} 1626006833639000000ns t t0=L"abc aaa"' + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abc aaa" t0=t' + input_sql3 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0="abc aaa"' + input_sql4 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=L"abc aaa"' for input_sql in [input_sql1, input_sql2, input_sql3, input_sql4]: try: self._conn.insert_telnet_lines([input_sql]) - raise Exception("should not reach here") except LinesError as err: - tdSql.checkNotEqual(err.errno, 0) + pass # check accepted binary and nchar symbols # # * ~!@#$¥%^&*()-+={}|[]、「」:; for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'): - input_sql1 = f'{stb_name} 1626006833639000000ns "abc{symbol}aaa" t0=t' - input_sql2 = f'{stb_name} 1626006833639000000ns t t0=t,t1="abc{symbol}aaa"' + input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc{symbol}aaa" t0=t' + input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=t,t1="abc{symbol}aaa"' self._conn.insert_telnet_lines([input_sql1]) self._conn.insert_telnet_lines([input_sql2]) @@ -756,6 +754,7 @@ class TDTestCase: """ case no id when stb exist """ + print("noIdStbExistCheckCase") tdCom.cleanTb() input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f") self.resCmp(input_sql, stb_name) @@ -779,22 +778,24 @@ class TDTestCase: """ check length increase """ + print("tagColBinaryNcharLengthCheckCase") tdCom.cleanTb() input_sql, stb_name = self.genFullTypeSql() self.resCmp(input_sql, stb_name) tb_name = 
tdCom.getLongName(5, "letters") - input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"", c7="\"binaryTagValuebinaryTagValue\"", c8="L\"ncharTagValuencharTagValue\"") + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"") self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') def tagColAddDupIDCheckCase(self): """ - check column and tag count add, stb and tb duplicate + check tag count add, stb and tb duplicate * tag: alter table ... * col: when update==0 and ts is same, unchange * so this case tag&&value will be added, * col is added without value when update==0 * col is added with value when update==1 """ + print("tagColAddDupIDCheckCase") tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") for db_update_tag in [0, 1]: @@ -802,25 +803,27 @@ class TDTestCase: self.createDb("test_update", db_update_tag=db_update_tag) input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") self.resCmp(input_sql, stb_name) - self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", value="f", ct_add_tag=True) + self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", value="f", t_add_tag=True) if db_update_tag == 1 : self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"') else: self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) + self.createDb() def tagColAddCheckCase(self): """ - check column and tag count add + check tag count add """ + print("tagColAddCheckCase") tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f") self.resCmp(input_sql, stb_name) tb_name_1 = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", ct_add_tag=True) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", value="f", t_add_tag=True) self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"') - res_row_list = self.resHandle(f"select c10,c11,t10,t11 from {tb_name}", True)[0] - tdSql.checkEqual(res_row_list[0], ['None', 'None', 'None', 'None']) + res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0] + tdSql.checkEqual(res_row_list[0], ['None', 'None']) self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True) def tagMd5Check(self): @@ -838,7 +841,7 @@ class TDTestCase: tdSql.query(f"select * from {stb_name}") tdSql.checkRows(1) tdSql.checkEqual(tb_name1, tb_name2) - input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, ct_add_tag=True) + input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True) self._conn.insert_telnet_lines([input_sql]) tb_name3 = self.getNoIdTbName(stb_name) tdSql.query(f"select * from {stb_name}") @@ -853,16 +856,17 @@ class TDTestCase: tdCom.cleanTb() stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' - input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000ns' + + input_sql = f'{stb_name} 1626006833639000000ns f id="{tb_name}",t0=t' self._conn.insert_telnet_lines([input_sql]) # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2 
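The value-range hunks above (i8 through f64) all follow the same shape: a value at the type's limit must be accepted, while a value one step outside it must make insert_telnet_lines raise LinesError with a non-zero errno. A minimal sketch of that pattern is shown below; the metric name demo_bounds is made up, and the sketch assumes the same conn object and the LinesError class already imported at the top of the file.

```python
from taos.error import LinesError

ok_line  = 'demo_bounds 1626006833639000000ns 127i8 t0=t'   # max TINYINT, accepted
bad_line = 'demo_bounds 1626006833639000000ns 128i8 t0=t'   # one past the max, rejected

conn.insert_telnet_lines([ok_line])
try:
    conn.insert_telnet_lines([bad_line])
    raise Exception("should not reach here")
except LinesError as err:
    assert err.errno != 0   # the out-of-range value must be refused
```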
- input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000ns' + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}"' self._conn.insert_telnet_lines([input_sql]) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000ns' + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}"' try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -871,21 +875,6 @@ class TDTestCase: tdSql.query(f"select * from {stb_name}") tdSql.checkRows(2) - # # * check col,col+ts max in describe ---> 16143 - input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(12, "letters")}" 1626006833639000000ns' - self._conn.insert_telnet_lines([input_sql]) - - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) - input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(13, "letters")}" 1626006833639000000ns' - try: - self._conn.insert_telnet_lines([input_sql]) - raise Exception("should not reach here") - except LinesError as err: - tdSql.checkNotEqual(err.errno, 0) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) - # * tag nchar max is 16374/4, col+ts nchar max 49151 def tagColNcharMaxLengthCheckCase(self): """ @@ -894,15 +883,15 @@ class TDTestCase: tdCom.cleanTb() stb_name = tdCom.getLongName(7, "letters") tb_name = f'{stb_name}_1' - input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000ns' - code = self._conn.insert_telnet_lines([input_sql]) + input_sql = f'{stb_name} 1626006833639000000ns f id="{tb_name}",t0=t' + self._conn.insert_telnet_lines([input_sql]) # * legal nchar could not be larger than 16374/4 - input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}" c0=f 1626006833639000000ns' + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(1, "letters")}"' self._conn.insert_telnet_lines([input_sql]) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}" c0=f 1626006833639000000ns' + input_sql = f'{stb_name} 1626006833639000000ns f t0=t,t1=L"{tdCom.getLongName(4093, "letters")}",t2=L"{tdCom.getLongName(2, "letters")}"' try: self._conn.insert_telnet_lines([input_sql]) raise Exception("should not reach here") @@ -911,19 +900,6 @@ class TDTestCase: tdSql.query(f"select * from {stb_name}") tdSql.checkRows(2) - input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000ns' - self._conn.insert_telnet_lines([input_sql]) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) - input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, 
"letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(5, "letters")}" 1626006833639000000ns' - try: - self._conn.insert_telnet_lines([input_sql]) - raise Exception("should not reach here") - except LinesError as err: - tdSql.checkNotEqual(err.errno, 0) - tdSql.query(f"select * from {stb_name}") - tdSql.checkRows(3) - def batchInsertCheckCase(self): """ test batch insert @@ -931,17 +907,24 @@ class TDTestCase: tdCom.cleanTb() stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') - lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", - "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns", - f"{stb_name},t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532ns", - "stf567890,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns", - "st123456,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000ns", - f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532ns", - f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532ns", - "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns", - "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000ns" + + lines = ["st123456 1626006833639000000ns 1i64 t1=3i64,t2=4f64,t3=\"t3\"", + "st123456 1626006833640000000ns 2i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64", + f'{stb_name} 1626056811823316532ns 3i64 t2=5f64,t3=L\"ste\"', + "stf567890 1626006933640000000ns 4i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64", + "st123456 1626006833642000000ns 5i64 t1=4i64,t2=5f64,t3=\"t4\"", + f'{stb_name} 1626056811843316532ns 6i64 t2=5f64,t3=L\"ste2\"', + f'{stb_name} 1626056812843316532ns 7i64 t2=5f64,t3=L\"ste2\"', + "st123456 1626006933640000000ns 8i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64", + "st123456 1626006933641000000ns 9i64 t1=4i64,t3=\"t4\",t2=5f64,t4=5f64" ] self._conn.insert_telnet_lines(lines) + tdSql.query('show stables') + tdSql.checkRows(3) + tdSql.query('show tables') + tdSql.checkRows(6) + tdSql.query('select * from st123456') + tdSql.checkRows(5) def multiInsertCheckCase(self, count): """ @@ -952,9 +935,11 @@ class TDTestCase: stb_name = tdCom.getLongName(8, "letters") tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)') for i in range(count): - input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] + input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0] sql_list.append(input_sql) self._conn.insert_telnet_lines(sql_list) + tdSql.query('show tables') + tdSql.checkRows(1000) def batchErrorInsertCheckCase(self): """ @@ -962,14 +947,86 @@ class TDTestCase: """ tdCom.cleanTb() stb_name = tdCom.getLongName(8, "letters") - lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", - f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"] + lines = ["st123456 1626006833639000000ns 3i64 t1=3i64,t2=4f64,t3=\"t3\"", + f"{stb_name} 
1626056811823316532ns tRue t2=5f64,t3=L\"ste\""] try: self._conn.insert_telnet_lines(lines) raise Exception("should not reach here") except LinesError as err: tdSql.checkNotEqual(err.errno, 0) + def multiColsInsertCheckCase(self): + """ + test multi cols insert + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_multi_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankColInsertCheckCase(self): + """ + test blank col insert + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(c_blank_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def blankTagInsertCheckCase(self): + """ + test blank tag insert + """ + tdCom.cleanTb() + input_sql = self.genFullTypeSql(t_blank_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def chineseCheckCase(self): + """ + check nchar ---> chinese + """ + tdCom.cleanTb() + input_sql, stb_name = self.genFullTypeSql(chinese_tag=True) + self.resCmp(input_sql, stb_name) + + def multiFieldCheckCase(self): + ''' + multi_field + ''' + tdCom.cleanTb() + input_sql = self.genFullTypeSql(multi_field_tag=True)[0] + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + + def errorTypeCheckCase(self): + stb_name = tdCom.getLongName(8, "letters") + input_sql_list = [f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127I8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767I16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647I32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807I64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345F32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 0 "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789F64,t7="vozamcts",t8=L"ncharTagValue"', \ + f'{stb_name} 1626006833639000000NS "hkgjiwdj" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"'] + for input_sql in input_sql_list: + try: + self._conn.insert_telnet_lines([input_sql]) + raise Exception("should not reach here") + except LinesError as err: + tdSql.checkNotEqual(err.errno, 0) + def genSqlList(self, count=5, stb_name="", tb_name=""): """ stb --> supertable @@ -984,36 +1041,36 @@ class TDTestCase: """ d_stb_d_tb_list = list() s_stb_s_tb_list = list() - s_stb_s_tb_a_col_a_tag_list = list() - s_stb_s_tb_m_col_m_tag_list = list() + s_stb_s_tb_a_tag_list = list() + s_stb_s_tb_m_tag_list = list() s_stb_d_tb_list = list() - s_stb_d_tb_a_col_m_tag_list = list() - s_stb_d_tb_a_tag_m_col_list = list() + s_stb_d_tb_m_tag_list = list() + 
s_stb_d_tb_a_tag_list = list() s_stb_s_tb_d_ts_list = list() - s_stb_s_tb_d_ts_a_col_m_tag_list = list() - s_stb_s_tb_d_ts_a_tag_m_col_list = list() + s_stb_s_tb_d_ts_m_tag_list = list() + s_stb_s_tb_d_ts_a_tag_list = list() s_stb_d_tb_d_ts_list = list() - s_stb_d_tb_d_ts_a_col_m_tag_list = list() - s_stb_d_tb_d_ts_a_tag_m_col_list = list() + s_stb_d_tb_d_ts_m_tag_list = list() + s_stb_d_tb_d_ts_a_tag_list = list() for i in range(count): - d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", c0="f")) - s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"')) - s_stb_s_tb_a_col_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_add_tag=True)) - s_stb_s_tb_m_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_min_tag=True)) - s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) - s_stb_d_tb_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_am_tag=True)) - s_stb_d_tb_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_ma_tag=True)) - s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) - s_stb_s_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_am_tag=True)) - s_stb_s_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_ma_tag=True)) - s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) - s_stb_d_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_am_tag=True)) - s_stb_d_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_ma_tag=True)) - - return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_col_a_tag_list, s_stb_s_tb_m_col_m_tag_list, \ - s_stb_d_tb_list, s_stb_d_tb_a_col_m_tag_list, s_stb_d_tb_a_tag_m_col_list, s_stb_s_tb_d_ts_list, \ - s_stb_s_tb_d_ts_a_col_m_tag_list, s_stb_s_tb_d_ts_a_tag_m_col_list, s_stb_d_tb_d_ts_list, \ - s_stb_d_tb_d_ts_a_col_m_tag_list, s_stb_d_tb_d_ts_a_tag_m_col_list + d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", value="f")) + s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"')) + s_stb_s_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, 
t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_add_tag=True)) + s_stb_s_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', t_mul_tag=True)) + s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)) + s_stb_d_tb_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_mul_tag=True)) + s_stb_d_tb_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, t_add_tag=True)) + s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0)) + s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_mul_tag=True)) + s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', ts=0, t_add_tag=True)) + s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0)) + s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_mul_tag=True)) + s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, t_add_tag=True)) + + return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \ + s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \ + s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \ + s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list def genMultiThreadSeq(self, sql_list): @@ -1045,7 +1102,7 @@ class TDTestCase: """ tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list)) @@ -1056,16 +1113,16 @@ class TDTestCase: tdSql.query(f"select * from {stb_name};") tdSql.checkRows(1) - def sStbStbDdataAtcInsertMultiThreadCheckCase(self): + def sStbStbDdataAtInsertMultiThreadCheckCase(self): """ thread input same stb tb, different data, add columes and tags, result keep first data """ tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_s_tb_a_col_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_col_a_tag_list)) + 
s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list)) tdSql.query(f"show tables;") tdSql.checkRows(1) expected_tb_name = self.getNoIdTbName(stb_name)[0] @@ -1073,16 +1130,16 @@ class TDTestCase: tdSql.query(f"select * from {stb_name};") tdSql.checkRows(1) - def sStbStbDdataMtcInsertMultiThreadCheckCase(self): + def sStbStbDdataMtInsertMultiThreadCheckCase(self): """ thread input same stb tb, different data, minus columes and tags, result keep first data """ tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_s_tb_m_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_col_m_tag_list)) + s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list)) tdSql.query(f"show tables;") tdSql.checkRows(1) expected_tb_name = self.getNoIdTbName(stb_name)[0] @@ -1095,40 +1152,38 @@ class TDTestCase: thread input same stb, different tb, different data """ tdCom.cleanTb() - input_sql, stb_name = self.genFullTypeSql() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4] self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list)) tdSql.query(f"show tables;") tdSql.checkRows(6) - def sStbDtbDdataAcMtInsertMultiThreadCheckCase(self): - """ - #! concurrency conflict - """ + def sStbDtbDdataMtInsertMultiThreadCheckCase(self): """ thread input same stb, different tb, different data, add col, mul tag """ tdCom.cleanTb() - input_sql, stb_name = self.genFullTypeSql() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_col_m_tag_list)) + s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833639000000ns "omfdhyom" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "vqowydbc" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "plgkckpv" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "cujyqvlj" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz'), \ + (f'{stb_name} 1626006833639000000ns "twjxisat" t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'yzwswz')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list)) tdSql.query(f"show tables;") - tdSql.checkRows(6) + tdSql.checkRows(3) - def sStbDtbDdataAtMcInsertMultiThreadCheckCase(self): - """ - #! 
concurrency conflict - """ + def sStbDtbDdataAtInsertMultiThreadCheckCase(self): """ thread input same stb, different tb, different data, add tag, mul col """ tdCom.cleanTb() - input_sql, stb_name = self.genFullTypeSql() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_m_col_list)) + s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list)) tdSql.query(f"show tables;") tdSql.checkRows(6) @@ -1138,89 +1193,94 @@ class TDTestCase: """ tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_s_tb_d_ts_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[7] + s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vozamcts",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "rljjrrul" id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="bmcanhbs",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "basanglx" id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="enqkyvmb",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "clsajzpp" id="{tb_name}",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="eivaegjk",t8=L"ncharTagValue"', 'dwpthv'), \ + (f'{stb_name} 0 "jitwseso" id="{tb_name}",t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="yhlwkddq",t8=L"ncharTagValue"', 'dwpthv')] self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list)) tdSql.query(f"show tables;") tdSql.checkRows(1) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(6) - def sStbStbDdataDtsAcMtInsertMultiThreadCheckCase(self): + def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self): """ thread input same stb tb, different ts, add col, mul tag """ tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_s_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_col_m_tag_list)) + s_stb_s_tb_d_ts_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list)) tdSql.query(f"show tables;") tdSql.checkRows(1) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(6) tdSql.query(f"select * from {stb_name} where t8 is not NULL") tdSql.checkRows(6) - tdSql.query(f"select * from {tb_name} where c11 is not NULL;") - tdSql.checkRows(5) - def sStbStbDdataDtsAtMcInsertMultiThreadCheckCase(self): + def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self): """ thread input same stb tb, different ts, add tag, mul col """ tdCom.cleanTb() tb_name = tdCom.getLongName(7, "letters") - input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name) + input_sql, stb_name 
= self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_s_tb_d_ts_a_tag_m_col_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[9] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_m_col_list)) + s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="hpxzrdiw",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "yqeztggb" id="{tb_name}",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="gdtblmrc",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "gbkinqdk" id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="iqniuvco",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "ldxxejbd" id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vxkipags",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl'), \ + (f'{stb_name} 0 "tlvzwjes" id="{tb_name}",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="enwrlrtj",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue"', 'bokaxl')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list)) tdSql.query(f"show tables;") tdSql.checkRows(1) tdSql.query(f"select * from {stb_name}") tdSql.checkRows(6) - for c in ["c7", "c8", "c9"]: - tdSql.query(f"select * from {stb_name} where {c} is NULL") - tdSql.checkRows(5) for t in ["t10", "t11"]: tdSql.query(f"select * from {stb_name} where {t} is not NULL;") - tdSql.checkRows(6) + tdSql.checkRows(0) def sStbDtbDdataDtsInsertMultiThreadCheckCase(self): """ thread input same stb, different tb, data, ts """ tdCom.cleanTb() - input_sql, stb_name = self.genFullTypeSql() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10] self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list)) tdSql.query(f"show tables;") tdSql.checkRows(6) - def sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase(self): - """ - # ! 
concurrency conflict - """ + def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self): """ thread input same stb, different tb, data, ts, add col, mul tag """ tdCom.cleanTb() - input_sql, stb_name = self.genFullTypeSql() + input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"") self.resCmp(input_sql, stb_name) - s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11] - self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_a_col_m_tag_list)) + s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "zbvwckcd" t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "vymcjfwc" t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "laumkwfn" t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg'), \ + (f'{stb_name} 0 "nyultzxr" t0=false,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64', 'pcppkg')] + self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list)) tdSql.query(f"show tables;") - tdSql.checkRows(6) - + tdSql.checkRows(3) def test(self): # input_sql1 = "stb2_5 1626006833610ms 3f64 host=\"host0\",host2=L\"host2\"" # input_sql2 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 1626006933640000000ns" try: - input_sql, stb_name = self.genFullTypeSql() - self.resCmp(input_sql, stb_name) + input_sql = f'test_nchar 0 L"涛思数据" t0=f,t1=L"涛思数据",t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64' + self._conn.insert_telnet_lines([input_sql]) + # input_sql, stb_name = self.genFullTypeSql() + # self.resCmp(input_sql, stb_name) except LinesError as err: print(err.errno) # self._conn.insert_telnet_lines([input_sql2]) @@ -1232,68 +1292,64 @@ class TDTestCase: # self._conn.insert_telnet_lines([input_sql4]) def runAll(self): - # self.initCheckCase() - # self.boolTypeCheckCase() - self.symbolsCheckCase() - - - - - # self.tsCheckCase() - # self.idSeqCheckCase() - # self.idUpperCheckCase() - # self.noIdCheckCase() - # self.maxColTagCheckCase() - - # self.idIllegalNameCheckCase() - # self.idStartWithNumCheckCase() - # self.nowTsCheckCase() - # self.dateFormatTsCheckCase() - # self.illegalTsCheckCase() - # self.tagValueLengthCheckCase() - # self.colValueLengthCheckCase() - # self.tagColIllegalValueCheckCase() - # self.duplicateIdTagColInsertCheckCase() - # self.noIdStbExistCheckCase() - # self.duplicateInsertExistCheckCase() - # self.tagColBinaryNcharLengthCheckCase() - # self.tagColAddDupIDCheckCase() - # self.tagColAddCheckCase() - # self.tagMd5Check() - # self.tagColBinaryMaxLengthCheckCase() - # # self.tagColNcharMaxLengthCheckCase() - # self.batchInsertCheckCase() - # self.multiInsertCheckCase(1000) - # self.batchErrorInsertCheckCase() - # # MultiThreads + self.initCheckCase() + self.boolTypeCheckCase() + # ! 
leave a bug + #self.symbolsCheckCase() + self.tsCheckCase() + self.idSeqCheckCase() + self.idUpperCheckCase() + self.noIdCheckCase() + self.maxColTagCheckCase() + + self.idIllegalNameCheckCase() + self.idStartWithNumCheckCase() + self.nowTsCheckCase() + self.dateFormatTsCheckCase() + self.illegalTsCheckCase() + self.tagValueLengthCheckCase() + self.colValueLengthCheckCase() + self.tagColIllegalValueCheckCase() + self.duplicateIdTagColInsertCheckCase() + self.noIdStbExistCheckCase() + self.duplicateInsertExistCheckCase() + self.tagColBinaryNcharLengthCheckCase() + self.tagColAddDupIDCheckCase() + self.tagColAddCheckCase() + self.tagMd5Check() + self.tagColBinaryMaxLengthCheckCase() + self.tagColNcharMaxLengthCheckCase() + + self.batchInsertCheckCase() + self.multiInsertCheckCase(1000) + self.batchErrorInsertCheckCase() + self.multiColsInsertCheckCase() + self.blankColInsertCheckCase() + self.blankTagInsertCheckCase() + self.chineseCheckCase() + self.multiFieldCheckCase() + self.errorTypeCheckCase() + # MultiThreads # self.stbInsertMultiThreadCheckCase() # self.sStbStbDdataInsertMultiThreadCheckCase() - # self.sStbStbDdataAtcInsertMultiThreadCheckCase() - # self.sStbStbDdataMtcInsertMultiThreadCheckCase() + # self.sStbStbDdataAtInsertMultiThreadCheckCase() + # self.sStbStbDdataMtInsertMultiThreadCheckCase() # self.sStbDtbDdataInsertMultiThreadCheckCase() - - # # # ! concurrency conflict - # # self.sStbDtbDdataAcMtInsertMultiThreadCheckCase() - # # self.sStbDtbDdataAtMcInsertMultiThreadCheckCase() - + # self.sStbDtbDdataMtInsertMultiThreadCheckCase() + # self.sStbDtbDdataAtInsertMultiThreadCheckCase() # self.sStbStbDdataDtsInsertMultiThreadCheckCase() - - # # # ! concurrency conflict - # # self.sStbStbDdataDtsAcMtInsertMultiThreadCheckCase() - # # self.sStbStbDdataDtsAtMcInsertMultiThreadCheckCase() - + # self.sStbStbDdataDtsMtInsertMultiThreadCheckCase() + # self.sStbStbDdataDtsAtInsertMultiThreadCheckCase() # self.sStbDtbDdataDtsInsertMultiThreadCheckCase() - - # # ! 
concurrency conflict - # # self.sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase() - - + # self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase() def run(self): print("running {}".format(__file__)) self.createDb() try: + # self.symbolsCheckCase() self.runAll() + # self.test() except Exception as err: print(''.join(traceback.format_exception(None, err, err.__traceback__))) raise err diff --git a/tests/pytest/manualTest/TD-5114/continueCreateDn.py b/tests/pytest/manualTest/TD-5114/continueCreateDn.py index 4b724f0587a6a2bbe3f477e8a47e283c0924a29e..9494ee5f3685d3ddaeb1848a58878d63fa7a54b6 100644 --- a/tests/pytest/manualTest/TD-5114/continueCreateDn.py +++ b/tests/pytest/manualTest/TD-5114/continueCreateDn.py @@ -42,7 +42,7 @@ class TwoClients: tdSql.execute("drop database if exists db3") - # insert data with taosc + # insert data with c connector for i in range(10): os.system("taosdemo -f manualTest/TD-5114/insertDataDb3Replica2.json -y ") # # check data correct diff --git a/tests/pytest/query/filterOtherTypes.py b/tests/pytest/query/filterOtherTypes.py index f80552138deb6850a87c63bed0c3f543036e7c17..7d62f2502eaf7ef5e2591adadb1628a618233628 100644 --- a/tests/pytest/query/filterOtherTypes.py +++ b/tests/pytest/query/filterOtherTypes.py @@ -80,10 +80,12 @@ class TDTestCase: tdSql.error("select * from st where tbcol1 like '____'") # > for nchar type on column - tdSql.error("select * from st where tbcol2 > 'taosdata'") + tdSql.query("select * from st where tbcol2 > 'taosdata'") + tdSql.checkRows(10) # >= for nchar type on column - tdSql.error("select * from st where tbcol2 >= 'taosdata'") + tdSql.query("select * from st where tbcol2 >= 'taosdata'") + tdSql.checkRows(10) # = for nchar type on column tdSql.query("select * from st where tbcol2 = 'taosdata1'") @@ -98,10 +100,12 @@ class TDTestCase: tdSql.checkRows(9) # > for nchar type on column - tdSql.error("select * from st where tbcol2 < 'taodata'") + tdSql.query("select * from st where tbcol2 < 'taodata'") + tdSql.checkRows(0) # >= for nchar type on column - tdSql.error("select * from st where tbcol2 <= 'taodata'") + tdSql.query("select * from st where tbcol2 <= 'taodata'") + tdSql.checkRows(0) # % for nchar type on column case 1 tdSql.query("select * from st where tbcol2 like '%'") @@ -140,10 +144,12 @@ class TDTestCase: tdSql.checkRows(10) # > for binary type on column - tdSql.error("select * from st where tbcol3 > '涛思数据'") + tdSql.query("select * from st where tbcol3 > '涛思数据'") + tdSql.checkRows(10) # >= for binary type on column - tdSql.error("select * from st where tbcol3 >= '涛思数据'") + tdSql.query("select * from st where tbcol3 >= '涛思数据'") + tdSql.checkRows(10) # = for binary type on column tdSql.query("select * from st where tbcol3 = '涛思数据1'") @@ -158,10 +164,12 @@ class TDTestCase: tdSql.checkRows(9) # > for binary type on column - tdSql.error("select * from st where tbcol3 < '涛思数据'") + tdSql.query("select * from st where tbcol3 < '涛思数据'") + tdSql.checkRows(0) # >= for binary type on column - tdSql.error("select * from st where tbcol3 <= '涛思数据'") + tdSql.query("select * from st where tbcol3 <= '涛思数据'") + tdSql.checkRows(0) # % for binary type on column case 1 tdSql.query("select * from st where tbcol3 like '%'") diff --git a/tests/pytest/query/isNullTest.py b/tests/pytest/query/isNullTest.py index 7b79679c7d9d9ac4629a69b32acb1a11b61a83c1..f9fbb47715043fb63a51fbb127cb7a889883fafb 100644 --- a/tests/pytest/query/isNullTest.py +++ b/tests/pytest/query/isNullTest.py @@ -66,7 +66,7 @@ class TDTestCase: tdSql.checkData(0, 0, 12) tdSql.query("select 
count(*) from st where t2 <> '' ") - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 12) tdSql.query("select count(*) from st where t3 is null") tdSql.checkData(0, 0, 12) @@ -81,7 +81,7 @@ class TDTestCase: tdSql.checkData(0, 0, 12) tdSql.query("select count(*) from st where t3 <> '' ") - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 12) tdSql.query("select count(*) from st where c1 is not null") tdSql.checkData(0, 0, 30) diff --git a/tests/pytest/query/nestquery_last_row.py b/tests/pytest/query/nestquery_last_row.py index 3c4ada51744f620ca589266113acf1e3d8cfef43..8e9ee540c74569caffa18c209b745cbd70ecc71a 100644 --- a/tests/pytest/query/nestquery_last_row.py +++ b/tests/pytest/query/nestquery_last_row.py @@ -17,6 +17,7 @@ from util.log import tdLog from util.cases import tdCases from util.sql import tdSql import random +import time class TDTestCase: @@ -24,7 +25,8 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.ts = 1600000000000 + now = time.time() + self.ts = int(round(now * 1000)) self.num = 10 def run(self): diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py index 774a1e5f42403a6b5f67678e53be5e07beaccde2..f22cfcd4ec709b1d4440065fab398979afeb3adc 100644 --- a/tests/pytest/query/operator_cost.py +++ b/tests/pytest/query/operator_cost.py @@ -25,7 +25,8 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.ts = 1600000000000 + now = time.time() + self.ts = int(round(now * 1000)) self.num = 10 def run(self): diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json index 265f42036bc5a4e13dc0766b66fccf32924d7185..ad85f9607b72c5d4562266508bfdcf68837c33bd 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json @@ -22,7 +22,7 @@ "cache": 50, "blocks": 8, "precision": "ms", - "keep": 365, + "keep": 36500, "minRows": 100, "maxRows": 4096, "comp":2, diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py index 7f551bcefd152007ebab7a1bc7d110468b69115a..5477223aad0262cf2874496481bc5d138fb3d2cf 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py @@ -13,6 +13,7 @@ import sys import os +import time from util.log import * from util.cases import * from util.sql import * @@ -24,6 +25,9 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + now = time.time() + self.ts = int(round(now * 1000)) + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -50,6 +54,7 @@ class TDTestCase: # insert: create one or mutiple tables per sql and insert multiple rows per sql # test case for https://jira.taosdata.com:18080/browse/TD-4985 + os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -57,25 +62,25 @@ class TDTestCase: for i in range(1000): tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_8888 
values(%d, %d, %d,'test98.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')''' - % (1600000000000 + i, i, -10000+i, i)) + % (self.ts + i, i, -10000+i, i)) tdSql.query("select * from stb0 where c2 like 'test99%' ") tdSql.checkRows(1000) tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" ) @@ -176,7 +181,7 @@ class TDTestCase: tdSql.checkData(0, 1, 5) tdSql.checkData(1, 1, 6) tdSql.checkData(2, 1, 7) - os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") + def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py index dfa829866d945b06d232aeeaba266b11ae229234..ec55acb848352def34e3090e66c4ef392b737ce0 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py @@ -26,7 +26,10 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.ts = 1538548685000 + os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql") + + now = time.time() + self.ts = int(round(now * 1000)) self.num = 100 def get_random_string(self, length): @@ -691,7 +694,7 @@ class TDTestCase: tdSql.query("describe table_40") tdSql.checkRows(4096) - os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql") + def stop(self): diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index 25af3a1041dbcd06319dd6abfeb82fd33240c013..c9c4ae2c1b650da99853d6c82106b3f6ee80d0c0 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -22,7 +22,7 @@ "cache": 50, "blocks": 8, "precision": "ms", - "keep": 365, + "keep": 36500, "minRows": 100, "maxRows": 4096, "comp":2, diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 1b56830189623d344168918f239887c3359b2645..197f8a208e85ca4ce57c06518a433ec3a3acbac3 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json 
+++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -41,7 +41,7 @@ "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1000, + "insert_rows": 1001, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 51b064a08e5cd55401f9cf803a8683653f722679..82c57a656dfea12f80fe4eb2b530742c5bfb0916 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -120,7 +120,7 @@ class taosdemoPerformace: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdemo" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index 5662881031a01d19398cce223892eebbd8133c97..3cdcdcef5afcb14c04204d2489571bdfed937080 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -36,7 +36,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdemo" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py index b70525ae4d87465a59ad524067d8b1e4a61d526a..70df535f59cbb97469b7a73e4e230d9a8671bfc7 100644 --- a/tests/pytest/tools/taosdemoTestTblAlt.py +++ b/tests/pytest/tools/taosdemoTestTblAlt.py @@ -26,7 +26,7 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.numberOfTables = 10 + self.numberOfTables = 8 self.numberOfRecords = 1000000 def getBuildPath(self): @@ -86,7 +86,7 @@ class TDTestCase: while True: print("query started") try: - tdSql.query("select * from test.t9") + tdSql.query("select * from test.t7") except Exception as e: tdLog.info("select * test failed") time.sleep(2) @@ -100,8 +100,8 @@ class TDTestCase: print("alter table test.meters add column c10 int") tdSql.execute("alter table test.meters add column c10 int") - print("insert into test.t9 values (now, 1, 2, 3, 4, 0)") - tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0)") + print("insert into test.t7 values (now, 1, 2, 3, 4, 0)") + tdSql.execute("insert into test.t7 values (now, 1, 2, 3, 4, 0)") def run(self): tdSql.prepare() diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py index bed0564139e20fb6c562a7258af0cbd5b542069b..839988375b652b0cfad09d8a6de7697de19609ea 100644 --- a/tests/pytest/tools/taosdumpTest2.py +++ b/tests/pytest/tools/taosdumpTest2.py @@ -37,7 +37,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py similarity index 98% rename from tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py rename to tests/pytest/tools/taosdumpTestNanoSupport.py 
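The getBuildPath hunks above (taosdemoTestTblAlt.py and taosdumpTest2.py here, and the earlier taosdemoPerformance.py and taosdemoTest.py hunks) all switch the lookup key to the taosd binary. A minimal standalone sketch of that shared lookup, assuming the usual build/bin layout; the function name is illustrative and not part of the patch:
```
import os

def find_build_root(proj_path):
    # Walk the project tree and return the build root: the directory whose
    # build/bin folder contains the taosd binary, skipping packaging artifacts,
    # exactly as the patched getBuildPath helpers do.
    for root, dirs, files in os.walk(proj_path):
        if "taosd" in files:
            root_real_path = os.path.dirname(os.path.realpath(root))
            if "packaging" not in root_real_path:
                return root[: len(root) - len("/build/bin")]
    return ""
```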
index ca8832170b7706621f5ef9d3225fe2cf16141c34..55f1671daaa09b148bb87d661b8bd1248e6cbb3a 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -44,7 +44,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -136,7 +136,7 @@ class TDTestCase: # dump part data with -S -E os.system( - '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % + '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % @@ -218,7 +218,7 @@ class TDTestCase: "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) os.system( - '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % + '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % @@ -299,7 +299,7 @@ class TDTestCase: "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) os.system( - '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % + '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' % binPath) os.system( '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % diff --git a/tests/pytest/tsdb/tsdbCompClusterReplica2.py b/tests/pytest/tsdb/tsdbCompClusterReplica2.py index 2e016deea0b78d6cf9f76a917ca49fc2c5744a6e..cfda271497cde59e8dbe60150ddf935ba63fd9be 100644 --- a/tests/pytest/tsdb/tsdbCompClusterReplica2.py +++ b/tests/pytest/tsdb/tsdbCompClusterReplica2.py @@ -24,7 +24,7 @@ from random import choice class TwoClients: def initConnection(self): - self.host = "chenhaoran02" + self.host = "chenhaoran01" self.user = "root" self.password = "taosdata" self.config = "/etc/taos/" @@ -116,8 +116,10 @@ class TwoClients: sleep(3) tdSql.execute(" drop dnode 'chenhaoran02:6030'; ") sleep(20) - os.system("rm -rf /var/lib/taos/*") + # remove data file; + os.system("rm -rf /home/chr/data/data0/*") print("clear dnode chenhaoran02'data files") + sleep(5) os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") print("start taosd") sleep(10) diff --git a/tests/script/general/compute/ceil.sim b/tests/script/general/compute/ceil.sim new file mode 100644 index 0000000000000000000000000000000000000000..8e8bcd04f003b1b86c8293cdb9a698da24073b38 --- /dev/null +++ b/tests/script/general/compute/ceil.sim @@ -0,0 +1,288 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 2000 +sql connect + +$dbPrefix = m_di_db +$tbPrefix = m_di_tb +$mtPrefix = m_di_mt +$tbNum = 2 +$rowNum = 5000 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . 
$i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( $i ) + + $x = 0 + $y = 0 + + $v0 = 5000.0 + $v1 = -5000.1 + $v2 = 5000.2 + $v3 = -5000.3 + $v4 = 5000.4 + $v5 = -5000.5 + $v6 = 5000.6 + $v7 = -5000.7 + $v8 = 5000.8 + $v9 = -5000.9 + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + $val = $v0 + + if $y == 0 then + $val = $v0 + endi + + if $y == 1 then + $val = $v1 + endi + + if $y == 2 then + $val = $v2 + endi + + if $y == 3 then + $val = $v3 + endi + + if $y == 4 then + $val = $v4 + endi + + if $y == 5 then + $val = $v5 + endi + + if $y == 6 then + $val = $v6 + endi + + if $y == 7 then + $val = $v7 + endi + + if $y == 8 then + $val = $v8 + endi + + if $y == 9 then + $val = $v9 + endi + + $tinyint = $x / 128 + sql insert into $tb values ($ms , $x , $val , $x , $x , $tinyint , $x , $x , $x , $x ) + $x = $x + 1 + $y = $y + 1 + if $y == 10 then + $y = 0 + endi + endw + + $i = $i + 1 +endw + +sleep 100 + +print =============== step2 +$i = 1 +$tb = $tbPrefix . $i + +sql select ceil(c2) from $tb +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data10 +if $data10 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data20 +if $data20 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data30 +if $data30 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data40 +if $data40 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data50 +if $data50 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data60 +if $data60 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data70 +if $data70 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data80 +if $data80 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb +print ===> $data90 +if $data90 != -5000.00000 then + return -1 +endi + +sql select ceil(c5) from $tb +print ===> $data10 +if $data10 != 0 then + return -1 +endi +sql select ts, ceil(c2) from $tb +sql select c2, ceil(c2) from $tb +sql select c2, c3, ceil(c2) from $tb +sql select ts, c2, c3, ceil(c2) from $tb + +sql select ceil(c2), ceil(c6) from $tb + +sql select ts, ceil(c2), ceil(c6) from $tb +sql select c2, ceil(c2), ceil(c6) from $tb +sql select c2, c3, ceil(c2), ceil(c6) from $tb +sql select ts, c2, c3, ceil(c2), ceil(c6) from $tb + +sql select ceil(c2), floor(c2), round(c2) from $tb + +sql select ts, ceil(c2), floor(c2), round(c2) from $tb +sql select c2, ceil(c2), floor(c2), round(c2) from $tb +sql select c2, c3, ceil(c2), floor(c2), round(c2) from $tb +sql select ts, c2, c3, ceil(c2), floor(c2), round(c2) from $tb + +sql select ts, ceil(c2) from $mt +sql select c2, ceil(c2) from $mt +sql select c2, c3, ceil(c2) from $mt +sql select ts, c2, c3, ceil(c2) from $mt + +sql select ceil(c2), ceil(c6) from $mt + +sql select ts, ceil(c2), ceil(c6) from $mt +sql select c2, ceil(c2), ceil(c6) from $mt +sql select c2, c3, ceil(c2), ceil(c6) from $mt +sql select ts, c2, c3, ceil(c2), ceil(c6) from $mt + +sql select ceil(c2), ceil(c2), round(c2) from $mt + +sql select ts, ceil(c2), floor(c2), round(c2) from $mt +sql select c2, 
ceil(c2), floor(c2), round(c2) from $mt +sql select c2, c3, ceil(c2), floor(c2), round(c2) from $mt +sql select ts, c2, c3, ceil(c2), floor(c2), round(c2) from $mt + +sql_error select ceil(c7) from $tb +sql_error select ceil(c8) from $tb +sql_error select ceil(c9) from $tb +sql_error select ceil(ts) from $tb +sql_error select ceil(c2+2) from $tb +sql_error select ceil(c2) from $tb where ts > 0 and ts < now + 100m interval(10m) +sql_error select ceil(ceil(c2)) from $tb +sql_error select ceil(c2) from m_di_tb1 where c2 like '2%' + +print =============== step3 +sql select ceil(c2) from $tb where c2 <= 5001.00000 +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 <= 5001.00000 +print ===> $data10 +if $data10 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 <= 5001.00000 +print ===> $data20 +if $data20 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 <= 5001.00000 +print ===> $data70 +if $data70 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 <= 5001.00000 +print ===> $data80 +if $data80 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 <= 5001.00000 +print ===> $data90 +if $data90 != -5000.00000 then + return -1 +endi + +print =============== step4 +sql select ceil(c2) from $tb where c2 >= -5001.00000 +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 >= -5001.00000 +print ===> $data10 +if $data10 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 >= -5001.00000 +print ===> $data20 +if $data20 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 >= -5001.00000 +print ===> $data70 +if $data70 != -5000.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 >= -5001.00000 +print ===> $data80 +if $data80 != 5001.00000 then + return -1 +endi +sql select ceil(c2) from $tb where c2 >= -5001.00000 +print ===> $data90 +if $data90 != -5000.00000 then + return -1 +endi + +print =============== step5 +sql select ceil(c1) as b from $tb interval(1m) -x step5 + return -1 +step5: + +print =============== step6 +sql select ceil(c1) as b from $tb where ts < now + 4m interval(1m) -x step6 + return -1 +step6: + +print =============== clear + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/compute/floor.sim b/tests/script/general/compute/floor.sim new file mode 100644 index 0000000000000000000000000000000000000000..0fc14515acd576505dfb48328eda18fabb51e8c5 --- /dev/null +++ b/tests/script/general/compute/floor.sim @@ -0,0 +1,288 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 2000 +sql connect + +$dbPrefix = m_di_db +$tbPrefix = m_di_tb +$mtPrefix = m_di_mt +$tbNum = 2 +$rowNum = 10000 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . 
$i + sql create table $tb using $mt tags( $i ) + + $x = 0 + $y = 0 + + $v0 = 5000.0 + $v1 = -5000.1 + $v2 = 5000.2 + $v3 = -5000.3 + $v4 = 5000.4 + $v5 = -5000.5 + $v6 = 5000.6 + $v7 = -5000.7 + $v8 = 5000.8 + $v9 = -5000.9 + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + $val = $v0 + + if $y == 0 then + $val = $v0 + endi + + if $y == 1 then + $val = $v1 + endi + + if $y == 2 then + $val = $v2 + endi + + if $y == 3 then + $val = $v3 + endi + + if $y == 4 then + $val = $v4 + endi + + if $y == 5 then + $val = $v5 + endi + + if $y == 6 then + $val = $v6 + endi + + if $y == 7 then + $val = $v7 + endi + + if $y == 8 then + $val = $v8 + endi + + if $y == 9 then + $val = $v9 + endi + + $tinyint = $x / 128 + sql insert into $tb values ($ms , $x , $val , $x , $x , $tinyint , $x , $x , $x , $x ) + $x = $x + 1 + $y = $y + 1 + if $y == 10 then + $y = 0 + endi + endw + + $i = $i + 1 +endw + +sleep 100 + +print =============== step2 +$i = 1 +$tb = $tbPrefix . $i + +sql select floor(c2) from $tb +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data10 +if $data10 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data20 +if $data20 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data30 +if $data30 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data40 +if $data40 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data50 +if $data50 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data60 +if $data60 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data70 +if $data70 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data80 +if $data80 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb +print ===> $data90 +if $data90 != -5001.00000 then + return -1 +endi + +sql select floor(c5) from $tb +print ===> $data10 +if $data10 != 0 then + return -1 +endi +sql select ts, floor(c2) from $tb +sql select c2, floor(c2) from $tb +sql select c2, c3, floor(c2) from $tb +sql select ts, c2, c3, floor(c2) from $tb + +sql select floor(c2), floor(c6) from $tb + +sql select ts, floor(c2), floor(c6) from $tb +sql select c2, floor(c2), floor(c6) from $tb +sql select c2, c3, floor(c2), floor(c6) from $tb +sql select ts, c2, c3, floor(c2), floor(c6) from $tb + +sql select ceil(c2), floor(c2), round(c2) from $tb + +sql select ts, ceil(c2), floor(c2), round(c2) from $tb +sql select c2, ceil(c2), floor(c2), round(c2) from $tb +sql select c2, c3, ceil(c2), floor(c2), round(c2) from $tb +sql select ts, c2, c3, ceil(c2), floor(c2), round(c2) from $tb + +sql select ts, floor(c2) from $mt +sql select c2, floor(c2) from $mt +sql select c2, c3, floor(c2) from $mt +sql select ts, c2, c3, floor(c2) from $mt + +sql select floor(c2), floor(c6) from $mt + +sql select ts, floor(c2), floor(c6) from $mt +sql select c2, floor(c2), floor(c6) from $mt +sql select c2, c3, floor(c2), floor(c6) from $mt +sql select ts, c2, c3, floor(c2), floor(c6) from $mt + +sql select ceil(c2), floor(c2), round(c2) from $mt + +sql select ts, ceil(c2), floor(c2), round(c2) from $mt +sql select c2, ceil(c2), floor(c2), round(c2) from $mt +sql select c2, c3, ceil(c2), floor(c2), round(c2) from $mt +sql select ts, c2, c3, ceil(c2), floor(c2), round(c2) from $mt + +sql_error select floor(c7) from $tb +sql_error select floor(c8) from $tb +sql_error 
select floor(c9) from $tb +sql_error select floor(ts) from $tb +sql_error select floor(c2+2) from $tb +sql_error select floor(c2) from $tb where ts > 0 and ts < now + 100m interval(10m) +sql_error select floor(floor(c2)) from $tb +sql_error select floor(c2) from m_di_tb1 where c2 like '2%' + +print =============== step3 +sql select floor(c2) from $tb where c2 <= 5001.00000 +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 <= 5001.00000 +print ===> $data10 +if $data10 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 <= 5001.00000 +print ===> $data20 +if $data20 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 <= 5001.00000 +print ===> $data70 +if $data70 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 <= 5001.00000 +print ===> $data80 +if $data80 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 <= 5001.00000 +print ===> $data90 +if $data90 != -5001.00000 then + return -1 +endi + +print =============== step4 +sql select floor(c2) from $tb where c2 >= -5001.00000 +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 >= -5001.00000 +print ===> $data10 +if $data10 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 >= -5001.00000 +print ===> $data20 +if $data20 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 >= -5001.00000 +print ===> $data70 +if $data70 != -5001.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 >= -5001.00000 +print ===> $data80 +if $data80 != 5000.00000 then + return -1 +endi +sql select floor(c2) from $tb where c2 >= -5001.00000 +print ===> $data90 +if $data90 != -5001.00000 then + return -1 +endi + +print =============== step5 +sql select floor(c1) as b from $tb interval(1m) -x step5 + return -1 +step5: + +print =============== step6 +sql select floor(c1) as b from $tb where ts < now + 4m interval(1m) -x step6 + return -1 +step6: + +print =============== clear + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/compute/round.sim b/tests/script/general/compute/round.sim new file mode 100644 index 0000000000000000000000000000000000000000..5e03b47785fc4358e0c4b3a92db7fbc9233d4dbe --- /dev/null +++ b/tests/script/general/compute/round.sim @@ -0,0 +1,288 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 2000 +sql connect + +$dbPrefix = m_di_db +$tbPrefix = m_di_tb +$mtPrefix = m_di_mt +$tbNum = 2 +$rowNum = 10000 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 nchar(5), c9 binary(10)) TAGS (tgcol int) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . 
$i + sql create table $tb using $mt tags( $i ) + + $x = 0 + $y = 0 + + $v0 = 5000.0 + $v1 = -5000.1 + $v2 = 5000.2 + $v3 = -5000.3 + $v4 = 5000.4 + $v5 = -5000.5 + $v6 = 5000.6 + $v7 = -5000.7 + $v8 = 5000.8 + $v9 = -5000.9 + + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + $val = $v0 + + if $y == 0 then + $val = $v0 + endi + + if $y == 1 then + $val = $v1 + endi + + if $y == 2 then + $val = $v2 + endi + + if $y == 3 then + $val = $v3 + endi + + if $y == 4 then + $val = $v4 + endi + + if $y == 5 then + $val = $v5 + endi + + if $y == 6 then + $val = $v6 + endi + + if $y == 7 then + $val = $v7 + endi + + if $y == 8 then + $val = $v8 + endi + + if $y == 9 then + $val = $v9 + endi + + $tinyint = $x / 128 + sql insert into $tb values ($ms , $x , $val , $x , $x , $tinyint , $x , $x , $x , $x ) + $x = $x + 1 + $y = $y + 1 + if $y == 10 then + $y = 0 + endi + endw + + $i = $i + 1 +endw + +sleep 100 + +print =============== step2 +$i = 1 +$tb = $tbPrefix . $i + +sql select round(c2) from $tb +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data10 +if $data10 != -5000.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data20 +if $data20 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data30 +if $data30 != -5000.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data40 +if $data40 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data50 +if $data50 != -5001.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data60 +if $data60 != 5001.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data70 +if $data70 != -5001.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data80 +if $data80 != 5001.00000 then + return -1 +endi +sql select round(c2) from $tb +print ===> $data90 +if $data90 != -5001.00000 then + return -1 +endi + +sql select round(c5) from $tb +print ===> $data10 +if $data10 != 0 then + return -1 +endi +sql select ts, round(c2) from $tb +sql select c2, round(c2) from $tb +sql select c2, c3, round(c2) from $tb +sql select ts, c2, c3, round(c2) from $tb + +sql select round(c2), round(c6) from $tb + +sql select ts, round(c2), round(c6) from $tb +sql select c2, round(c2), round(c6) from $tb +sql select c2, c3, round(c2), round(c6) from $tb +sql select ts, c2, c3, round(c2), round(c6) from $tb + +sql select ceil(c2), floor(c2), round(c2) from $tb + +sql select ts, ceil(c2), floor(c2), round(c2) from $tb +sql select c2, ceil(c2), floor(c2), round(c2) from $tb +sql select c2, c3, ceil(c2), floor(c2), round(c2) from $tb +sql select ts, c2, c3, ceil(c2), floor(c2), round(c2) from $tb + +sql select ts, round(c2) from $mt +sql select c2, round(c2) from $mt +sql select c2, c3, round(c2) from $mt +sql select ts, c2, c3, round(c2) from $mt + +sql select round(c2), round(c6) from $mt + +sql select ts, round(c2), round(c6) from $mt +sql select c2, round(c2), round(c6) from $mt +sql select c2, c3, round(c2), round(c6) from $mt +sql select ts, c2, c3, round(c2), round(c6) from $mt + +sql select ceil(c2), floor(c2), round(c2) from $mt + +sql select ts, ceil(c2), floor(c2), round(c2) from $mt +sql select c2, ceil(c2), floor(c2), round(c2) from $mt +sql select c2, c3, ceil(c2), floor(c2), round(c2) from $mt +sql select ts, c2, c3, ceil(c2), floor(c2), round(c2) from $mt + +sql_error select round(c7) from $tb +sql_error select round(c8) from $tb +sql_error 
select round(c9) from $tb +sql_error select round(ts) from $tb +sql_error select round(c2+2) from $tb +sql_error select round(c2) from $tb where ts > 0 and ts < now + 100m interval(10m) +sql_error select round(round(c2)) from $tb +sql_error select round(c2) from m_di_tb1 where c2 like '2%' + +print =============== step3 +sql select round(c2) from $tb where c2 <= 5001.00000 +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 <= 5001.00000 +print ===> $data10 +if $data10 != -5000.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 <= 5001.00000 +print ===> $data20 +if $data20 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 <= 5001.00000 +print ===> $data70 +if $data70 != -5001.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 <= 5001.00000 +print ===> $data80 +if $data80 != 5001.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 <= 5001.00000 +print ===> $data90 +if $data90 != -5001.00000 then + return -1 +endi + +print =============== step4 +sql select round(c2) from $tb where c2 >= -5001.00000 +print ===> $data00 +if $data00 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 >= -5001.00000 +print ===> $data10 +if $data10 != -5000.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 >= -5001.00000 +print ===> $data20 +if $data20 != 5000.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 >= -5001.00000 +print ===> $data70 +if $data70 != -5001.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 >= -5001.00000 +print ===> $data80 +if $data80 != 5001.00000 then + return -1 +endi +sql select round(c2) from $tb where c2 >= -5001.00000 +print ===> $data90 +if $data90 != -5001.00000 then + return -1 +endi + +print =============== step5 +sql select round(c1) as b from $tb interval(1m) -x step5 + return -1 +step5: + +print =============== step6 +sql select round(c1) as b from $tb where ts < now + 4m interval(1m) -x step6 + return -1 +step6: + +print =============== clear + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/between_and.sim b/tests/script/general/parser/between_and.sim index cdced47cb65aea79618540b57e159b741bf9288a..5db40471d85bcbafbcb6dcaa5912e6cfb9a66bc5 100644 --- a/tests/script/general/parser/between_and.sim +++ b/tests/script/general/parser/between_and.sim @@ -159,7 +159,7 @@ if $data11 != 3 then endi sql_error select * from st2 where f7 between 2.0 and 3.0; -sql_error select * from st2 where f8 between 2.0 and 3.0; -sql_error select * from st2 where f9 between 2.0 and 3.0; +sql select * from st2 where f8 between 2.0 and 3.0; +sql select * from st2 where f9 between 2.0 and 3.0; system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/condition.sim b/tests/script/general/parser/condition.sim index c3aed7e2a3b04c0ca2e27e2e62d92009e8b2fe8e..96cd1e80bd01ad6ffa197ccbe8c58ce6474f3339 100644 --- a/tests/script/general/parser/condition.sim +++ b/tests/script/general/parser/condition.sim @@ -2,7 +2,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 4 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 6 system sh/cfg.sh -n dnode1 -c cache -v 1 system sh/exec.sh -n dnode1 -s start @@ -135,11 +135,63 @@ while $i < $blockNum $ts0 = $ts0 + 259200000 endw +sql create table stb5 (ts timestamp, c1 int, c2 
float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 timestamp, t2 int, t3 float, t4 bigint, t5 smallint, t6 tinyint, t7 double, t8 bool, t9 binary(100), t10 nchar(10)) + +sql create table tb5_1 using stb5 tags('2021-05-05 18:19:01',1,1.0,1,1,1,1.0,true ,'111111111','1') +sql create table tb5_2 using stb5 tags('2021-05-05 18:19:02',2,2.0,2,2,2,2.0,true ,'222222222','2') +sql create table tb5_3 using stb5 tags('2021-05-05 18:19:03',3,3.0,3,3,3,3.0,false,'333333333','3') +sql create table tb5_4 using stb5 tags('2021-05-05 18:19:04',4,4.0,4,4,4,4.0,false,'444444444','4') +sql create table tb5_5 using stb5 tags('2021-05-05 18:19:05',5,5.0,5,5,5,5.0,true,'555555555','5') +sql create table tb5_6 using stb5 tags('2021-05-05 18:19:06',6,6.0,6,6,6,6.0,true,'666666666','6') +sql create table tb5_7 using stb5 tags(NULL,7,NULL,7,NULL,7,NULL,false,NULL,'7') +sql create table tb5_8 using stb5 tags('2021-05-05 18:19:08',NULL,8.0,NULL,8,NULL,8.0,NULL,'888888888',NULL) + +sql insert into tb5_1 values ('2021-05-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1') +sql insert into tb5_1 values ('2021-05-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2') +sql insert into tb5_1 values ('2021-05-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3') +sql insert into tb5_1 values ('2021-05-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4') +sql insert into tb5_1 values ('2021-05-05 18:19:04',11,11.0,11,11,11,11.0,true ,'11','11') +sql insert into tb5_1 values ('2021-05-05 18:19:05',12,12.0,12,12,12,12.0,true ,'12','12') +sql insert into tb5_1 values ('2021-05-05 18:19:06',13,13.0,13,13,13,13.0,false,'13','13') +sql insert into tb5_1 values ('2021-05-05 18:19:07',14,14.0,14,14,14,14.0,false,'14','14') +sql insert into tb5_2 values ('2021-05-05 18:19:08',21,21.0,21,21,21,21.0,true ,'21','21') +sql insert into tb5_2 values ('2021-05-05 18:19:09',22,22.0,22,22,22,22.0,true ,'22','22') +sql insert into tb5_2 values ('2021-05-05 18:19:10',23,23.0,23,23,23,23.0,false,'23','23') +sql insert into tb5_2 values ('2021-05-05 18:19:11',24,24.0,24,24,24,24.0,false,'24','24') +sql insert into tb5_3 values ('2021-05-05 18:19:12',31,31.0,31,31,31,31.0,true ,'31','31') +sql insert into tb5_3 values ('2021-05-05 18:19:13',32,32.0,32,32,32,32.0,true ,'32','32') +sql insert into tb5_3 values ('2021-05-05 18:19:14',33,33.0,33,33,33,33.0,false,'33','33') +sql insert into tb5_3 values ('2021-05-05 18:19:15',34,34.0,34,34,34,34.0,false,'34','34') +sql insert into tb5_4 values ('2021-05-05 18:19:16',41,41.0,41,41,41,41.0,true ,'41','41') +sql insert into tb5_4 values ('2021-05-05 18:19:17',42,42.0,42,42,42,42.0,true ,'42','42') +sql insert into tb5_4 values ('2021-05-05 18:19:18',43,43.0,43,43,43,43.0,false,'43','43') +sql insert into tb5_4 values ('2021-05-05 18:19:19',44,44.0,44,44,44,44.0,false,'44','44') +sql insert into tb5_5 values ('2021-05-05 18:19:20',51,51.0,51,51,51,51.0,true ,'51','51') +sql insert into tb5_5 values ('2021-05-05 18:19:21',52,52.0,52,52,52,52.0,true ,'52','52') +sql insert into tb5_5 values ('2021-05-05 18:19:22',53,53.0,53,53,53,53.0,false,'53','53') +sql insert into tb5_5 values ('2021-05-05 18:19:23',54,54.0,54,54,54,54.0,false,'54','54') +sql insert into tb5_6 values ('2021-05-05 18:19:24',61,61.0,61,61,61,61.0,true ,'61','61') +sql insert into tb5_6 values ('2021-05-05 18:19:25',62,62.0,62,62,62,62.0,true ,'62','62') +sql insert into tb5_6 values ('2021-05-05 18:19:26',63,63.0,63,63,63,63.0,false,'63','63') +sql insert into tb5_6 values ('2021-05-05 
18:19:27',64,64.0,64,64,64,64.0,false,'64','64') +sql insert into tb5_6 values ('2021-05-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) +sql insert into tb5_7 values ('2021-05-05 18:19:29',71,71.0,71,71,71,71.0,true ,'71','71') +sql insert into tb5_7 values ('2021-05-05 18:19:30',72,72.0,72,72,72,72.0,true ,'72','72') +sql insert into tb5_7 values ('2021-05-05 18:19:31',73,73.0,73,73,73,73.0,false,'73','73') +sql insert into tb5_7 values ('2021-05-05 18:19:32',74,74.0,74,74,74,74.0,false,'74','74') +sql insert into tb5_7 values ('2021-05-05 18:19:33',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) +sql insert into tb5_8 values ('2021-05-05 18:19:34',81,81.0,81,81,81,81.0,true ,'81','81') +sql insert into tb5_8 values ('2021-05-05 18:19:35',82,82.0,82,82,82,82.0,true ,'82','82') +sql insert into tb5_8 values ('2021-05-05 18:19:36',83,83.0,83,83,83,83.0,false,'83','83') +sql insert into tb5_8 values ('2021-05-05 18:19:37',84,84.0,84,84,84,84.0,false,'84','84') +sql insert into tb5_8 values ('2021-05-05 18:19:38',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) + sleep 100 sql connect run general/parser/condition_query.sim +run general/parser/condition_query2.sim print ================== restart server to commit data into disk system sh/exec.sh -n dnode1 -s stop -x SIGINT @@ -150,4 +202,90 @@ sql connect sleep 100 run general/parser/condition_query.sim +run general/parser/condition_query2.sim + +sql drop database if exists cdb + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 100 +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c maxtablespervnode -v 1000 +system sh/cfg.sh -n dnode1 -c minTablesPerVnode -v 1000 +system sh/exec.sh -n dnode1 -s start + +sql create database if not exists cdb +sql use cdb +sql create table stba (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int) +sql create table stbb (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 bool) +sql create table stbc (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 float) +sql create table stbd (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 binary(10)) +sql create table stbe (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 nchar(10)) + + +sql create table tba_0 using stba tags(0) +sql create table tba_1 using stba tags(1) +sql create table tba_2 using stba tags(2) +sql create table tba_3 using stba tags(3) +sql create table tba_4 using stba tags(4) +sql create table tba_5 using stba tags(5) +sql create table tba_6 using stba tags(6) +sql create table tba_7 using stba tags(7) +sql create table tba_8 using stba tags(8) +sql create table tba_9 using stba tags(9) + +sql create table tbb_0 using stbb tags(true) +sql create table tbb_1 using stbb tags(false) +sql create table tbb_2 using stbb tags(true) +sql create table tbb_3 using stbb tags(false) +sql create table tbb_4 using stbb tags(true) +sql create table tbb_5 using stbb tags(false) +sql create table tbb_6 using stbb tags(true) +sql create table tbb_7 using stbb tags(false) +sql create table tbb_8 using stbb tags(true) +sql create table tbb_9 using stbb tags(false) + +sql create table tbc_0 using stbc tags(0) +sql create table tbc_1 using stbc tags(1) +sql create table tbc_2 
using stbc tags(2) +sql create table tbc_3 using stbc tags(3) +sql create table tbc_4 using stbc tags(4) +sql create table tbc_5 using stbc tags(5) +sql create table tbc_6 using stbc tags(6) +sql create table tbc_7 using stbc tags(7) +sql create table tbc_8 using stbc tags(8) +sql create table tbc_9 using stbc tags(9) + + +sql create table tbd_0 using stbd tags('0000') +sql create table tbd_1 using stbd tags('1111') +sql create table tbd_2 using stbd tags('2222') +sql create table tbd_3 using stbd tags('3333') +sql create table tbd_4 using stbd tags('4444') +sql create table tbd_5 using stbd tags('5555') +sql create table tbd_6 using stbd tags('6666') +sql create table tbd_7 using stbd tags('7777') +sql create table tbd_8 using stbd tags('8888') +sql create table tbd_9 using stbd tags('9999') + +sql create table tbe_0 using stbe tags('0000') +sql create table tbe_1 using stbe tags('1111') +sql create table tbe_2 using stbe tags('2222') +sql create table tbe_3 using stbe tags('3333') +sql create table tbe_4 using stbe tags('4444') +sql create table tbe_5 using stbe tags('5555') +sql create table tbe_6 using stbe tags('6666') +sql create table tbe_7 using stbe tags('7777') +sql create table tbe_8 using stbe tags('8888') +sql create table tbe_9 using stbe tags('9999') + + +run general/parser/condition_query3.sim + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 100 +system sh/exec.sh -n dnode1 -s start + +run general/parser/condition_query3.sim diff --git a/tests/script/general/parser/condition_query.sim b/tests/script/general/parser/condition_query.sim index 8dfa8dae0c9e0c56116cb4132d1e940e99f45d48..bebcbe709f005337795fe1286b34fc264b287cb0 100644 --- a/tests/script/general/parser/condition_query.sim +++ b/tests/script/general/parser/condition_query.sim @@ -1,4 +1,3 @@ - sql use cdb; print "column test" @@ -11,7 +10,7 @@ if $rows != 28 then return -1 endi -sql_error select * from stb1 where c8 > 0 + sql_error select * from stb1 where c7 in (0,2,3,1); sql_error select * from stb1 where c8 in (true); sql_error select * from stb1 where c8 in (1,2); @@ -33,14 +32,119 @@ sql_error select * from stb1 where c4 != 'null'; sql_error select * from stb1 where c5 >= 'null'; sql_error select * from stb1 where c6 <= 'null'; sql_error select * from stb1 where c7 < 'nuLl'; -sql_error select * from stb1 where c8 < 'nuLl'; -sql_error select * from stb1 where c9 > 'nuLl'; +sql_error select * from stb1 where c1 match '.*'; +sql_error select * from stb1 where c2 match '.*'; +sql_error select * from stb1 where c3 match '.*'; +sql_error select * from stb1 where c4 match '.*'; +sql_error select * from stb1 where c5 match '.*'; +sql_error select * from stb1 where c6 match '.*'; +sql_error select * from stb1 where c7 match '.*'; +sql_error select * from stb1 where c9 match '.*'; +sql_error select * from stb1 where ts match '.*'; +sql_error select * from stb1 where c1 nmatch '.*'; +sql_error select * from stb1 where c2 nmatch '.*'; +sql_error select * from stb1 where c3 nmatch '.*'; +sql_error select * from stb1 where c4 nmatch '.*'; +sql_error select * from stb1 where c5 nmatch '.*'; +sql_error select * from stb1 where c6 nmatch '.*'; +sql_error select * from stb1 where c7 nmatch '.*'; +sql_error select * from stb1 where c9 nmatch '.*'; +sql_error select * from stb1 where ts nmatch '.*'; + sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b; sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where 
a.ts=b.ts and a.c1 > 50 or b.c1 < 60; sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60)); sql_error select * from stb1 where 'c2' is null; sql_error select * from stb1 where 'c2' is not null; +sql select * from stb1 where c9 > 'nuLl'; +if $rows != 0 then + return -1 +endi + +sql select * from stb1 where c8 = '22' or c8 >= '62'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:27.000@ then + return -1 +endi + +sql select * from stb1 where c8 < '11'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:03.000@ then + return -1 +endi + +sql select * from stb1 where c8 <> '11'; +if $rows != 27 then + return -1 +endi + + +sql select * from stb1 where c9 > 'nuLl'; +if $rows != 0 then + return -1 +endi + +sql select * from stb1 where c9 > '11' and c9 <= '21'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:07.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:08.000@ then + return -1 +endi + +sql select * from stb1 where c9 <= '11' and c9 > '2' and c9 <> 3; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:04.000@ then + return -1 +endi + +sql select * from stb1 where c8 > 0 +if $rows != 28 then + return -1 +endi + +sql select * from stb1 where c8 < 'nuLl'; +if $rows != 28 then + return -1 +endi + sql select * from stb1 where c2 > 3.0 or c2 < 60; if $rows != 28 then return -1 @@ -2071,179 +2175,61 @@ if $data20 != @21-07-16 01:00:00.899@ then return -1 endi - -print "ts test" -sql_error select ts,c1,c7 from stb1 where ts != '2021-05-05 18:19:27' -sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts < '2021-05-05 18:19:02.000'; -sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' and ts > '2021-05-05 18:19:20.000' and ts != '2021-05-05 18:19:22.000'; -sql_error select * from stb1 where ts2 like '2021-05-05%'; -sql_error select ts,c1,c2 from stb1 where (ts > '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:05.000') and ts > '2021-05-05 18:19:01.000' and ts < '2021-05-05 18:19:27.000'; -sql_error select ts,c1,c2 from stb1 where (ts > '2021-05-05 18:19:20.000' or ts < '2021-05-05 18:19:05.000') and ts != '2021-05-05 18:19:25.000'; -sql_error select ts,c1,c2 from stb1 where ((ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:15.000' and ts <= '2021-05-05 18:19:20.000') or (ts >= '2021-05-05 18:19:11.000' and ts <= '2021-05-05 18:19:14.000')); -sql_error select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:24.000'; -sql select * from stb1 where ts is null; -if $rows != 0 then - return -1 -endi -sql select * from stb1 where ts is not null and ts is null; -if $rows != 0 then - return -1 -endi - -sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' and ts < '2021-05-05 18:19:10.000'; -if $rows != 0 then - 
return -1 -endi - -sql select * from stb1 where ts > '2021-05-05 18:19:03.000' and ts < '2021-05-05 18:19:02'; -if $rows != 0 then +sql select * from stb1 where c8 > 0; +if $rows != 28 then return -1 endi -sql select * from stb1 where ts is not null; -if $rows != 29 then +sql select * from stb1 where c8 > 1 and c8 <= 21 and c8 < 11 and c8 >= 3; +if $rows != 2 then return -1 endi - -sql select * from stb1 where ts is not null or ts is null; -if $rows != 29 then +if $data00 != @21-05-05 18:19:02.000@ then return -1 endi - -sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:25.000'; -if $rows != 29 then +if $data10 != @21-05-05 18:19:03.000@ then return -1 endi -sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' and ts < '2021-05-05 18:19:26.000'; -if $rows != 1 then - return -1 -endi -if $data00 != @21-05-05 18:19:25.000@ then - return -1 -endi -sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:28.000'; -if $rows != 29 then - return -1 -endi -sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts > '2021-05-05 18:19:27.000'; -if $rows != 4 then - return -1 -endi -if $data00 != @21-05-05 18:19:25.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:26.000@ then +sql select * from stb1 where c8 = 11 or c8 = 12 or c8 is null or c8 in ('13','23') or (c8 like '%4' and c8 like '3_'); +if $rows != 6 then return -1 endi -if $data20 != @21-05-05 18:19:27.000@ then +if $data00 != @21-05-05 18:19:04.000@ then return -1 endi -if $data30 != @21-05-05 18:19:28.000@ then +if $data10 != @21-05-05 18:19:05.000@ then return -1 endi - -sql select ts,c1,c2 from stb1 where ts > '2021-05-05 18:19:20.000' or ts < '2021-05-05 18:19:05.000' or ts != '2021-05-05 18:19:25.000'; -if $rows != 29 then +if $data20 != @21-05-05 18:19:06.000@ then return -1 endi - -sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts <> '2021-05-05 18:19:25.000'; -if $rows != 29 then +if $data30 != @21-05-05 18:19:10.000@ then return -1 endi - -sql select ts,c1,c2 from stb1 where ((ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.999') or (ts >= '2021-05-05 18:19:15.000' and ts <= '2021-05-05 18:19:20.000') or (ts >= '2021-05-05 18:19:11.000' and ts <= '2021-05-05 18:19:14.999')); -if $rows != 16 then +if $data40 != @21-05-05 18:19:15.000@ then return -1 endi -if $data00 != @21-05-05 18:19:05.000@ then +if $data50 != @21-05-05 18:19:28.000@ then return -1 endi -sql select ts,c1,c2 from stb1 where (ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:12.000' and ts <= '2021-05-05 18:19:14.000') or (ts >= '2021-05-05 18:19:08.000' and ts <= '2021-05-05 18:19:17.000'); +sql select * from stb1 where c9 >= 3 and c9 <= 33; if $rows != 13 then return -1 endi -if $data00 != @21-05-05 18:19:05.000@ then - return -1 -endi - -sql select ts,c1,c2 from stb1 where (ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:02.000' and ts <= '2021-05-05 18:19:03.000') or (ts >= '2021-05-05 18:19:01.000' and ts <= '2021-05-05 18:19:08.000'); -if $rows != 10 then - return -1 -endi -if $data00 != @21-05-05 18:19:01.000@ then - return -1 -endi - -sql select ts,c1,c2 from stb1 where ((ts >= '2021-05-05 18:19:08.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:02.000' and ts <= '2021-05-05 18:19:03.000') or (ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:06.000') or (ts >= '2021-05-05 
18:19:03.000' and ts <= '2021-05-05 18:19:12.000')) and (ts >= '2021-05-05 18:19:10.000'); -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:10.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:11.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:12.000@ then - return -1 -endi -sql select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:25.000' and ts != '2021-05-05 18:19:18'; -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:26.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:27.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:28.000@ then - return -1 -endi - - -sql select * from stb1 where ts > '2021-05-05 18:19:03.000' and ts > '2021-05-05 18:19:25'; -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:26.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:27.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:28.000@ then - return -1 -endi - -sql select * from stb1 where ts < '2021-05-05 18:19:03.000' and ts < '2021-05-05 18:19:25'; -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:00.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:01.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:02.000@ then - return -1 -endi - -sql select * from stb1 where ts > '2021-05-05 18:19:23.000' and ts < '2021-05-05 18:19:25'; +sql select * from stb1 where c9 > 22 and c9 <= 38 and c8 < 30 and c8 >= 24; if $rows != 1 then return -1 endi -if $data00 != @21-05-05 18:19:24.000@ then +if $data00 != @21-05-05 18:19:11.000@ then return -1 endi -sql select * from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:25'; -if $rows != 25 then + +sql select * from stb1 where c9 = 11 or c9 = 12 or c9 is null or c9 in ('13','23') or ((c9 like '%4' or c9 like '3_') and c9 <> 34 and c9 != 44) and c9 < 45 and c9 like '1_'; +if $rows != 6 then return -1 endi if $data00 != @21-05-05 18:19:04.000@ then @@ -2255,485 +2241,104 @@ endi if $data20 != @21-05-05 18:19:06.000@ then return -1 endi - -sql select * from stb1 where ts < '2021-05-05 18:19:03.000' or ts < '2021-05-05 18:19:25'; -if $rows != 25 then - return -1 -endi -if $data00 != @21-05-05 18:19:00.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:01.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:02.000@ then - return -1 -endi - -sql select * from stb1 where ts > '2021-05-05 18:19:23.000' or ts < '2021-05-05 18:19:25'; -if $rows != 29 then - return -1 -endi -if $data00 != @21-05-05 18:19:00.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:01.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:02.000@ then - return -1 -endi - -sql select * from stb1 where (ts > '2021-05-05 18:19:23.000' or ts < '2021-05-05 18:19:25') and (ts > '2021-05-05 18:19:23.000' and ts < '2021-05-05 18:19:26'); -if $rows != 2 then +if $data30 != @21-05-05 18:19:07.000@ then return -1 endi -if $data00 != @21-05-05 18:19:24.000@ then +if $data40 != @21-05-05 18:19:10.000@ then return -1 endi -if $data10 != @21-05-05 18:19:25.000@ then +if $data50 != @21-05-05 18:19:28.000@ then return -1 endi -sql select * from stb1 where (ts > '2021-05-05 18:19:23.000' or ts < '2021-05-05 18:19:25') and (ts > '2021-05-05 18:19:23.000' or ts > '2021-05-05 18:19:26'); +sql select * from stb1 where c8 match '^1.*'; if $rows != 5 then return -1 endi -if $data00 != @21-05-05 18:19:24.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:25.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:26.000@ then - return 
-1 -endi -if $data30 != @21-05-05 18:19:27.000@ then - return -1 -endi -if $data40 != @21-05-05 18:19:28.000@ then - return -1 -endi - - -sql select * from stb2 where ts2 in ('2021-05-05 18:28:03','2021-05-05 18:28:05','2021-05-05 18:28:08'); -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:02.000@ then +if $data00 != @21-05-05 18:19:00.000@ then return -1 endi if $data10 != @21-05-05 18:19:04.000@ then return -1 endi -if $data20 != @21-05-05 18:19:07.000@ then - return -1 -endi - -sql select * from stb2 where t3 in ('2021-05-05 18:38:38','2021-05-05 18:38:28','2021-05-05 18:38:08') and ts2 in ('2021-05-05 18:28:04','2021-05-05 18:28:04','2021-05-05 18:28:03'); -if $rows != 2 then - return -1 -endi -if $data00 != @21-05-05 18:19:02.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:03.000@ then - return -1 -endi - -sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.ts < '2021-05-05 18:19:03.000' or a.ts >= '2021-05-05 18:19:13.000') and (b.ts >= '2021-05-05 18:19:01.000' and b.ts <= '2021-05-05 18:19:14.000'); -if $rows != 4 then - return -1 -endi -if $data00 != @21-05-05 18:19:01.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:02.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:13.000@ then - return -1 -endi -if $data30 != @21-05-05 18:19:14.000@ then - return -1 -endi - -sql select a.ts,c.ts,b.c1,c.u1,c.u2 from (select * from stb1) a, (select * from stb1) b, (select * from stb2) c where a.ts=b.ts and b.ts=c.ts and a.ts <= '2021-05-05 18:19:12.000' and b.ts >= '2021-05-05 18:19:06.000' and c.ts >= '2021-05-05 18:19:08.000' and c.ts <= '2021-05-05 18:19:11.000' and a.ts != '2021-05-05 18:19:10.000'; -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:08.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:09.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:11.000@ then - return -1 -endi - -sql select ts,c1,c2,c8 from (select * from stb1) where (ts <= '2021-05-05 18:19:06.000' or ts >= '2021-05-05 18:19:13.000') and (ts >= '2021-05-05 18:19:02.000' and ts <= '2021-05-05 18:19:14.000') and ts != '2021-05-05 18:19:04.000'; -if $rows != 6 then - return -1 -endi -if $data00 != @21-05-05 18:19:02.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:03.000@ then - return -1 -endi if $data20 != @21-05-05 18:19:05.000@ then return -1 endi if $data30 != @21-05-05 18:19:06.000@ then return -1 endi -if $data40 != @21-05-05 18:19:13.000@ then - return -1 -endi -if $data50 != @21-05-05 18:19:14.000@ then +if $data40 != @21-05-05 18:19:07.000@ then return -1 endi -sql select ts,c1,c2,c8 from (select * from stb1) where (ts <= '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:26.000' or ts = '2021-05-05 18:19:26.000') and ts != '2021-05-05 18:19:03.000' and ts != '2021-05-05 18:19:26.000'; -if $rows != 5 then +sql select * from stb1 where c8 match '1.*'; +if $rows != 10 then return -1 endi if $data00 != @21-05-05 18:19:00.000@ then return -1 endi -if $data10 != @21-05-05 18:19:01.000@ then +if $data10 != @21-05-05 18:19:04.000@ then return -1 endi -if $data20 != @21-05-05 18:19:02.000@ then +if $data20 != @21-05-05 18:19:05.000@ then return -1 endi -if $data30 != @21-05-05 18:19:27.000@ then +if $data30 != @21-05-05 18:19:06.000@ then return -1 endi -if $data40 != @21-05-05 18:19:28.000@ then +if $data40 != @21-05-05 18:19:07.000@ then return -1 endi - -print "tbname test" -sql_error select * from stb1 where tbname like '%3' and tbname like '%4'; - -sql 
select * from stb1 where tbname like 'tb%'; -if $rows != 29 then +if $data50 != @21-05-05 18:19:08.000@ then return -1 endi - -sql select * from stb1 where tbname like '%2'; -if $rows != 4 then +if $data60 != @21-05-05 18:19:12.000@ then return -1 endi -if $data00 != @21-05-05 18:19:08.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:09.000@ then +if $data70 != @21-05-05 18:19:16.000@ then return -1 endi -if $data20 != @21-05-05 18:19:10.000@ then +if $data80 != @21-05-05 18:19:20.000@ then return -1 endi -if $data30 != @21-05-05 18:19:11.000@ then +if $data90 != @21-05-05 18:19:24.000@ then return -1 endi -print "tag test" -sql select * from stb1 where t1 in (1,2) and t1 in (2,3); +sql select * from stb1 where c8 match '1.+'; if $rows != 4 then return -1 endi -if $data00 != @21-05-05 18:19:08.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:09.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:10.000@ then - return -1 -endi -if $data30 != @21-05-05 18:19:11.000@ then - return -1 -endi - -sql select * from stb2 where t1 in (1,2) and t2 in (2) and t3 in ('2021-05-05 18:58:57.000'); -if $rows != 0 then - return -1 -endi - -print "join test" -sql_error select * from tb1, tb2_1 where tb1.ts=tb2_1.ts or tb1.ts =tb2_1.ts; -sql select tb1.ts from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.ts < '2021-05-05 18:19:06.000'; -if $rows != 2 then - return -1 -endi if $data00 != @21-05-05 18:19:04.000@ then return -1 endi if $data10 != @21-05-05 18:19:05.000@ then return -1 endi -sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.u1 < 5; -if $rows != 2 then - return -1 -endi -if $data00 != @21-05-05 18:19:04.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:06.000@ then - return -1 -endi - -sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4; -if $rows != 2 then - return -1 -endi -if $data00 != @21-05-05 18:19:03.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:07.000@ then - return -1 -endi - -sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4; -if $rows != 9 then - return -1 -endi -if $data00 != @21-05-05 18:19:00.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:01.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:02.000@ then - return -1 -endi -if $data30 != @21-05-05 18:19:03.000@ then - return -1 -endi -if $data40 != @21-05-05 18:19:04.000@ then - return -1 -endi -if $data50 != @21-05-05 18:19:05.000@ then - return -1 -endi -if $data60 != @21-05-05 18:19:06.000@ then - return -1 -endi -if $data70 != @21-05-05 18:19:07.000@ then - return -1 -endi -if $data80 != @21-05-05 18:19:11.000@ then - return -1 -endi - -sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 and stb1.c1 > 2 and stb2.u1 <=4; -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 18:19:02.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:04.000@ then - return -1 -endi if $data20 != @21-05-05 18:19:06.000@ then return -1 endi - -print "column&ts test" -sql_error select count(*) from stb1 where ts > 0 or c1 > 0; -sql select * from stb1 where ts > '2021-05-05 18:19:03.000' and ts < '2021-05-05 18:19:20.000' and (c1 > 23 or c1 < 14) and c7 in (true) and c8 like '%2'; -if $rows != 3 then - return -1 -endi -if $data00 != @21-05-05 
18:19:05.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:13.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:17.000@ then - return -1 -endi - -print "column&tbname test" -sql_error select count(*) from stb1 where tbname like 'tb%' or c1 > 0; -sql select * from stb1 where tbname like '%3' and c6 < 34 and c5 != 33 and c4 > 31; -if $rows != 1 then - return -1 -endi -if $data00 != @21-05-05 18:19:13.000@ then +if $data30 != @21-05-05 18:19:07.000@ then return -1 endi -print "column&tag test" -sql_error select * from stb1 where t1 > 0 or c1 > 0 -sql_error select * from stb1 where c1 > 0 or t1 > 0 -sql_error select * from stb1 where t1 > 0 or c1 > 0 or t1 > 1 -sql_error select * from stb1 where c1 > 0 or t1 > 0 or c1 > 1 -sql_error select * from stb1 where t1 > 0 and c1 > 0 or t1 > 1 -sql_error select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1 -sql_error select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1 -sql_error select * from stb1 where t1 > 0 or t1 > 0 and c1 > 1 -sql_error select * from stb1 where (c1 > 0 and t1 > 0 ) or (t1 > 1 and c1 > 3) -sql_error select * from stb1 where (c1 > 0 and t1 > 0 ) or t1 > 1 -sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1; - -sql select * from stb1 where c1 < 63 and t1 > 5 -if $rows != 2 then - return -1 -endi -if $data00 != @21-05-05 18:19:24.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:25.000@ then - return -1 -endi -sql select * from stb1 where t1 > 3 and t1 < 5 and c1 != 42 and c1 != 44; -if $rows != 2 then - return -1 -endi -if $data00 != @21-05-05 18:19:16.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:18.000@ then - return -1 -endi -sql select * from stb1 where t1 > 1 and c1 > 21 and t1 < 3 and c1 < 24 and t1 != 3 and c1 != 23; -if $rows != 1 then - return -1 -endi -if $data00 != @21-05-05 18:19:09.000@ then - return -1 -endi -sql select * from stb1 where c1 > 1 and (t1 > 3 or t1 < 2) and (c2 > 2 and c2 < 62 and t1 != 4) and (t1 > 2 and t1 < 6) and c7 = true and c8 like '%2'; -if $rows != 1 then - return -1 -endi -if $data00 != @21-05-05 18:19:21.000@ then +sql select * from stb1 where c8 nmatch '^1.*'; +if $rows != 23 then return -1 endi -sql select * from stb1 where c1!=31 and c1 !=32 and c1 <> 63 and c1 <>1 and c1 <> 21 and c1 <> 2 and c7 <> true and c8 <> '3' and c9 <> '4' and c2<>13 and c3 <> 23 and c4 <> 33 and c5 <> 34 and c6 <> 43 and c2 <> 53 and t1 <> 5 and t2 <>4; +sql select ts,c8 from stb1 where c8 nmatch '[2345]+'; if $rows != 3 then return -1 endi -if $data00 != @21-05-05 18:19:07.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:11.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:27.000@ then - return -1 -endi - - -print "column&join test" -sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or tb1.c1 > 0; - - -print "ts&tbname test" -sql_error select count(*) from stb1 where ts > 0 or tbname like 'tb%'; - -print "ts&tag test" -sql_error select count(*) from stb1 where ts > 0 or t1 > 0; - -sql select * from stb2 where t1!=1 and t2=2 and t3 in ('2021-05-05 18:58:58.000') and ts < '2021-05-05 18:19:13.000'; -if $rows != 2 then - return -1 -endi -if $data00 != @21-05-05 18:19:11.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:12.000@ then - return -1 -endi - -print "ts&join test" -sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or tb1.ts > 0; -sql select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts and (tb1.ts > 
'2021-05-05 18:19:05.000' or tb1.ts < '2021-05-05 18:19:03.000' or tb1.ts > 0); - - -print "tbname&tag test" -sql select * from stb1 where tbname like 'tb%' and (t1=1 or t2=2 or t3=3) and t1 > 2; -if $rows != 4 then - return -1 -endi -if $data00 != @21-05-05 18:19:12.000@ then - return -1 -endi -if $data10 != @21-05-05 18:19:13.000@ then - return -1 -endi -if $data20 != @21-05-05 18:19:14.000@ then - return -1 -endi -if $data30 != @21-05-05 18:19:15.000@ then - return -1 -endi - -print "tbname&join test" - -print "tag&join test" - - - - - -print "column&ts&tbname test" -sql_error select count(*) from stb1 where tbname like 'tb%' or c1 > 0 or ts > 0; - -print "column&ts&tag test" -sql_error select count(*) from stb1 where t1 > 0 or c1 > 0 or ts > 0; -sql_error select count(*) from stb1 where c1 > 0 or t1 > 0 or ts > 0; - -sql select * from stb1 where (t1 > 0 or t1 > 2 ) and ts > '2021-05-05 18:19:10.000' and (c1 > 1 or c1 > 3) and (c6 > 40 or c6 < 30) and (c8 like '%3' or c8 like '_4') and (c9 like '1%' or c9 like '6%' or (c9 like '%3' and c9 != '23')) and ts > '2021-05-05 18:19:22.000' and ts <= '2021-05-05 18:19:26.000'; -if $rows != 1 then - return -1 -endi -if $data00 != @21-05-05 18:19:26.000@ then +if $data00 != @21-05-05 18:19:00.000@ then return -1 endi -sql select * from stb1 where ts > '2021-05-05 18:19:00.000' and c1 > 2 and t1 != 1 and c2 >= 23 and t2 >= 3 and c3 < 63 and c7 = false and t3 > 3 and t3 < 6 and c8 like '4%' and ts < '2021-05-05 18:19:19.000' and c2 > 40 and c3 != 42; -if $rows != 1 then +if $data10 != @21-05-05 18:19:04.000@ then return -1 endi -if $data00 != @21-05-05 18:19:18.000@ then +if $data20 != @21-05-05 18:19:24.000@ then return -1 endi -print "column&ts&join test" - -print "column&tbname&tag test" -sql_error select count(*) from stb1 where c1 > 0 or tbname in ('tb1') or t1 > 0; - -print "column&tbname&join test" -print "column&tag&join test" -print "ts&tbname&tag test" -sql_error select count(*) from stb1 where ts > 0 or tbname in ('tb1') or t1 > 0; - -print "ts&tbname&join test" -print "ts&tag&join test" -print "tbname&tag&join test" - - - - -print "column&ts&tbname&tag test" -sql_error select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0; -sql_error select * from stb1 where (ts > '2021-05-05 18:19:01.000') and (ts > '2021-05-05 18:19:02.000' or t1 > 3) and (t1 > 5 or t1 < 4) and c1 > 0; -sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and col > 0 and t1 > 0; - - -print "column&ts&tbname&join test" -print "column&ts&tag&join test" -print "column&tbname&tag&join test" -print "ts&tbname&tag&join test" - - -print "column&ts&tbname&tag&join test" -#system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/condition_query2.sim b/tests/script/general/parser/condition_query2.sim new file mode 100644 index 0000000000000000000000000000000000000000..513bb9900ba2e40a165cc6d76cdaf88a46fb00e5 --- /dev/null +++ b/tests/script/general/parser/condition_query2.sim @@ -0,0 +1,2345 @@ +sql use cdb; + +print "ts test" +sql_error select ts,c1,c7 from stb1 where ts != '2021-05-05 18:19:27' +sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts < '2021-05-05 18:19:02.000'; +sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' and ts > '2021-05-05 18:19:20.000' and ts != '2021-05-05 18:19:22.000'; +sql_error select * from stb1 where ts2 like '2021-05-05%'; +sql_error select ts,c1,c2 from 
stb1 where (ts > '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:05.000') and ts > '2021-05-05 18:19:01.000' and ts < '2021-05-05 18:19:27.000'; +sql_error select ts,c1,c2 from stb1 where (ts > '2021-05-05 18:19:20.000' or ts < '2021-05-05 18:19:05.000') and ts != '2021-05-05 18:19:25.000'; +sql_error select ts,c1,c2 from stb1 where ((ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:15.000' and ts <= '2021-05-05 18:19:20.000') or (ts >= '2021-05-05 18:19:11.000' and ts <= '2021-05-05 18:19:14.000')); +sql_error select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:24.000'; +sql_error select tbname,ts,t1,t2 from stb5 where t1 > ''; + +sql select * from stb1 where ts is null; +if $rows != 0 then + return -1 +endi +sql select * from stb1 where ts is not null and ts is null; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' and ts < '2021-05-05 18:19:10.000'; +if $rows != 0 then + return -1 +endi + +sql select * from stb1 where ts > '2021-05-05 18:19:03.000' and ts < '2021-05-05 18:19:02'; +if $rows != 0 then + return -1 +endi + +sql select * from stb1 where ts is not null; +if $rows != 29 then + return -1 +endi + +sql select * from stb1 where ts is not null or ts is null; +if $rows != 29 then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:25.000'; +if $rows != 29 then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' and ts < '2021-05-05 18:19:26.000'; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:25.000@ then + return -1 +endi +sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts < '2021-05-05 18:19:28.000'; +if $rows != 29 then + return -1 +endi +sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts > '2021-05-05 18:19:27.000'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:28.000@ then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ts > '2021-05-05 18:19:20.000' or ts < '2021-05-05 18:19:05.000' or ts != '2021-05-05 18:19:25.000'; +if $rows != 29 then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ts >= '2021-05-05 18:19:25.000' or ts <> '2021-05-05 18:19:25.000'; +if $rows != 29 then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ((ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.999') or (ts >= '2021-05-05 18:19:15.000' and ts <= '2021-05-05 18:19:20.000') or (ts >= '2021-05-05 18:19:11.000' and ts <= '2021-05-05 18:19:14.999')); +if $rows != 16 then + return -1 +endi +if $data00 != @21-05-05 18:19:05.000@ then + return -1 +endi + +sql select ts,c1,c2 from stb1 where (ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:12.000' and ts <= '2021-05-05 18:19:14.000') or (ts >= '2021-05-05 18:19:08.000' and ts <= '2021-05-05 18:19:17.000'); +if $rows != 13 then + return -1 +endi +if $data00 != @21-05-05 18:19:05.000@ then + return -1 +endi + +sql select ts,c1,c2 from stb1 where (ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:02.000' and ts <= '2021-05-05 18:19:03.000') or (ts >= '2021-05-05 18:19:01.000' and ts <= '2021-05-05 
18:19:08.000'); +if $rows != 10 then + return -1 +endi +if $data00 != @21-05-05 18:19:01.000@ then + return -1 +endi + +sql select ts,c1,c2 from stb1 where ((ts >= '2021-05-05 18:19:08.000' and ts <= '2021-05-05 18:19:10.000') or (ts >= '2021-05-05 18:19:02.000' and ts <= '2021-05-05 18:19:03.000') or (ts >= '2021-05-05 18:19:05.000' and ts <= '2021-05-05 18:19:06.000') or (ts >= '2021-05-05 18:19:03.000' and ts <= '2021-05-05 18:19:12.000')) and (ts >= '2021-05-05 18:19:10.000'); +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:12.000@ then + return -1 +endi + +sql select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:25.000' and ts != '2021-05-05 18:19:18'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:28.000@ then + return -1 +endi + + +sql select * from stb1 where ts > '2021-05-05 18:19:03.000' and ts > '2021-05-05 18:19:25'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:28.000@ then + return -1 +endi + +sql select * from stb1 where ts < '2021-05-05 18:19:03.000' and ts < '2021-05-05 18:19:25'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi + +sql select * from stb1 where ts > '2021-05-05 18:19:23.000' and ts < '2021-05-05 18:19:25'; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi + +sql select * from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:25'; +if $rows != 25 then + return -1 +endi +if $data00 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:06.000@ then + return -1 +endi + +sql select * from stb1 where ts < '2021-05-05 18:19:03.000' or ts < '2021-05-05 18:19:25'; +if $rows != 25 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi + +sql select * from stb1 where ts > '2021-05-05 18:19:23.000' or ts < '2021-05-05 18:19:25'; +if $rows != 29 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi + +sql select * from stb1 where (ts > '2021-05-05 18:19:23.000' or ts < '2021-05-05 18:19:25') and (ts > '2021-05-05 18:19:23.000' and ts < '2021-05-05 18:19:26'); +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi + +sql select * from stb1 where (ts > '2021-05-05 18:19:23.000' or ts < '2021-05-05 18:19:25') and (ts > '2021-05-05 18:19:23.000' or ts > '2021-05-05 18:19:26'); +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data30 
!= @21-05-05 18:19:27.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:28.000@ then + return -1 +endi + + +sql select * from stb2 where ts2 in ('2021-05-05 18:28:03','2021-05-05 18:28:05','2021-05-05 18:28:08'); +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:07.000@ then + return -1 +endi + +sql select * from stb2 where t3 in ('2021-05-05 18:38:38','2021-05-05 18:38:28','2021-05-05 18:38:08') and ts2 in ('2021-05-05 18:28:04','2021-05-05 18:28:04','2021-05-05 18:28:03'); +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:03.000@ then + return -1 +endi + +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.ts < '2021-05-05 18:19:03.000' or a.ts >= '2021-05-05 18:19:13.000') and (b.ts >= '2021-05-05 18:19:01.000' and b.ts <= '2021-05-05 18:19:14.000'); +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:14.000@ then + return -1 +endi + +sql select a.ts,c.ts,b.c1,c.u1,c.u2 from (select * from stb1) a, (select * from stb1) b, (select * from stb2) c where a.ts=b.ts and b.ts=c.ts and a.ts <= '2021-05-05 18:19:12.000' and b.ts >= '2021-05-05 18:19:06.000' and c.ts >= '2021-05-05 18:19:08.000' and c.ts <= '2021-05-05 18:19:11.000' and a.ts != '2021-05-05 18:19:10.000'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:11.000@ then + return -1 +endi + +sql select ts,c1,c2,c8 from (select * from stb1) where (ts <= '2021-05-05 18:19:06.000' or ts >= '2021-05-05 18:19:13.000') and (ts >= '2021-05-05 18:19:02.000' and ts <= '2021-05-05 18:19:14.000') and ts != '2021-05-05 18:19:04.000'; +if $rows != 6 then + return -1 +endi +if $data00 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:14.000@ then + return -1 +endi + +sql select ts,c1,c2,c8 from (select * from stb1) where (ts <= '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:26.000' or ts = '2021-05-05 18:19:26.000') and ts != '2021-05-05 18:19:03.000' and ts != '2021-05-05 18:19:26.000'; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:28.000@ then + return -1 +endi + +print "tbname test" +sql select tbname from stb1; +if $rows != 6 then + return -1 +endi + +sql select * from stb1 where tbname like '%3' and tbname like '%4'; +if $rows != 0 then + return -1 +endi + +sql select * from stb1 where tbname like 'tb%'; +if $rows != 29 then + return -1 +endi + +sql select * from stb1 where tbname like '%2'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + 
return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi + +sql select tbname from stb1 where tbname = 'tb1'; +if $rows != 1 then + return -1 +endi +if $data00 != tb1 then + return -1 +endi + +sql select tbname from stb1 where tbname = 'tb1' or tbname = 'tb3'; +if $rows != 2 then + return -1 +endi +if $data00 != tb1 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi + +sql select tbname from stb1 where tbname <> 'tb1'; +if $rows != 5 then + return -1 +endi +if $data00 != tb2 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data20 != tb4 then + return -1 +endi +if $data30 != tb5 then + return -1 +endi +if $data40 != tb6 then + return -1 +endi + +sql select tbname from stb1 where tbname <> 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'; +if $rows != 6 then + return -1 +endi +if $data00 != tb1 then + return -1 +endi +if $data10 != tb2 then + return -1 +endi +if $data20 != tb3 then + return -1 +endi +if $data30 != tb4 then + return -1 +endi +if $data40 != tb5 then + return -1 +endi +if $data50 != tb6 then + return -1 +endi + +sql select tbname from stb1 where tbname > 'tba'; +if $rows != 0 then + return -1 +endi + +sql select tbname from stb1 where tbname > 'tb2' and tbname <= 'tb5'; +if $rows != 3 then + return -1 +endi +if $data00 != tb3 then + return -1 +endi +if $data10 != tb4 then + return -1 +endi +if $data20 != tb5 then + return -1 +endi + +sql select tbname from stb1 where tbname >= 'tb5' or tbname <= 'tb2'; +if $rows != 4 then + return -1 +endi +if $data00 != tb1 then + return -1 +endi +if $data10 != tb2 then + return -1 +endi +if $data20 != tb5 then + return -1 +endi +if $data30 != tb6 then + return -1 +endi + +sql select tbname from stb1 where tbname is null; +if $rows != 0 then + return -1 +endi + + +sql select tbname from stb1 where tbname is not null; +if $rows != 6 then + return -1 +endi +if $data00 != tb1 then + return -1 +endi +if $data10 != tb2 then + return -1 +endi +if $data20 != tb3 then + return -1 +endi +if $data30 != tb4 then + return -1 +endi +if $data40 != tb5 then + return -1 +endi +if $data50 != tb6 then + return -1 +endi + +sql select tbname from stb1 where tbname in ('tb2','tb6'); +if $rows != 2 then + return -1 +endi +if $data00 != tb2 then + return -1 +endi +if $data10 != tb6 then + return -1 +endi + +sql select tbname from stb1 where tbname is not null and (tbname in ('tb2','tb6') or tbname like '%3'); +if $rows != 3 then + return -1 +endi +if $data00 != tb2 then + return -1 +endi +if $data10 != tb3 then + return -1 +endi +if $data20 != tb6 then + return -1 +endi + +sql select tbname from stb1 where (tbname like '%5' and tbname like 'tb%'); +if $rows != 1 then + return -1 +endi +if $data00 != tb5 then + return -1 +endi + +sql select * from stb1 where tbname = 'tb5' and tbname <> 'tb5'; +if $rows != 0 then + return -1 +endi + +sql select * from stb1 where tbname = 'tb5' and tbname <> 'tb4'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:23.000@ then + 
return -1 +endi + +sql select *,tbname from stb1 where tbname between 'tb2' and 'tb3'; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:15.000@ then + return -1 +endi + +sql select ts,c1,t9,t10,tbname from stb5 where tbname match '^3'; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10,tbname from stb5 where tbname match 'tb.?_[34]'; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:16.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:17.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:18.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:19.000@ then + return -1 +endi + +sql select ts,c1,t9,t10,tbname from stb5 where tbname nmatch '[123467]+'; +if $rows != 9 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:23.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t9,t10,tbname from stb5 where tbname in ('tb5_1', 'TB5_2'); +if $rows != 12 then + return -1 +endi + +sql select ts,c1,t9,t10,tbname from stb5 where tbname in ('tb5_1', 'TB5_2') or tbname in ('tb5_3'); +if $rows != 16 then + return -1 +endi + +sql select ts,c1,t9,t10,tbname from stb5 where tbname in ('tb5_1', 'TB5_2') and tbname in ('tb5_2'); +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi +print "tag test" +sql_error select * from stb5 where t1 match '.*'; +sql_error select * from stb5 where t2 match '.*'; +sql_error select * from stb5 where t3 match '.*'; +sql_error select * from stb5 where t4 match '.*'; +sql_error select * from stb5 where t5 match '.*'; +sql_error select * from stb5 where t6 match '.*'; +sql_error select * from stb5 where t7 match '.*'; +sql_error select * from stb5 where t8 match '.*'; +sql_error select * from stb5 where t10 match '.*'; +sql_error select * from tb1 where t1 in (1,2) and t1 in (2,3); + +sql select * from stb1 where t1 in (1,2) and t1 in (2,3); +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi + 
+sql select * from stb1 where t1 in (1,2) or t1 in (2,3); +if $rows != 16 then + return -1 +endi + +sql select * from stb2 where t1 in (1,2) and t2 in (2) and t3 in ('2021-05-05 18:58:57.000'); +if $rows != 0 then + return -1 +endi + +sql select *,t1 from stb5 where t1 > 0; +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t1 from stb5 where ((t1 > 0 and t1 <= '2021-05-05 18:19:02.000') or (t1 >'2021-05-05 18:19:03.000' and t1 < '2021-05-05 18:19:06.000')) and t1 != '2021-05-05 18:19:04.000' and t1 is not null; +if $rows != 16 then + return -1 +endi + +sql select ts,c1,t1 from stb5 where ((t1 > 0 and t1 <= '2021-05-05 18:19:02.000') or (t1 >'2021-05-05 18:19:03.000' and t1 < '2021-05-05 18:19:06.000')) and t1 != '2021-05-05 18:19:04.000' and t1 is null; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t1 from stb5 where (((t1 > 0 and t1 <= '2021-05-05 18:19:02.000') or (t1 >'2021-05-05 18:19:03.000' and t1 < '2021-05-05 18:19:06.000')) and t1 != '2021-05-05 18:19:04.000') or t1 is null; +if $rows != 21 then + return -1 +endi + +sql select ts,c1,t2 from stb5 where t2 > 0 or t2 is null; +if $rows != 39 then + return -1 +endi + +sql select ts,c1,t2 from stb5 where (((t2 > 5 or t2 is null or t2 < 3) and t2 != 6) or t2 in (4, 3)) and t2 <= 3 and t2 >= 2; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:15.000@ then + return -1 +endi + +sql select ts,c1,t2 from stb5 where t2 > 5.5; +if $rows != 10 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:28.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:29.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:30.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:31.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:32.000@ then + return -1 +endi +if $data90 != @21-05-05 18:19:33.000@ then + return -1 +endi + +sql select ts,c1,t2 from stb5 where t2 >= 5.5; +if $rows != 14 then + return -1 +endi + +sql select ts,c1,t3,t4,t5,t6,t7 from stb5 where t3 is null or t4 is null; +if $rows != 10 then + return -1 +endi +if $data00 != @21-05-05 18:19:29.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:30.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:31.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:32.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:33.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data90 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t3,t4,t5,t6,t7 from stb5 where t3 is null or t4 is null or t5 <= 1 or t6 in (2,3) or t7 < 5; +if $rows != 30 then + return -1 +endi + +sql select ts,c1,t8,t9 from stb5 where t8 = 
true; +if $rows != 21 then + return -1 +endi +sql select ts,c1,t8,t9 from stb5 where t8 <> true; +if $rows != 13 then + return -1 +endi + +sql select ts,c1,t8,t9 from stb5 where t8 = false; +if $rows != 13 then + return -1 +endi + +sql select ts,c1,t8,t9 from stb5 where t8 in (true, false); +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t8,t9 from stb5 where t8 in (true, false) or t8 is null; +if $rows != 39 then + return -1 +endi + +sql select ts,c1,t8,t9 from stb5 where t8 in (true) and t8 is not null; +if $rows != 21 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 > '12'; +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 = '11'; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 <> '111111111'; +if $rows != 26 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 = '111111111'; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:07.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 >= '888888888'; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 > '222222222' and t9 < '444444444'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 > '0000000000' and t9 < '999999999'; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 like '_1'; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 like '%1'; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:07.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where (t9 like '%1' or t9 in ('222222222','444444444')) and t9 is null; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 is null; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:29.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:30.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:31.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:32.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:33.000@ 
then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 is not null; +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 is not null and t9 is null; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 like '_%' and t10 between 5 and 6; +if $rows != 9 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:23.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:28.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is not null; +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is null; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is not null and t10 like '%__'; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is not null and t10 like '%_%'; +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is not null and t10 like '%%_%%'; +if $rows != 34 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is not null and t10 like '%_%%%_'; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 like '_' and t10 between 2 and 3; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:15.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 in ('a','3','bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb') or t10 in (''); +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t10 is null or (t10 > 1 and t10 < 7 and t10 in ('3','4','5') and t10 != 4 and t10 like '3'); +if $rows != 9 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:36.000@ then + return -1 
+endi +if $data70 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t8,t9,t10 from stb5 where (t10 is null or t9 is null) and t8 is null; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t8,t9,t10 from stb5 where t10 between 3 and 7 and t9 between '' and '00000000000' and t9 like '_________' and (t10 like '3%'or t10 like '%4'); +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:16.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:17.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:18.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:19.000@ then + return -1 +endi + +sql select ts,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10 from stb5 where t1 is not null and t2 between 2 and 7 and t3 in (2, 4,6) and t8 != false and t9 > '111111111' and t10 <= 5; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi + +sql select * from stb5 where t1 <> 1; +if $rows != 34 then + return -1 +endi + +sql select t1,ts,c1 from stb5 where t1 <> '2021-05-05 18:19:01.000' +if $rows != 26 then + return -1 +endi + +sql select t1,ts,c1 from stb5 where t1 <> '2021-05-05 18:19:01.000' and t1 <> '2021-05-05 18:19:08.000'; +if $rows != 21 then + return -1 +endi + +sql select t1,ts,c1 from stb5 where t1 > '2021-05-05 18:19:02.000'; +if $rows != 22 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 match '1.+'; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:07.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 match '[3-9]'; +if $rows != 22 then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 match '8.*'; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,c1,t9,t10 from stb5 where t9 nmatch '^[12345]*\$'; +if $rows != 10 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:26.000@ then + return 
-1 +endi +if $data30 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:28.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:35.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:37.000@ then + return -1 +endi +if $data90 != @21-05-05 18:19:38.000@ then + return -1 +endi + +print "join test" +sql_error select * from tb1, tb2_1 where tb1.ts=tb2_1.ts or tb1.ts =tb2_1.ts; +sql select tb1.ts from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.ts < '2021-05-05 18:19:06.000'; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:05.000@ then + return -1 +endi +sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.u1 < 5; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:06.000@ then + return -1 +endi + +sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:07.000@ then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4; +if $rows != 9 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:07.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:11.000@ then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 and stb1.c1 > 2 and stb2.u1 <=4; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:06.000@ then + return -1 +endi + +sql select stb1.ts,stb1.t1,stb1.c1,stb5.c2 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and (stb1.c1 between 20 and 50 and (stb5.c2 > 22 and stb5.c2 < 33)); +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:13.000@ then + return -1 +endi + +sql select stb1.ts,stb1.t1,stb1.c1,stb5.c2 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts; +if $rows != 29 then + return -1 +endi + +sql select stb1.c1,stb5.c1 from stb1, stb5 where stb1.t1=stb5.t2 and (stb1.c1 > 10 and stb5.c1 < 20) and stb1.ts=stb5.ts ; +if $rows != 4 then + return -1 +endi +if $data00 != 11 then + return -1 +endi +if $data10 != 12 then + return -1 +endi +if $data20 != 13 then + return -1 +endi +if $data30 != 14 then + return -1 +endi + +print "column&ts test" +sql_error select count(*) from stb1 where ts > 0 or c1 > 
0; +sql select * from stb1 where ts > '2021-05-05 18:19:03.000' and ts < '2021-05-05 18:19:20.000' and (c1 > 23 or c1 < 14) and c7 in (true) and c8 like '%2'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:05.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:17.000@ then + return -1 +endi + +print "column&tbname test" +sql_error select count(*) from stb1 where tbname like 'tb%' or c1 > 0; +sql select * from stb1 where tbname like '%3' and c6 < 34 and c5 != 33 and c4 > 31; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:13.000@ then + return -1 +endi + +sql select ts,c1,tbname from stb5 where c1 > 30 and tbname > 'tb5_3' and tbname < 'tb5_8' and c1 < 72 and c1 between 44 and 54; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:19.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:23.000@ then + return -1 +endi + +sql select ts,c8,tbname from stb5 where (tbname like '%3' or tbname like '%4' or tbname = 'tb5_6') and tbname between 'tb5_2' and 'tb5_7' and (c8 like '3_' or c8 like '_4'); +if $rows != 6 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:19.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:27.000@ then + return -1 +endi + + +print "column&tag test" +sql_error select * from stb1 where t1 > 0 or c1 > 0 +sql_error select * from stb1 where c1 > 0 or t1 > 0 +sql_error select * from stb1 where t1 > 0 or c1 > 0 or t1 > 1 +sql_error select * from stb1 where c1 > 0 or t1 > 0 or c1 > 1 +sql_error select * from stb1 where t1 > 0 and c1 > 0 or t1 > 1 +sql_error select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1 +sql_error select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1 +sql_error select * from stb1 where t1 > 0 or t1 > 0 and c1 > 1 +sql_error select * from stb1 where (c1 > 0 and t1 > 0 ) or (t1 > 1 and c1 > 3) +sql_error select * from stb1 where (c1 > 0 and t1 > 0 ) or t1 > 1 +sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1; + +sql select * from stb1 where c1 < 63 and t1 > 5 +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi +sql select * from stb1 where t1 > 3 and t1 < 5 and c1 != 42 and c1 != 44; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:16.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:18.000@ then + return -1 +endi +sql select * from stb1 where t1 > 1 and c1 > 21 and t1 < 3 and c1 < 24 and t1 != 3 and c1 != 23; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:09.000@ then + return -1 +endi +sql select * from stb1 where c1 > 1 and (t1 > 3 or t1 < 2) and (c2 > 2 and c2 < 62 and t1 != 4) and (t1 > 2 and t1 < 6) and c7 = true and c8 like '%2'; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:21.000@ then + return -1 +endi + +sql select * from stb1 where c1!=31 and c1 !=32 and c1 <> 63 and c1 <>1 and c1 <> 21 and c1 <> 2 and c7 <> true and c8 <> 
'3' and c9 <> '4' and c2<>13 and c3 <> 23 and c4 <> 33 and c5 <> 34 and c6 <> 43 and c2 <> 53 and t1 <> 5 and t2 <>4; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:07.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:27.000@ then + return -1 +endi + +sql select ts,t1,c1,t2 from stb5 where t1 in ('2021-05-05 18:19:02.000','2021-05-05 18:19:04.000','2021-05-05 18:19:06.000','2021-05-05 18:19:08.000') and c1 is null and t2 is null; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:38.000@ then + return -1 +endi + +sql select ts,t1,c1,t2 from stb5 where t1 in ('2021-05-05 18:19:02.000','2021-05-05 18:19:04.000','2021-05-05 18:19:06.000','2021-05-05 18:19:08.000') and t2 is not null and ts between '2021-05-05 18:19:10.000' and '2021-05-05 18:19:20.000' and t2 < 3; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi + +sql select ts,t1,c1,t2,tbname,t8,t9 from stb5 where (((t8 != false or t8 is null) and (t9 like '%8' or t9 like '%6') and t2 is not null) or (t8 in (false) and t1 is null)) and (c1 is null or (c1 > 62 and c1 <= 72)); +if $rows != 6 then + return -1 +endi +if $data00 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:28.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:29.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:30.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:33.000@ then + return -1 +endi + +print "column&join test" +sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or tb1.c1 > 0; + +sql select stb5.ts,stb5.c1,stb5.t1,stb5.t8,stb5.t9,stb5.t10 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and (stb5.c1 > 60 or stb5.c1 <= 11 or stb5.c1 is null); +if $rows != 10 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:03.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data80 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data90 != @21-05-05 18:19:28.000@ then + return -1 +endi + +sql select stb5.ts,stb5.c1,stb5.t1,stb5.t8,stb5.t9,stb5.t10 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and (stb5.c1 > 60 or stb5.c1 <= 11 or stb5.c1 is null or stb5.c2 between 30 and 40) and (stb1.c9 like '%3' or stb1.c8 like '%4') and stb5.c9 like '%3%'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:26.000@ then + return -1 +endi + + +print "ts&tbname test" +sql_error select count(*) from stb1 where ts > 0 or tbname like 'tb%'; +sql_error select tbname,ts,c1,tbname from stb5 where tbname like '%' and tbname between '' and 'tb5_3' and ts between '2021-05-05 18:19:05.000' and '2021-05-05 18:19:10.000' and ts <> '2021-05-05 18:19:07.000'; + +sql select tbname,ts,c1,tbname from stb5 where 
(tbname like '%5' or tbname like '%8') and ts between '2021-05-05 18:19:21.000' and '2021-05-05 18:19:35.000'; +if $rows != 5 then + return -1 +endi +if $data01 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data11 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data21 != @21-05-05 18:19:23.000@ then + return -1 +endi +if $data31 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data41 != @21-05-05 18:19:35.000@ then + return -1 +endi + +sql select tbname,ts,c1,tbname from stb5 where tbname like '%' and tbname between '' and 'tb5_3' and ts between '2021-05-05 18:19:10.000' and '2021-05-05 18:19:05.000'; +if $rows != 0 then + return -1 +endi + +sql select tbname,ts,c1,tbname from stb5 where tbname like '%' and tbname between '' and 'tb5_3' and ts between '2021-05-05 18:19:05.000' and '2021-05-05 18:19:10.000' and ts <> '2021-05-05 18:19:05.000' and ts != '2021-05-05 18:19:10.000' order by ts desc; +if $rows != 4 then + return -1 +endi +if $data01 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data11 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data21 != @21-05-05 18:19:07.000@ then + return -1 +endi +if $data31 != @21-05-05 18:19:06.000@ then + return -1 +endi + + +print "ts&tag test" +sql_error select count(*) from stb1 where ts > 0 or t1 > 0; + +sql select * from stb2 where t1!=1 and t2=2 and t3 in ('2021-05-05 18:58:58.000') and ts < '2021-05-05 18:19:13.000'; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:12.000@ then + return -1 +endi + +sql select t1,ts,c1,t1,t2 from stb5 where (t1 > 1 or t1 is null or t2 is null) and (t1 in ('2021-05-05 18:19:02.000','2021-05-05 18:19:04.000') or t1 is null) and t2 >= 4 order by ts; +if $rows != 9 then + return -1 +endi +if $data01 != @21-05-05 18:19:16.000@ then + return -1 +endi +if $data11 != @21-05-05 18:19:17.000@ then + return -1 +endi +if $data21 != @21-05-05 18:19:18.000@ then + return -1 +endi +if $data31 != @21-05-05 18:19:19.000@ then + return -1 +endi +if $data41 != @21-05-05 18:19:29.000@ then + return -1 +endi +if $data51 != @21-05-05 18:19:30.000@ then + return -1 +endi +if $data61 != @21-05-05 18:19:31.000@ then + return -1 +endi +if $data71 != @21-05-05 18:19:32.000@ then + return -1 +endi +if $data81 != @21-05-05 18:19:33.000@ then + return -1 +endi + +print "ts&join test" +sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or tb1.ts > 0; +sql_error select stb1.tbname,stb5.tbname,stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and (stb1.ts = '2021-05-05 18:19:10.000' or stb1.ts = '2021-05-05 18:19:11.000'); + +sql select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts and (tb1.ts > '2021-05-05 18:19:05.000' or tb1.ts < '2021-05-05 18:19:03.000' or tb1.ts > 0) and tb1.ts between '0' and '2021-05-05 18:19:04.000' and tb2_1.ts < '2021-05-05 18:19:03.000'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:01.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:02.000@ then + return -1 +endi + +sql select stb1.tbname,stb5.tbname,stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and (stb1.ts = '2021-05-05 18:19:10.000' or stb1.ts <= '2021-05-05 18:19:11.000') and stb5.ts > '2021-05-05 18:19:05.000' and stb5.ts != '2021-05-05 18:19:11.000'; +if $rows != 5 then + return -1 +endi +if $data02 != @21-05-05 18:19:06.000@ then + return 
-1 +endi +if $data12 != @21-05-05 18:19:07.000@ then + return -1 +endi +if $data22 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data32 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data42 != @21-05-05 18:19:10.000@ then + return -1 +endi + + +print "tbname&tag test" +sql select * from stb1 where tbname like 'tb%' and (t1=1 or t2=2 or t3=3) and t1 > 2; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:15.000@ then + return -1 +endi + +sql select ts,t1,c1,t2,tbname,t8,t9 from stb5 where tbname = 'tb5_1' or t1 = '2021-05-05 18:19:02.000'; +if $rows != 12 then + return -1 +endi + +sql select ts,t1,c1,t2,tbname,t8,t9 from stb5 where t1 = '2021-05-05 18:19:02.000' or tbname = 'tb5_1'; +if $rows != 12 then + return -1 +endi + +sql select t2,t1,t2,t3,t4,t8 from stb5 where t2 > 1 and t2 < 3 or t3 >= 1 and t3 <=1 or t8 in (false); +if $rows != 5 then + return -1 +endi + +sql select tbname,ts,c1,t1,t2,t3,t8 from stb5 where (t2 > 1 and t2 < 3 or t3 >= 1 and t3 <=1 or t8 in (false) or tbname like 'tb5_8' or tbname in ('tb5_5')) and tbname < 'tb5_3' and t3 > 1.0 and ts < '2021-05-05 18:19:10.000'; +if $rows != 2 then + return -1 +endi + + +print "tbname&join test" +sql select stb1.tbname,stb5.tbname,stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb1.tbname < 'tb4' and (stb5.tbname like 'tb5_3' or stb5.tbname = 'tb5_1') and stb5.tbname like '%____%_%'; +if $rows != 12 then + return -1 +endi + +sql select stb1.tbname,stb5.tbname,stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb1.tbname < 'tb4' and (stb5.tbname like 'tb5_3' or stb5.tbname = 'tb5_1') and stb5.tbname like '%____%_%_'; +if $rows != 0 then + return -1 +endi + + +print "tag&join test" +sql select stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb1.t1 >= -1 and stb1.t1 < 100 and stb1.t1 in (1,2,3,5,6) and stb1.t1 <> 3 and stb1.t1 <= 5 and stb1.t1 >=2; +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:23.000@ then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t6,stb5.t7,stb5.t8 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb5.t1 >'2021-05-05 18:19:01.000'; +if $rows != 21 then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb1.t1,stb5.t3,stb5.t6,stb5.t7,stb5.t8,stb5.t9 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.t9 like '_%_______5555%55_'; +if $rows != 0 then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb1.t1,stb5.t3,stb5.t6,stb5.t7,stb5.t8,stb5.t9 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.t9 like '%_%__55%%%%55%55'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:23.000@ then 
+ return -1 +endi + + +print "column&ts&tbname test" +sql_error select count(*) from stb1 where tbname like 'tb%' or c1 > 0 or ts > 0; + +sql select * from stb5 where tbname > '' and (tbname like '%8') and tbname is null; +if $rows != 0 then + return -1 +endi + +sql select ts,c1,ts,c1,ts,c1,c8 from stb5 where tbname > '' and (tbname like '%8' or tbname like '%3') and tbname is not null and tbname in ('tb5_2','tb5_8') and tbname < 'aaaaaaaaaaa' and ts <= 1620209977000 and (c9 like '_3' or c9 <> '82'); +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:34.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:36.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:37.000@ then + return -1 +endi + + +print "column&ts&tag test" +sql_error select count(*) from stb1 where t1 > 0 or c1 > 0 or ts > 0; +sql_error select count(*) from stb1 where c1 > 0 or t1 > 0 or ts > 0; + +sql select * from stb1 where (t1 > 0 or t1 > 2 ) and ts > '2021-05-05 18:19:10.000' and (c1 > 1 or c1 > 3) and (c6 > 40 or c6 < 30) and (c8 like '%3' or c8 like '_4') and (c9 like '1%' or c9 like '6%' or (c9 like '%3' and c9 != '23')) and ts > '2021-05-05 18:19:22.000' and ts <= '2021-05-05 18:19:26.000'; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:26.000@ then + return -1 +endi +sql select * from stb1 where ts > '2021-05-05 18:19:00.000' and c1 > 2 and t1 != 1 and c2 >= 23 and t2 >= 3 and c3 < 63 and c7 = false and t3 > 3 and t3 < 6 and c8 like '4%' and ts < '2021-05-05 18:19:19.000' and c2 > 40 and c3 != 42; +if $rows != 1 then + return -1 +endi +if $data00 != @21-05-05 18:19:18.000@ then + return -1 +endi + + +print "column&ts&join test" +sql select stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb1.ts > '2021-05-05 18:19:09.000' and stb5.ts < '2021-05-05 18:19:25.000' and stb1.c9 like '%4'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:19.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:23.000@ then + return -1 +endi + +print "column&tbname&tag test" +sql_error select count(*) from stb1 where c1 > 0 or tbname in ('tb1') or t1 > 0; + +sql select * from stb5 where c1 > 10 and tbname in ('tb5_2','tb5_3','tb5_4') and t9 like '%4'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:16.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:17.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:18.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:19.000@ then + return -1 +endi + + +print "column&tbname&join test" +sql select stb1.ts,stb1.c1,stb5.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and (stb5.c1 > 10 or stb5.c1 is null) and stb5.tbname in ('tb5_2','tb5_3','tb5_6') and (stb5.c1 < 24 or stb5.c1 is null); +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:28.000@ then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb5.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and (stb5.c1 > 10 or stb5.c1 is null) and stb5.tbname in ('tb5_2', 'tb5_3','tb5_6') and (stb5.c1 < 24 or stb5.c1 is null) and stb5.c1 is not null; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return 
-1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi + + +print "column&tag&join test" +sql select stb1.ts,stb1.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb1.t1 >= -1 and stb1.t1 < 100 and stb1.t1 in (1,2,3,5,6) and stb1.t1 <> 3 and stb1.t1 <= 5 and stb1.t1 >=2 and stb1.c1 >= 22 and stb1.c1 <= 53 and stb1.c1 in (23,24,50,54,21); +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi + +sql select stb1.ts,stb1.t3,stb1.tbname,stb1.c1,stb5.tbname from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb1.t2 > 1; +if $rows != 21 then + return -1 +endi + +sql select stb1.ts,stb1.t3,stb1.tbname,stb1.c1,stb5.t9,stb1.t2 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb1.t2 between 2 and 5 and (stb5.t9 like '%2' or stb5.t9 like '%3') and stb1.ts < '2021-05-05 18:19:14.000' and stb5.ts > '2021-05-05 18:19:09.000'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:13.000@ then + return -1 +endi + + +print "ts&tbname&tag test" +sql_error select count(*) from stb1 where ts > 0 or tbname in ('tb1') or t1 > 0; + +sql select tbname,ts,t1,t2 from stb5 where ts > 0 and ts between '2021-05-05 18:19:06.001' and '2021-05-05 18:19:30.000' and (tbname='tb5_6' or tbname in ('tb5_1')) and t1 > '2021-05-05 18:19:01.000'; +if $rows != 5 then + return -1 +endi +if $data01 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data11 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data21 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data31 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data41 != @21-05-05 18:19:28.000@ then + return -1 +endi + +print "ts&tbname&join test" + +sql_error select stb1.ts,stb1.c1,stb5.c1,stb1.t1 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.tbname in ('tb5_2', 'tb5_3','tb5_6') and stb5.ts < 1111111111111111111111; + +sql select stb1.ts,stb1.c1,stb5.c1,stb1.t1,stb1.tbname from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.tbname in ('tb5_2', 'tb5_3','tb5_6') and stb5.ts < 11111111111111111 and (stb1.tbname like '%6' or stb1.tbname in ('tb2')) and stb1.ts between '2021-05-05 18:19:10.000' and '2021-05-05 18:19:26.000'; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:26.000@ then + return -1 +endi + +print "ts&tag&join test" +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t3,stb5.t6,stb5.t7,stb5.t8,stb5.t9 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.t1 > '2021-05-05 18:19:02.000' and stb5.t1 between '2021-05-05 18:19:05.000' and '2021-05-05 18:19:06.000' and stb5.ts between '2021-05-05 18:19:23.000' and '2021-05-05 18:19:25.000'; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:23.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:25.000@ then + return -1 +endi + + +print "tbname&tag&join test" +sql select 
stb1.ts,stb1.c1,stb5.t1,stb5.t3,stb5.t6,stb5.t7,stb5.t8,stb5.t9 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.t1 > '2021-05-05 18:19:01.000' and stb5.t1 between '2021-05-05 18:19:00.000' and '2021-05-05 18:19:06.000' and (stb5.tbname like '%3' or stb5.tbname like '%2'); +if $rows != 8 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:14.000@ then + return -1 +endi +if $data70 != @21-05-05 18:19:15.000@ then + return -1 +endi + +print "column&ts&tbname&tag test" +sql_error select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0; +sql_error select * from stb1 where (ts > '2021-05-05 18:19:01.000') and (ts > '2021-05-05 18:19:02.000' or t1 > 3) and (t1 > 5 or t1 < 4) and c1 > 0; +sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and col > 0 and t1 > 0; + +sql select t4,tbname,ts,c1 from stb5 where ((tbname like '%4') or t4 >= 6) and ts between '2021-05-05 18:19:20.000' and '2021-05-05 18:19:30.000' and (c1 is null or c1 >= 62 and c1 <= 71); +if $rows != 5 then + return -1 +endi +if $data02 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data12 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data22 != @21-05-05 18:19:27.000@ then + return -1 +endi +if $data32 != @21-05-05 18:19:28.000@ then + return -1 +endi +if $data42 != @21-05-05 18:19:29.000@ then + return -1 +endi + +print "column&ts&tbname&join test" + +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t3,stb5.t6,stb5.t7,stb5.t8,stb5.t9 from stb1, stb5 where stb1.t1=stb5.t2 and stb1.ts=stb5.ts and stb5.t1 > '2021-05-05 18:19:01.000' and stb5.t1 between '2021-05-05 18:19:00.000' and '2021-05-05 18:19:06.000' and (stb5.tbname like '%3' or stb5.tbname like '%2') and stb1.ts between '2021-05-05 18:19:09.000' and '2021-05-05 18:19:14.000' and stb1.c1 > 23; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:11.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:12.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:13.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:14.000@ then + return -1 +endi + +print "column&ts&tag&join test" +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t6,stb5.t7,stb5.t8 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb5.t8 = true and stb5.t7< 3.0000 and stb5.ts > '2021-05-05 18:19:02.000' and stb5.c1 between 10 and 22 and stb5.t1 >'2021-05-05 18:19:01.000'; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:08.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:09.000@ then + return -1 +endi + +print "column&tbname&tag&join test" +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t6,stb5.t7,stb5.t8 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb1.c1 > 11 and (stb5.tbname like '%3' or stb5.tbname like '%6' or stb5.tbname = 'tb5_4') and stb5.t7 > 4 and stb5.t8 <> 'false'; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:25.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:26.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:27.000@ 
then + return -1 +endi + + +print "ts&tbname&tag&join test" +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t6,stb5.t7,stb5.t8 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb1.ts between '2021-05-05 18:19:15.000' and '2099-05-05 18:19:15.000' and stb5.tbname in ('tb5_3','tb5_5','tb5_6') and stb5.t1 >= '2021-05-05 18:19:03.000' and stb5.t1 <= '2021-05-05 18:19:08.000' and stb5.ts <='2021-05-05 18:19:25.000'; +if $rows != 7 then + return -1 +endi +if $data00 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:23.000@ then + return -1 +endi +if $data50 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data60 != @21-05-05 18:19:25.000@ then + return -1 +endi + +print "column&ts&tbname&tag&join test" +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t6,stb5.t7,stb5.t8 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb1.ts between '2021-05-05 18:19:15.000' and '2099-05-05 18:19:15.000' and stb5.tbname in ('tb5_3','tb5_5','tb5_6') and stb5.t1 >= '2021-05-05 18:19:03.000' and stb5.t1 <= '2021-05-05 18:19:08.000' and stb5.ts <='2021-05-05 18:19:25.000' and stb1.c1 between 34 and 60; +if $rows != 5 then + return -1 +endi +if $data00 != @21-05-05 18:19:15.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data40 != @21-05-05 18:19:23.000@ then + return -1 +endi + +sql select stb1.ts,stb1.c1,stb5.t1,stb5.t6,stb5.t7,stb5.t8 from stb1,stb5 where stb1.ts=stb5.ts and stb1.t3=stb5.t7 and stb5.tbname<>'tb5_1' and stb5.t1 <> '2021-05-05 18:19:02.000' and stb1.ts > '2021-05-05 18:19:12.000' and stb5.c1 != 32 and stb5.t6 > 3 and stb5.t7 < 6 and stb5.t8 <> false; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:22.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:23.000@ then + return -1 +endi + + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/condition_query3.sim b/tests/script/general/parser/condition_query3.sim new file mode 100644 index 0000000000000000000000000000000000000000..a88d75f40d2d638e19d356b2f0ef76789c42b2db --- /dev/null +++ b/tests/script/general/parser/condition_query3.sim @@ -0,0 +1,210 @@ +sql use cdb; + +print "index tag test" + +sql select tbname,t1 from stba; +if $rows != 10 then + return -1 +endi + +sql select tbname,t1 from stba where t1 > 2; +if $rows != 7 then + return -1 +endi + +sql select tbname,t1 from stba where t1 >= 4; +if $rows != 6 then + return -1 +endi + +sql select tbname,t1 from stba where t1 >= 3 and t1 <= 6; +if $rows != 4 then + return -1 +endi + +sql select tbname,t1 from stba where t1 = 3; +if $rows != 1 then + return -1 +endi + +sql select tbname,t1 from stba where t1 <> 6; +if $rows != 9 then + return -1 +endi + +sql select tbname,t1 from stba where t1 < 6; +if $rows != 6 then + return -1 +endi + +sql select tbname,t1 from stba where t1 < 6 and t1 >= 2; +if $rows != 4 then + return -1 +endi + +sql select tbname,t1 from stba where t1 is null; +if $rows != 0 then + return -1 +endi +sql select tbname,t1 from stba where t1 is not null; +if $rows != 10 then + return -1 +endi + 
+sql_error select tbname,t1 from stbb where t1 > true;
+sql select tbname,t1 from stbb where t1 = true;
+if $rows != 5 then
+ return -1
+endi
+
+sql select tbname,t1 from stbb where t1 <> true;
+if $rows != 5 then
+ return -1
+endi
+
+sql select tbname,t1 from stbb where t1 is null;
+if $rows != 0 then
+ return -1
+endi
+sql select tbname,t1 from stbb where t1 is not null;
+if $rows != 10 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc;
+if $rows != 10 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 > 2;
+if $rows != 7 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 >= 4;
+if $rows != 6 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 >= 3 and t1 <= 6;
+if $rows != 4 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 = 3;
+if $rows != 1 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 <> 6;
+if $rows != 9 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 < 6;
+if $rows != 6 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 < 6 and t1 >= 2;
+if $rows != 4 then
+ return -1
+endi
+
+sql select tbname,t1 from stbc where t1 is null;
+if $rows != 0 then
+ return -1
+endi
+sql select tbname,t1 from stbc where t1 is not null;
+if $rows != 10 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 > '2222';
+if $rows != 7 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 >= '4444';
+if $rows != 6 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 >= '3333' and t1 <= '6666';
+if $rows != 4 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 = '3333';
+if $rows != 1 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 <> '6666';
+if $rows != 9 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 < '6666';
+if $rows != 6 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 < '6666' and t1 >= '2222';
+if $rows != 4 then
+ return -1
+endi
+
+sql select tbname,t1 from stbd where t1 is null;
+if $rows != 0 then
+ return -1
+endi
+sql select tbname,t1 from stbd where t1 is not null;
+if $rows != 10 then
+ return -1
+endi
+sql select tbname,t1 from stbe where t1 > '2222';
+if $rows != 7 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 >= '4444';
+if $rows != 6 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 >= '3333' and t1 <= '6666';
+if $rows != 4 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 = '3333';
+if $rows != 1 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 <> '6666';
+if $rows != 9 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 < '6666';
+if $rows != 6 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 < '6666' and t1 >= '2222';
+if $rows != 4 then
+ return -1
+endi
+
+sql select tbname,t1 from stbe where t1 is null;
+if $rows != 0 then
+ return -1
+endi
+sql select tbname,t1 from stbe where t1 is not null;
+if $rows != 10 then
+ return -1
+endi
+
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/regex.sim b/tests/script/general/parser/regex.sim
index 5351d914f34004b5bf198fb9e10792306f8ac32b..eed36018d4c04ec5752e64105d025347982bfcb0 100644
--- a/tests/script/general/parser/regex.sim
+++ b/tests/script/general/parser/regex.sim
@@ -29,13 +29,22 @@ endi
 
 sql select tbname from $st_name where tbname match '^ct[[:digit:]]'
-
 if $rows != 2 then
  return -1
 endi
 
+sql select tbname from $st_name where tbname nmatch '^ct[[:digit:]]'
+if $rows != 1 then
+ return -1
+endi
+
 sql select tbname from $st_name where tbname match '.*'
-if $rows !=3 then
+if $rows != 3 then
+ return -1
+endi
+
+sql select tbname from $st_name where tbname nmatch '.*'
+if $rows != 0 then
  return -1
 endi
 
@@ -44,6 +53,11 @@ if $rows != 2 then
  return -1
 endi
 
+sql select tbname from $st_name where t1b nmatch '[[:lower:]]+'
+if $rows != 1 then
+ return -1
+endi
+
 sql insert into $ct1_name values(now, 'this is engine')
 sql insert into $ct2_name values(now, 'this is app egnine')
 
@@ -56,6 +70,52 @@ if $rows != 1 then
  return -1
 endi
 
+sql select c1b from $st_name where c1b nmatch 'engine'
+if $data00 != @this is app egnine@ then
+ return -1
+endi
+
+if $rows != 1 then
+ return -1
+endi
+
+sql_error select c1b from $st_name where c1b match e;
+sql_error select c1b from $st_name where c1b nmatch e;
+
+sql create table wrong_type(ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 bool, c7 nchar(20)) tags(t0 tinyint, t1 smallint, t2 int, t3 bigint, t4 float, t5 double, t6 bool, t7 nchar(10))
+sql insert into wrong_type_1 using wrong_type tags(1, 2, 3, 4, 5, 6, true, 'notsupport') values(now, 1, 2, 3, 4, 5, 6, false, 'notsupport')
+sql_error select * from wrong_type where ts match '.*'
+sql_error select * from wrong_type where ts nmatch '.*'
+sql_error select * from wrong_type where c0 match '.*'
+sql_error select * from wrong_type where c0 nmatch '.*'
+sql_error select * from wrong_type where c1 match '.*'
+sql_error select * from wrong_type where c1 nmatch '.*'
+sql_error select * from wrong_type where c2 match '.*'
+sql_error select * from wrong_type where c2 nmatch '.*'
+sql_error select * from wrong_type where c3 match '.*'
+sql_error select * from wrong_type where c3 nmatch '.*'
+sql_error select * from wrong_type where c4 match '.*'
+sql_error select * from wrong_type where c4 nmatch '.*'
+sql_error select * from wrong_type where c5 match '.*'
+sql_error select * from wrong_type where c5 nmatch '.*'
+sql_error select * from wrong_type where c6 match '.*'
+sql_error select * from wrong_type where c6 nmatch '.*'
+sql_error select * from wrong_type where c7 match '.*'
+sql_error select * from wrong_type where c7 nmatch '.*'
+sql_error select * from wrong_type where t1 match '.*'
+sql_error select * from wrong_type where t1 nmatch '.*'
+sql_error select * from wrong_type where t2 match '.*'
+sql_error select * from wrong_type where t2 nmatch '.*'
+sql_error select * from wrong_type where t3 match '.*'
+sql_error select * from wrong_type where t3 nmatch '.*'
+sql_error select * from wrong_type where t4 match '.*'
+sql_error select * from wrong_type where t4 nmatch '.*'
+sql_error select * from wrong_type where t5 match '.*'
+sql_error select * from wrong_type where t5 nmatch '.*'
+sql_error select * from wrong_type where t6 match '.*'
+sql_error select * from wrong_type where t6 nmatch '.*'
+sql_error select * from wrong_type where t7 match '.*'
+sql_error select * from wrong_type where t7 nmatch '.*'
 
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
 
diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim
index db27886bbfde744910068b702199e2079d24c7d2..7fa579b9c2c9d1187c8630fd7f1dd17808b396f0 100644
--- a/tests/script/general/parser/tbnameIn_query.sim
+++ b/tests/script/general/parser/tbnameIn_query.sim
@@ -125,11 +125,10 @@ if $data21 != 2 then
  return -1
 endi
 
-# multiple tbname in is not allowed NOW
-sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
-#if $rows != 4 then
-# return -1
-#endi
+sql select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
+if $rows != 0 then
+ return -1
+endi
 #if $data00 != $rowNum then
 # return -1
 #endi